--- /dev/null
+From 0ff737c655cd61bb2b593b79fe085ccb0229742b Mon Sep 17 00:00:00 2001
+From: Anjali Singhai Jain <anjali.singhai@intel.com>
+Date: Wed, 21 Oct 2015 19:47:07 -0400
+Subject: [PATCH 001/135] i40e: Workaround fix for mss < 256 issue
+
+[ Upstream commit 4f2f017c6101ab2ba202d6059c238c15577ad38b ]
+
+HW/NVM sets a limit of no less than 256 bytes for MSS. Stack can send as
+low as 76 bytes MSS. This patch lowers the HW limit to 64 bytes to avoid
+MDDs from firing and causing a reset when the MSS is lower than 256.
+
+Change-ID: I36b500a6bb227d283c3e321a7718e0672b11fab0
+Signed-off-by: Anjali Singhai Jain <anjali.singhai@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 27 +++++++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -6685,6 +6685,7 @@ static void i40e_reset_and_rebuild(struc
+ struct i40e_hw *hw = &pf->hw;
+ u8 set_fc_aq_fail = 0;
+ i40e_status ret;
++ u32 val;
+ u32 v;
+
+ /* Now we wait for GRST to settle out.
+@@ -6823,6 +6824,20 @@ static void i40e_reset_and_rebuild(struc
+ }
+ }
+
++ /* Reconfigure hardware for allowing smaller MSS in the case
++ * of TSO, so that we avoid the MDD being fired and causing
++ * a reset in the case of small MSS+TSO.
++ */
++#define I40E_REG_MSS 0x000E64DC
++#define I40E_REG_MSS_MIN_MASK 0x3FF0000
++#define I40E_64BYTE_MSS 0x400000
++ val = rd32(hw, I40E_REG_MSS);
++ if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
++ val &= ~I40E_REG_MSS_MIN_MASK;
++ val |= I40E_64BYTE_MSS;
++ wr32(hw, I40E_REG_MSS, val);
++ }
++
+ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+ (pf->hw.aq.fw_maj_ver < 4)) {
+ msleep(75);
+@@ -10183,6 +10198,7 @@ static int i40e_probe(struct pci_dev *pd
+ u16 link_status;
+ int err;
+ u32 len;
++ u32 val;
+ u32 i;
+ u8 set_fc_aq_fail;
+
+@@ -10493,6 +10509,17 @@ static int i40e_probe(struct pci_dev *pd
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
++ /* Reconfigure hardware for allowing smaller MSS in the case
++ * of TSO, so that we avoid the MDD being fired and causing
++ * a reset in the case of small MSS+TSO.
++ */
++ val = rd32(hw, I40E_REG_MSS);
++ if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
++ val &= ~I40E_REG_MSS_MIN_MASK;
++ val |= I40E_64BYTE_MSS;
++ wr32(hw, I40E_REG_MSS, val);
++ }
++
+ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+ (pf->hw.aq.fw_maj_ver < 4)) {
+ msleep(75);
--- /dev/null
+From 0fde260c8b4187bd5645dd6642d281e942f1f5f7 Mon Sep 17 00:00:00 2001
+From: Mitch Williams <mitch.a.williams@intel.com>
+Date: Wed, 21 Oct 2015 19:47:12 -0400
+Subject: [PATCH 002/135] i40evf: handle many MAC filters correctly
+
+[ Upstream commit 1418c3458118c6969d08e23aa377da7e2a7be36c ]
+
+When a lot (many hundreds) of MAC or VLAN filters are added at one time,
+we can overflow the Admin Queue buffer size with all the requests.
+Unfortunately, the driver would then calculate the message size
+incorrectly, causing it to be rejected by the PF. Furthermore, there was
+no mechanism to trigger another request to allow for configuring the
+rest of the filters that didn't fit into the first request.
+
+To fix this, recalculate the correct buffer size when we detect the
+overflow condition instead of just assuming the max buffer size. Also,
+don't clear the request bit in adapter->aq_required when we have an
+overflow, so that the rest of the filters can be processed later.
+
+Change-ID: Idd7cbbc5af31315e0dcb1b10e6a02ad9817ce65c
+Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 32 +++++++++++++++-----
+ 1 file changed, 24 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
++++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+@@ -391,6 +391,7 @@ void i40evf_add_ether_addrs(struct i40ev
+ struct i40e_virtchnl_ether_addr_list *veal;
+ int len, i = 0, count = 0;
+ struct i40evf_mac_filter *f;
++ bool more = false;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+@@ -415,7 +416,9 @@ void i40evf_add_ether_addrs(struct i40ev
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_ether_addr_list)) /
+ sizeof(struct i40e_virtchnl_ether_addr);
+- len = I40EVF_MAX_AQ_BUF_SIZE;
++ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
++ (count * sizeof(struct i40e_virtchnl_ether_addr));
++ more = true;
+ }
+
+ veal = kzalloc(len, GFP_ATOMIC);
+@@ -431,7 +434,8 @@ void i40evf_add_ether_addrs(struct i40ev
+ f->add = false;
+ }
+ }
+- adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
++ if (!more)
++ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ (u8 *)veal, len);
+ kfree(veal);
+@@ -450,6 +454,7 @@ void i40evf_del_ether_addrs(struct i40ev
+ struct i40e_virtchnl_ether_addr_list *veal;
+ struct i40evf_mac_filter *f, *ftmp;
+ int len, i = 0, count = 0;
++ bool more = false;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+@@ -474,7 +479,9 @@ void i40evf_del_ether_addrs(struct i40ev
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_ether_addr_list)) /
+ sizeof(struct i40e_virtchnl_ether_addr);
+- len = I40EVF_MAX_AQ_BUF_SIZE;
++ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
++ (count * sizeof(struct i40e_virtchnl_ether_addr));
++ more = true;
+ }
+ veal = kzalloc(len, GFP_ATOMIC);
+ if (!veal)
+@@ -490,7 +497,8 @@ void i40evf_del_ether_addrs(struct i40ev
+ kfree(f);
+ }
+ }
+- adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
++ if (!more)
++ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ (u8 *)veal, len);
+ kfree(veal);
+@@ -509,6 +517,7 @@ void i40evf_add_vlans(struct i40evf_adap
+ struct i40e_virtchnl_vlan_filter_list *vvfl;
+ int len, i = 0, count = 0;
+ struct i40evf_vlan_filter *f;
++ bool more = false;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+@@ -534,7 +543,9 @@ void i40evf_add_vlans(struct i40evf_adap
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(u16);
+- len = I40EVF_MAX_AQ_BUF_SIZE;
++ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
++ (count * sizeof(u16));
++ more = true;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl)
+@@ -549,7 +560,8 @@ void i40evf_add_vlans(struct i40evf_adap
+ f->add = false;
+ }
+ }
+- adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
++ if (!more)
++ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ }
+@@ -567,6 +579,7 @@ void i40evf_del_vlans(struct i40evf_adap
+ struct i40e_virtchnl_vlan_filter_list *vvfl;
+ struct i40evf_vlan_filter *f, *ftmp;
+ int len, i = 0, count = 0;
++ bool more = false;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+@@ -592,7 +605,9 @@ void i40evf_del_vlans(struct i40evf_adap
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(u16);
+- len = I40EVF_MAX_AQ_BUF_SIZE;
++ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
++ (count * sizeof(u16));
++ more = true;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl)
+@@ -608,7 +623,8 @@ void i40evf_del_vlans(struct i40evf_adap
+ kfree(f);
+ }
+ }
+- adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
++ if (!more)
++ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ }
--- /dev/null
+From 90a34cb0f870cef3277c89e7927be06d532a06e1 Mon Sep 17 00:00:00 2001
+From: Anjali Singhai Jain <anjali.singhai@intel.com>
+Date: Mon, 26 Oct 2015 19:44:29 -0400
+Subject: [PATCH 003/135] i40e/i40evf: Fix RS bit update in Tx path and disable
+ force WB workaround
+
+[ Upstream commit 6a7fded776a778f728b13d83a2c9fc893580c080 ]
+
+This patch fixes the issue of forcing WB too often causing us to not
+benefit from NAPI.
+
+Without this patch we were forcing WB/arming interrupt too often taking
+away the benefits of NAPI and causing a performance impact.
+
+With this patch we disable force WB in the clean routine for X710
+and XL710 adapters. X722 adapters do not enable interrupt to force
+a WB and benefit from WB_ON_ITR and hence force WB is left enabled
+for those adapters.
+For XL710 and X710 adapters if we have less than 4 packets pending
+a software Interrupt triggered from service task will force a WB.
+
+This patch also changes the conditions for setting RS bit as described
+in code comments. This optimizes when the HW does a tail bump and when
+it does a WB. It also optimizes when we do a wmb.
+
+Change-ID: Id831e1ae7d3e2ec3f52cd0917b41ce1d22d75d9d
+Signed-off-by: Anjali Singhai Jain <anjali.singhai@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 118 ++++++++++++++++----------
+ drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 2
+ 2 files changed, 77 insertions(+), 43 deletions(-)
+
+--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+@@ -245,16 +245,6 @@ static bool i40e_clean_tx_irq(struct i40
+ tx_ring->q_vector->tx.total_bytes += total_bytes;
+ tx_ring->q_vector->tx.total_packets += total_packets;
+
+- /* check to see if there are any non-cache aligned descriptors
+- * waiting to be written back, and kick the hardware to force
+- * them to be written back in case of napi polling
+- */
+- if (budget &&
+- !((i & WB_STRIDE) == WB_STRIDE) &&
+- !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+- (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+- tx_ring->arm_wb = true;
+-
+ netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ total_packets, total_bytes);
+@@ -1770,6 +1760,9 @@ static inline void i40evf_tx_map(struct
+ u32 td_tag = 0;
+ dma_addr_t dma;
+ u16 gso_segs;
++ u16 desc_count = 0;
++ bool tail_bump = true;
++ bool do_rs = false;
+
+ if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
+ td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+@@ -1810,6 +1803,8 @@ static inline void i40evf_tx_map(struct
+
+ tx_desc++;
+ i++;
++ desc_count++;
++
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+@@ -1829,6 +1824,8 @@ static inline void i40evf_tx_map(struct
+
+ tx_desc++;
+ i++;
++ desc_count++;
++
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+@@ -1843,35 +1840,7 @@ static inline void i40evf_tx_map(struct
+ tx_bi = &tx_ring->tx_bi[i];
+ }
+
+- /* Place RS bit on last descriptor of any packet that spans across the
+- * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
+- */
+ #define WB_STRIDE 0x3
+- if (((i & WB_STRIDE) != WB_STRIDE) &&
+- (first <= &tx_ring->tx_bi[i]) &&
+- (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
+- tx_desc->cmd_type_offset_bsz =
+- build_ctob(td_cmd, td_offset, size, td_tag) |
+- cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
+- I40E_TXD_QW1_CMD_SHIFT);
+- } else {
+- tx_desc->cmd_type_offset_bsz =
+- build_ctob(td_cmd, td_offset, size, td_tag) |
+- cpu_to_le64((u64)I40E_TXD_CMD <<
+- I40E_TXD_QW1_CMD_SHIFT);
+- }
+-
+- netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+- tx_ring->queue_index),
+- first->bytecount);
+-
+- /* Force memory writes to complete before letting h/w
+- * know there are new descriptors to fetch. (Only
+- * applicable for weak-ordered memory model archs,
+- * such as IA-64).
+- */
+- wmb();
+-
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+@@ -1881,15 +1850,78 @@ static inline void i40evf_tx_map(struct
+
+ tx_ring->next_to_use = i;
+
++ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
++ tx_ring->queue_index),
++ first->bytecount);
+ i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
++
++ /* Algorithm to optimize tail and RS bit setting:
++ * if xmit_more is supported
++ * if xmit_more is true
++ * do not update tail and do not mark RS bit.
++ * if xmit_more is false and last xmit_more was false
++ * if every packet spanned less than 4 desc
++ * then set RS bit on 4th packet and update tail
++ * on every packet
++ * else
++ * update tail and set RS bit on every packet.
++ * if xmit_more is false and last_xmit_more was true
++ * update tail and set RS bit.
++ * else (kernel < 3.18)
++ * if every packet spanned less than 4 desc
++ * then set RS bit on 4th packet and update tail
++ * on every packet
++ * else
++ * set RS bit on EOP for every packet and update tail
++ *
++ * Optimization: wmb to be issued only in case of tail update.
++ * Also optimize the Descriptor WB path for RS bit with the same
++ * algorithm.
++ *
++ * Note: If there are less than 4 packets
++ * pending and interrupts were disabled the service task will
++ * trigger a force WB.
++ */
++ if (skb->xmit_more &&
++ !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
++ tx_ring->queue_index))) {
++ tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
++ tail_bump = false;
++ } else if (!skb->xmit_more &&
++ !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
++ tx_ring->queue_index)) &&
++ (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
++ (tx_ring->packet_stride < WB_STRIDE) &&
++ (desc_count < WB_STRIDE)) {
++ tx_ring->packet_stride++;
++ } else {
++ tx_ring->packet_stride = 0;
++ tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
++ do_rs = true;
++ }
++ if (do_rs)
++ tx_ring->packet_stride = 0;
++
++ tx_desc->cmd_type_offset_bsz =
++ build_ctob(td_cmd, td_offset, size, td_tag) |
++ cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
++ I40E_TX_DESC_CMD_EOP) <<
++ I40E_TXD_QW1_CMD_SHIFT);
++
+ /* notify HW of packet */
+- if (!skb->xmit_more ||
+- netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+- tx_ring->queue_index)))
+- writel(i, tx_ring->tail);
+- else
++ if (!tail_bump)
+ prefetchw(tx_desc + 1);
+
++ if (tail_bump) {
++ /* Force memory writes to complete before letting h/w
++ * know there are new descriptors to fetch. (Only
++ * applicable for weak-ordered memory model archs,
++ * such as IA-64).
++ */
++ wmb();
++ writel(i, tx_ring->tail);
++ }
++
+ return;
+
+ dma_error:
+--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
++++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+@@ -267,6 +267,8 @@ struct i40e_ring {
+
+ bool ring_active; /* is ring online or not */
+ bool arm_wb; /* do something to arm write back */
++ u8 packet_stride;
++#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
+
+ u16 flags;
+ #define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
--- /dev/null
+From 85e29ebd4dc47ea1a11d368455843967464384bd Mon Sep 17 00:00:00 2001
+From: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Date: Thu, 5 Nov 2015 17:01:01 -0800
+Subject: [PATCH 004/135] i40e: fix: do not sleep in netdev_ops
+
+[ Upstream commit 0e4425ed641f3eef67c892bc541949cd745a9ba9 ]
+
+The driver was being called by VLAN, bonding, teaming operations
+that expected to be able to hold locks like rcu_read_lock().
+
+This causes the driver to be held to the requirement to not sleep,
+and was found by the kernel debug options for checking sleep
+inside critical section, and the locking validator.
+
+Change-ID: Ibc68c835f5ffa8ffe0638ffe910a66fc5649a7f7
+Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 44 ++++++++++++----------------
+ 1 file changed, 20 insertions(+), 24 deletions(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -1547,9 +1547,11 @@ static int i40e_set_mac(struct net_devic
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ }
+
+- i40e_sync_vsi_filters(vsi, false);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+-
++ /* schedule our worker thread which will take care of
++ * applying the new filter changes
++ */
++ i40e_service_event_schedule(vsi->back);
+ return 0;
+ }
+
+@@ -2112,12 +2114,7 @@ int i40e_sync_vsi_filters(struct i40e_vs
+ */
+ if (pf->cur_promisc != cur_promisc) {
+ pf->cur_promisc = cur_promisc;
+- if (grab_rtnl)
+- i40e_do_reset_safe(pf,
+- BIT(__I40E_PF_RESET_REQUESTED));
+- else
+- i40e_do_reset(pf,
+- BIT(__I40E_PF_RESET_REQUESTED));
++ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ }
+ } else {
+ ret = i40e_aq_set_vsi_unicast_promiscuous(
+@@ -2377,16 +2374,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *v
+ }
+ }
+
+- /* Make sure to release before sync_vsi_filter because that
+- * function will lock/unlock as necessary
+- */
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+- return 0;
+-
+- return i40e_sync_vsi_filters(vsi, false);
++ /* schedule our worker thread which will take care of
++ * applying the new filter changes
++ */
++ i40e_service_event_schedule(vsi->back);
++ return 0;
+ }
+
+ /**
+@@ -2459,16 +2453,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *
+ }
+ }
+
+- /* Make sure to release before sync_vsi_filter because that
+- * function with lock/unlock as necessary
+- */
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+- return 0;
+-
+- return i40e_sync_vsi_filters(vsi, false);
++ /* schedule our worker thread which will take care of
++ * applying the new filter changes
++ */
++ i40e_service_event_schedule(vsi->back);
++ return 0;
+ }
+
+ /**
+@@ -2711,6 +2702,11 @@ static void i40e_config_xps_tx_ring(stru
+ netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
+ free_cpumask_var(mask);
+ }
++
++ /* schedule our worker thread which will take care of
++ * applying the new filter changes
++ */
++ i40e_service_event_schedule(vsi->back);
+ }
+
+ /**
--- /dev/null
+From 47930ccdb064dbe13b28eacd638b2b52939f5727 Mon Sep 17 00:00:00 2001
+From: Kiran Patil <kiran.patil@intel.com>
+Date: Fri, 6 Nov 2015 15:26:03 -0800
+Subject: [PATCH 005/135] i40e: Fix memory leaks, sideband filter programming
+
+[ Upstream commit a42e7a369ea2b73a554a85dea7d6243af51cd4f0 ]
+
+This patch fixes the memory leak which would be seen otherwise when user
+programs flow-director filter using ethtool (sideband filter programming).
+
+When ethtool is used to program flow directory filter, 'raw_buf' gets
+allocated and it is supposed to be freed as part of queue cleanup. But
+check of 'tx_buffer->skb' was preventing it from being freed.
+
+Change-ID: Ief4f0a1a32a653180498bf6e987c1b4342ab8923
+Signed-off-by: Kiran Patil <kiran.patil@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 19 ++++++++++++++-----
+ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 10 +++++-----
+ 2 files changed, 19 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -235,6 +235,9 @@ static int i40e_add_del_fdir_udpv4(struc
+ "Filter deleted for PCTYPE %d loc = %d\n",
+ fd_data->pctype, fd_data->fd_id);
+ }
++ if (err)
++ kfree(raw_packet);
++
+ return err ? -EOPNOTSUPP : 0;
+ }
+
+@@ -312,6 +315,9 @@ static int i40e_add_del_fdir_tcpv4(struc
+ fd_data->pctype, fd_data->fd_id);
+ }
+
++ if (err)
++ kfree(raw_packet);
++
+ return err ? -EOPNOTSUPP : 0;
+ }
+
+@@ -387,6 +393,9 @@ static int i40e_add_del_fdir_ipv4(struct
+ }
+ }
+
++ if (err)
++ kfree(raw_packet);
++
+ return err ? -EOPNOTSUPP : 0;
+ }
+
+@@ -526,11 +535,7 @@ static void i40e_unmap_and_free_tx_resou
+ struct i40e_tx_buffer *tx_buffer)
+ {
+ if (tx_buffer->skb) {
+- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+- kfree(tx_buffer->raw_buf);
+- else
+- dev_kfree_skb_any(tx_buffer->skb);
+-
++ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+@@ -542,6 +547,10 @@ static void i40e_unmap_and_free_tx_resou
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
++
++ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
++ kfree(tx_buffer->raw_buf);
++
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+@@ -51,11 +51,7 @@ static void i40e_unmap_and_free_tx_resou
+ struct i40e_tx_buffer *tx_buffer)
+ {
+ if (tx_buffer->skb) {
+- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+- kfree(tx_buffer->raw_buf);
+- else
+- dev_kfree_skb_any(tx_buffer->skb);
+-
++ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+@@ -67,6 +63,10 @@ static void i40e_unmap_and_free_tx_resou
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
++
++ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
++ kfree(tx_buffer->raw_buf);
++
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
--- /dev/null
+From 0163083d5ed77ee3e08b0cd722ef340ee78682d4 Mon Sep 17 00:00:00 2001
+From: Mitch Williams <mitch.a.williams@intel.com>
+Date: Thu, 19 Nov 2015 11:34:16 -0800
+Subject: [PATCH 006/135] i40e: properly delete VF MAC filters
+
+[ Upstream commit b36e9ab59b7e3a5b14bf88dc0536e6579db7b54d ]
+
+The virtual channel interface was using incorrect semantics to remove
+MAC addresses, which would leave incorrect filters active when using
+VLANs. To correct this, add a new function that unconditionally removes
+MAC addresses from all VLANs, and call this function when the VF
+requests a MAC filter removal.
+
+Change-ID: I69826908ae4f6c847f5bf9b32f11faa760189c74
+Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e.h | 2 +
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 36 +++++++++++++++++++++
+ drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 8 +++-
+ 3 files changed, 44 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -767,6 +767,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *v
+ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
+ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev);
++int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
++ bool is_vf, bool is_netdev);
+ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
+ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev);
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -1317,6 +1317,42 @@ struct i40e_mac_filter *i40e_put_mac_in_
+ }
+
+ /**
++ * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
++ * @vsi: the VSI to be searched
++ * @macaddr: the mac address to be removed
++ * @is_vf: true if it is a VF
++ * @is_netdev: true if it is a netdev
++ *
++ * Removes a given MAC address from a VSI, regardless of VLAN
++ *
++ * Returns 0 for success, or error
++ **/
++int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
++ bool is_vf, bool is_netdev)
++{
++ struct i40e_mac_filter *f = NULL;
++ int changed = 0;
++
++ WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
++ "Missing mac_filter_list_lock\n");
++ list_for_each_entry(f, &vsi->mac_filter_list, list) {
++ if ((ether_addr_equal(macaddr, f->macaddr)) &&
++ (is_vf == f->is_vf) &&
++ (is_netdev == f->is_netdev)) {
++ f->counter--;
++ f->changed = true;
++ changed = 1;
++ }
++ }
++ if (changed) {
++ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
++ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
++ return 0;
++ }
++ return -ENOENT;
++}
++
++/**
+ * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
+ * @vsi: the PF Main VSI - inappropriate for any other VSI
+ * @macaddr: the MAC address
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -1680,8 +1680,12 @@ static int i40e_vc_del_mac_addr_msg(stru
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ /* delete addresses from the list */
+ for (i = 0; i < al->num_elements; i++)
+- i40e_del_filter(vsi, al->list[i].addr,
+- I40E_VLAN_ANY, true, false);
++ if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) {
++ ret = I40E_ERR_INVALID_MAC_ADDR;
++ spin_unlock_bh(&vsi->mac_filter_list_lock);
++ goto error_param;
++ }
++
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+ /* program the updated filter list */
--- /dev/null
+From 290c71a6a36614671ac13432d26c0e02370afe5b Mon Sep 17 00:00:00 2001
+From: Mitch Williams <mitch.a.williams@intel.com>
+Date: Thu, 19 Nov 2015 11:34:17 -0800
+Subject: [PATCH 007/135] i40e: don't add zero MAC filter
+
+[ Upstream commit b7b713a8eaf325607d37229f024ad0b9f3e7f320 ]
+
+When VFs are created, the MAC address defaults to all zeros, indicating
+to the VF driver that it should use a random MAC address. However, the
+PF driver was incorrectly adding this zero MAC to the filter table,
+along with the VF's randomly generated MAC address.
+
+Check for a good address before adding the default filter. While we're
+at it, make the error message a bit more useful.
+
+Change-ID: Ia100947d68140e0f73a19ba755cbffc3e79a8fcf
+Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -549,12 +549,15 @@ static int i40e_alloc_vsi_res(struct i40
+ i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
+
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+- f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
+- vf->port_vlan_id ? vf->port_vlan_id : -1,
+- true, false);
+- if (!f)
+- dev_info(&pf->pdev->dev,
+- "Could not allocate VF MAC addr\n");
++ if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
++ f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
++ vf->port_vlan_id ? vf->port_vlan_id : -1,
++ true, false);
++ if (!f)
++ dev_info(&pf->pdev->dev,
++ "Could not add MAC filter %pM for VF %d\n",
++ vf->default_lan_addr.addr, vf->vf_id);
++ }
+ f = i40e_add_filter(vsi, brdcast,
+ vf->port_vlan_id ? vf->port_vlan_id : -1,
+ true, false);
--- /dev/null
+From a8cad39850bbbd03834ce1c1d21603358f7651fd Mon Sep 17 00:00:00 2001
+From: Mitch Williams <mitch.a.williams@intel.com>
+Date: Thu, 19 Nov 2015 11:34:18 -0800
+Subject: [PATCH 008/135] i40evf: check rings before freeing resources
+
+[ Upstream commit fdb47ae87af537b24977a03bc69cfe1c5c55ca62 ]
+
+If the driver gets unloaded during reset recovery, it's possible
+that it will attempt to free resources when they're already free.
+
+Add a check to make sure that the Tx and Rx rings actually exist
+before dereferencing them to free resources.
+
+Change-ID: I4d2b7e9ede49f634d421a4c5deaa5446bc755eee
+Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/i40evf/i40evf_main.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
++++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+@@ -1864,6 +1864,9 @@ void i40evf_free_all_tx_resources(struct
+ {
+ int i;
+
++ if (!adapter->tx_rings)
++ return;
++
+ for (i = 0; i < adapter->num_active_queues; i++)
+ if (adapter->tx_rings[i]->desc)
+ i40evf_free_tx_resources(adapter->tx_rings[i]);
+@@ -1932,6 +1935,9 @@ void i40evf_free_all_rx_resources(struct
+ {
+ int i;
+
++ if (!adapter->rx_rings)
++ return;
++
+ for (i = 0; i < adapter->num_active_queues; i++)
+ if (adapter->rx_rings[i]->desc)
+ i40evf_free_rx_resources(adapter->rx_rings[i]);
--- /dev/null
+From 83c8f9244cde13960583de6bc8666d8e87fc6995 Mon Sep 17 00:00:00 2001
+From: Shannon Nelson <shannon.nelson@intel.com>
+Date: Thu, 19 Nov 2015 11:34:23 -0800
+Subject: [PATCH 009/135] i40e: clean whole mac filter list
+
+[ Upstream commit f11999987bc0b5559ab56dedc6f4ca32fab5438a ]
+
+Clean the whole mac filter list when resetting after an intermediate
+add or delete push to the firmware. The code had evolved from using
+a list from the stack to a heap allocation, but the memset() didn't
+follow the change correctly. This now cleans the whole list rather
+than just part of the first element.
+
+Change-ID: I4cd03d5a103b7407dd8556a3a231e800f2d6f2d5
+Reported-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -1973,11 +1973,13 @@ int i40e_sync_vsi_filters(struct i40e_vs
+
+ /* Now process 'del_list' outside the lock */
+ if (!list_empty(&tmp_del_list)) {
++ int del_list_size;
++
+ filter_list_len = pf->hw.aq.asq_buf_size /
+ sizeof(struct i40e_aqc_remove_macvlan_element_data);
+- del_list = kcalloc(filter_list_len,
+- sizeof(struct i40e_aqc_remove_macvlan_element_data),
+- GFP_KERNEL);
++ del_list_size = filter_list_len *
++ sizeof(struct i40e_aqc_remove_macvlan_element_data);
++ del_list = kzalloc(del_list_size, GFP_KERNEL);
+ if (!del_list) {
+ i40e_cleanup_add_list(&tmp_add_list);
+
+@@ -2009,7 +2011,7 @@ int i40e_sync_vsi_filters(struct i40e_vs
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
+ num_del = 0;
+- memset(del_list, 0, sizeof(*del_list));
++ memset(del_list, 0, del_list_size);
+
+ if (ret && aq_err != I40E_AQ_RC_ENOENT)
+ dev_err(&pf->pdev->dev,
+@@ -2042,13 +2044,14 @@ int i40e_sync_vsi_filters(struct i40e_vs
+ }
+
+ if (!list_empty(&tmp_add_list)) {
++ int add_list_size;
+
+ /* do all the adds now */
+ filter_list_len = pf->hw.aq.asq_buf_size /
+ sizeof(struct i40e_aqc_add_macvlan_element_data),
+- add_list = kcalloc(filter_list_len,
+- sizeof(struct i40e_aqc_add_macvlan_element_data),
+- GFP_KERNEL);
++ add_list_size = filter_list_len *
++ sizeof(struct i40e_aqc_add_macvlan_element_data);
++ add_list = kzalloc(add_list_size, GFP_KERNEL);
+ if (!add_list) {
+ /* Purge element from temporary lists */
+ i40e_cleanup_add_list(&tmp_add_list);
+@@ -2086,7 +2089,7 @@ int i40e_sync_vsi_filters(struct i40e_vs
+
+ if (ret)
+ break;
+- memset(add_list, 0, sizeof(*add_list));
++ memset(add_list, 0, add_list_size);
+ }
+ /* Entries from tmp_add_list were cloned from MAC
+ * filter list, hence clean those cloned entries
--- /dev/null
+From c792f057b4b05e56408402204372b8e7e6eff4ec Mon Sep 17 00:00:00 2001
+From: Anjali Singhai Jain <anjali.singhai@intel.com>
+Date: Wed, 9 Dec 2015 15:50:21 -0800
+Subject: [PATCH 010/135] i40e: Fix Rx hash reported to the stack by our driver
+
+[ Upstream commit 857942fd1aa15edf7356a4a4bad5369c8e70a633 ]
+
+If the driver calls skb_set_hash even with a zero hash, that
+indicates to the stack that the hash calculation is offloaded
+in hardware. So the Stack doesn't do a SW hash which is required
+for load balancing if the user decides to turn off rx-hashing
+on our device.
+
+This patch fixes the path so that we do not call skb_set_hash
+if the feature is disabled.
+
+Change-ID: Ic4debfa4ff91b5a72e447348a75768ed7a2d3e1b
+Signed-off-by: Anjali Singhai Jain <anjali.singhai@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 54 +++++++++++++-------------
+ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 54 +++++++++++++-------------
+ 2 files changed, 58 insertions(+), 50 deletions(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -1425,31 +1425,12 @@ checksum_fail:
+ }
+
+ /**
+- * i40e_rx_hash - returns the hash value from the Rx descriptor
+- * @ring: descriptor ring
+- * @rx_desc: specific descriptor
+- **/
+-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
+- union i40e_rx_desc *rx_desc)
+-{
+- const __le64 rss_mask =
+- cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+- I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+-
+- if ((ring->netdev->features & NETIF_F_RXHASH) &&
+- (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+- return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+- else
+- return 0;
+-}
+-
+-/**
+- * i40e_ptype_to_hash - get a hash type
++ * i40e_ptype_to_htype - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ **/
+-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
++static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
+ {
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+
+@@ -1467,6 +1448,30 @@ static inline enum pkt_hash_types i40e_p
+ }
+
+ /**
++ * i40e_rx_hash - set the hash value in the skb
++ * @ring: descriptor ring
++ * @rx_desc: specific descriptor
++ **/
++static inline void i40e_rx_hash(struct i40e_ring *ring,
++ union i40e_rx_desc *rx_desc,
++ struct sk_buff *skb,
++ u8 rx_ptype)
++{
++ u32 hash;
++ const __le64 rss_mask =
++ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
++ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
++
++ if (!(ring->netdev->features & NETIF_F_RXHASH))
++ return;
++
++ if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
++ hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
++ skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
++ }
++}
++
++/**
+ * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
+ * @rx_ring: rx ring to clean
+ * @budget: how many cleans we're allowed
+@@ -1615,8 +1620,8 @@ static int i40e_clean_rx_irq_ps(struct i
+ continue;
+ }
+
+- skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+- i40e_ptype_to_hash(rx_ptype));
++ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
++
+ if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
+ i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
+ I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+@@ -1745,8 +1750,7 @@ static int i40e_clean_rx_irq_1buf(struct
+ continue;
+ }
+
+- skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+- i40e_ptype_to_hash(rx_ptype));
++ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
+ i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
+ I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+@@ -879,31 +879,12 @@ checksum_fail:
+ }
+
+ /**
+- * i40e_rx_hash - returns the hash value from the Rx descriptor
+- * @ring: descriptor ring
+- * @rx_desc: specific descriptor
+- **/
+-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
+- union i40e_rx_desc *rx_desc)
+-{
+- const __le64 rss_mask =
+- cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+- I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+-
+- if ((ring->netdev->features & NETIF_F_RXHASH) &&
+- (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+- return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+- else
+- return 0;
+-}
+-
+-/**
+- * i40e_ptype_to_hash - get a hash type
++ * i40e_ptype_to_htype - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ **/
+-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
++static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
+ {
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+
+@@ -921,6 +902,30 @@ static inline enum pkt_hash_types i40e_p
+ }
+
+ /**
++ * i40e_rx_hash - set the hash value in the skb
++ * @ring: descriptor ring
++ * @rx_desc: specific descriptor
++ **/
++static inline void i40e_rx_hash(struct i40e_ring *ring,
++ union i40e_rx_desc *rx_desc,
++ struct sk_buff *skb,
++ u8 rx_ptype)
++{
++ u32 hash;
++ const __le64 rss_mask =
++ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
++ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
++
++ if (!(ring->netdev->features & NETIF_F_RXHASH))
++ return;
++
++ if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
++ hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
++ skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
++ }
++}
++
++/**
+ * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
+ * @rx_ring: rx ring to clean
+ * @budget: how many cleans we're allowed
+@@ -1061,8 +1066,8 @@ static int i40e_clean_rx_irq_ps(struct i
+ continue;
+ }
+
+- skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+- i40e_ptype_to_hash(rx_ptype));
++ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
++
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+@@ -1179,8 +1184,7 @@ static int i40e_clean_rx_irq_1buf(struct
+ continue;
+ }
+
+- skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+- i40e_ptype_to_hash(rx_ptype));
++ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
--- /dev/null
+From 16fb0c20315adf90ded31fd3937cce8b66bb22bd Mon Sep 17 00:00:00 2001
+From: Jarod Wilson <jarod@redhat.com>
+Date: Thu, 10 Sep 2015 15:37:50 -0400
+Subject: [PATCH 011/135] igb: don't unmap NULL hw_addr
+
+[ Upstream commit 73bf8048d7c86a20a59d427e55deb1a778e94df7 ]
+
+I've got a startech thunderbolt dock someone loaned me, which among other
+things, has the following device in it:
+
+08:00.0 Ethernet controller: Intel Corporation I210 Gigabit Network Connection (rev 03)
+
+This hotplugs just fine (kernel 4.2.0 plus a patch or two here):
+
+[ 863.020315] igb: Intel(R) Gigabit Ethernet Network Driver - version 5.2.18-k
+[ 863.020316] igb: Copyright (c) 2007-2014 Intel Corporation.
+[ 863.028657] igb 0000:08:00.0: enabling device (0000 -> 0002)
+[ 863.062089] igb 0000:08:00.0: added PHC on eth0
+[ 863.062090] igb 0000:08:00.0: Intel(R) Gigabit Ethernet Network Connection
+[ 863.062091] igb 0000:08:00.0: eth0: (PCIe:2.5Gb/s:Width x1) e8:ea:6a:00:1b:2a
+[ 863.062194] igb 0000:08:00.0: eth0: PBA No: 000200-000
+[ 863.062196] igb 0000:08:00.0: Using MSI-X interrupts. 4 rx queue(s), 4 tx queue(s)
+[ 863.064889] igb 0000:08:00.0 enp8s0: renamed from eth0
+
+But disconnecting it is another story:
+
+[ 1002.807932] igb 0000:08:00.0: removed PHC on enp8s0
+[ 1002.807944] igb 0000:08:00.0 enp8s0: PCIe link lost, device now detached
+[ 1003.341141] ------------[ cut here ]------------
+[ 1003.341148] WARNING: CPU: 0 PID: 199 at lib/iomap.c:43 bad_io_access+0x38/0x40()
+[ 1003.341149] Bad IO access at port 0x0 ()
+[ 1003.342767] Modules linked in: snd_usb_audio snd_usbmidi_lib snd_rawmidi igb dca firewire_ohci firewire_core crc_itu_t rfcomm ctr ccm arc4 iwlmvm mac80211 fuse xt_CHECKSUM ipt_MASQUERADE
+nf_nat_masquerade_ipv4 tun ip6t_rpfilter ip6t_REJECT nf_reject_ipv6 ipt_REJECT nf_reject_ipv4 xt_conntrack ebtable_nat ebtable_broute bridge stp llc ebtable_filter ebtables ip6table_nat
+nf_conntrack_ipv6 nf_defrag_ipv6 nf_nat_ipv6 ip6table_mangle ip6table_security ip6table_raw ip6table_filter ip6_tables iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat
+nf_conntrack iptable_mangle iptable_security iptable_raw iptable_filter bnep dm_mirror dm_region_hash dm_log dm_mod coretemp x86_pkg_temp_thermal intel_powerclamp kvm_intel snd_hda_codec_hdmi kvm
+crct10dif_pclmul crc32_pclmul ghash_clmulni_intel drbg
+[ 1003.342793] ansi_cprng aesni_intel hp_wmi aes_x86_64 iTCO_wdt lrw iTCO_vendor_support ppdev gf128mul sparse_keymap glue_helper ablk_helper cryptd snd_hda_codec_realtek snd_hda_codec_generic
+microcode snd_hda_intel uvcvideo iwlwifi snd_hda_codec videobuf2_vmalloc videobuf2_memops snd_hda_core videobuf2_core snd_hwdep btusb v4l2_common btrtl snd_seq btbcm btintel videodev cfg80211
+snd_seq_device rtsx_pci_ms bluetooth pcspkr input_leds i2c_i801 media parport_pc memstick rfkill sg lpc_ich snd_pcm 8250_fintek parport joydev snd_timer snd soundcore hp_accel ie31200_edac
+mei_me lis3lv02d edac_core input_polldev mei hp_wireless shpchp tpm_infineon sch_fq_codel nfsd auth_rpcgss nfs_acl lockd grace sunrpc ip_tables autofs4 xfs libcrc32c sd_mod sr_mod cdrom
+rtsx_pci_sdmmc mmc_core crc32c_intel serio_raw rtsx_pci
+[ 1003.342822] nouveau ahci libahci mxm_wmi e1000e xhci_pci hwmon ptp drm_kms_helper pps_core xhci_hcd ttm wmi video ipv6
+[ 1003.342839] CPU: 0 PID: 199 Comm: kworker/0:2 Not tainted 4.2.0-2.el7_UNSUPPORTED.x86_64 #1
+[ 1003.342840] Hardware name: Hewlett-Packard HP ZBook 15 G2/2253, BIOS M70 Ver. 01.07 02/26/2015
+[ 1003.342843] Workqueue: pciehp-3 pciehp_power_thread
+[ 1003.342844] ffffffff81a90655 ffff8804866d3b48 ffffffff8164763a 0000000000000000
+[ 1003.342846] ffff8804866d3b98 ffff8804866d3b88 ffffffff8107134a ffff8804866d3b88
+[ 1003.342847] ffff880486f46000 ffff88046c8a8000 ffff880486f46840 ffff88046c8a8098
+[ 1003.342848] Call Trace:
+[ 1003.342852] [<ffffffff8164763a>] dump_stack+0x45/0x57
+[ 1003.342855] [<ffffffff8107134a>] warn_slowpath_common+0x8a/0xc0
+[ 1003.342857] [<ffffffff810713c6>] warn_slowpath_fmt+0x46/0x50
+[ 1003.342859] [<ffffffff8133719e>] ? pci_disable_msix+0x3e/0x50
+[ 1003.342860] [<ffffffff812f6328>] bad_io_access+0x38/0x40
+[ 1003.342861] [<ffffffff812f6567>] pci_iounmap+0x27/0x40
+[ 1003.342865] [<ffffffffa0b728d7>] igb_remove+0xc7/0x160 [igb]
+[ 1003.342867] [<ffffffff8132189f>] pci_device_remove+0x3f/0xc0
+[ 1003.342869] [<ffffffff81433426>] __device_release_driver+0x96/0x130
+[ 1003.342870] [<ffffffff814334e3>] device_release_driver+0x23/0x30
+[ 1003.342871] [<ffffffff8131b404>] pci_stop_bus_device+0x94/0xa0
+[ 1003.342872] [<ffffffff8131b3ad>] pci_stop_bus_device+0x3d/0xa0
+[ 1003.342873] [<ffffffff8131b3ad>] pci_stop_bus_device+0x3d/0xa0
+[ 1003.342874] [<ffffffff8131b516>] pci_stop_and_remove_bus_device+0x16/0x30
+[ 1003.342876] [<ffffffff81333f5b>] pciehp_unconfigure_device+0x9b/0x180
+[ 1003.342877] [<ffffffff81333a73>] pciehp_disable_slot+0x43/0xb0
+[ 1003.342878] [<ffffffff81333b6d>] pciehp_power_thread+0x8d/0xb0
+[ 1003.342885] [<ffffffff810881b2>] process_one_work+0x152/0x3d0
+[ 1003.342886] [<ffffffff8108854a>] worker_thread+0x11a/0x460
+[ 1003.342887] [<ffffffff81088430>] ? process_one_work+0x3d0/0x3d0
+[ 1003.342890] [<ffffffff8108ddd9>] kthread+0xc9/0xe0
+[ 1003.342891] [<ffffffff8108dd10>] ? kthread_create_on_node+0x180/0x180
+[ 1003.342893] [<ffffffff8164e29f>] ret_from_fork+0x3f/0x70
+[ 1003.342894] [<ffffffff8108dd10>] ? kthread_create_on_node+0x180/0x180
+[ 1003.342895] ---[ end trace 65a77e06d5aa9358 ]---
+
+Upon looking at the igb driver, I see that igb_rd32() attempted to read from
+hw_addr and failed, so it set hw->hw_addr to NULL and spit out the message
+in the log output above, "PCIe link lost, device now detached".
+
+Well, now that hw_addr is NULL, the attempt to call pci_iounmap is obviously
+not going to go well. As suggested by Mark Rustad, do something similar to
+what ixgbe does, and save a copy of hw_addr as adapter->io_addr, so we can
+still call pci_iounmap on it on teardown. Additionally, for consistency,
+make the pci_iomap call assignment directly to io_addr, so map and unmap
+match.
+
+Signed-off-by: Jarod Wilson <jarod@redhat.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/igb/igb.h | 2 ++
+ drivers/net/ethernet/intel/igb/igb_main.c | 10 ++++++----
+ 2 files changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/intel/igb/igb.h
++++ b/drivers/net/ethernet/intel/igb/igb.h
+@@ -389,6 +389,8 @@ struct igb_adapter {
+ u16 link_speed;
+ u16 link_duplex;
+
++ u8 __iomem *io_addr; /* Mainly for iounmap use */
++
+ struct work_struct reset_task;
+ struct work_struct watchdog_task;
+ bool fc_autoneg;
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -2294,9 +2294,11 @@ static int igb_probe(struct pci_dev *pde
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
+ err = -EIO;
+- hw->hw_addr = pci_iomap(pdev, 0, 0);
+- if (!hw->hw_addr)
++ adapter->io_addr = pci_iomap(pdev, 0, 0);
++ if (!adapter->io_addr)
+ goto err_ioremap;
++ /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
++ hw->hw_addr = adapter->io_addr;
+
+ netdev->netdev_ops = &igb_netdev_ops;
+ igb_set_ethtool_ops(netdev);
+@@ -2656,7 +2658,7 @@ err_sw_init:
+ #ifdef CONFIG_PCI_IOV
+ igb_disable_sriov(pdev);
+ #endif
+- pci_iounmap(pdev, hw->hw_addr);
++ pci_iounmap(pdev, adapter->io_addr);
+ err_ioremap:
+ free_netdev(netdev);
+ err_alloc_etherdev:
+@@ -2823,7 +2825,7 @@ static void igb_remove(struct pci_dev *p
+
+ igb_clear_interrupt_scheme(adapter);
+
+- pci_iounmap(pdev, hw->hw_addr);
++ pci_iounmap(pdev, adapter->io_addr);
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+ pci_release_selected_regions(pdev,
--- /dev/null
+From 379a0140a9487b32ae1f3cda40a90def2edcae9b Mon Sep 17 00:00:00 2001
+From: Todd Fujinaka <todd.fujinaka@intel.com>
+Date: Fri, 18 Sep 2015 15:43:51 -0700
+Subject: [PATCH 012/135] igb: use the correct i210 register for EEMNGCTL
+
+[ Upstream commit 08c991297582114a6e1220f913eec91789c4eac6 ]
+
+The i210 has two EEPROM access registers that are located in
+non-standard offsets: EEARBC and EEMNGCTL. EEARBC was fixed previously
+and EEMNGCTL should also be corrected.
+
+Reported-by: Roman Hodek <roman.aud@siemens.com>
+Signed-off-by: Todd Fujinaka <todd.fujinaka@intel.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/igb/e1000_82575.c | 1 +
+ drivers/net/ethernet/intel/igb/e1000_i210.c | 27 +++++++++++++++++++++++++++
+ drivers/net/ethernet/intel/igb/e1000_i210.h | 1 +
+ drivers/net/ethernet/intel/igb/e1000_regs.h | 1 +
+ 4 files changed, 30 insertions(+)
+
+--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
++++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
+@@ -294,6 +294,7 @@ static s32 igb_init_phy_params_82575(str
+ case I210_I_PHY_ID:
+ phy->type = e1000_phy_i210;
+ phy->ops.check_polarity = igb_check_polarity_m88;
++ phy->ops.get_cfg_done = igb_get_cfg_done_i210;
+ phy->ops.get_phy_info = igb_get_phy_info_m88;
+ phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
+ phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
++++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
+@@ -900,3 +900,30 @@ s32 igb_pll_workaround_i210(struct e1000
+ wr32(E1000_MDICNFG, mdicnfg);
+ return ret_val;
+ }
++
++/**
++ * igb_get_cfg_done_i210 - Read config done bit
++ * @hw: pointer to the HW structure
++ *
++ * Read the management control register for the config done bit for
++ * completion status. NOTE: silicon which is EEPROM-less will fail trying
++ * to read the config done bit, so an error is *ONLY* logged and returns
++ * 0. If we were to return with error, EEPROM-less silicon
++ * would not be able to be reset or change link.
++ **/
++s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
++{
++ s32 timeout = PHY_CFG_TIMEOUT;
++ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
++
++ while (timeout) {
++ if (rd32(E1000_EEMNGCTL_I210) & mask)
++ break;
++ usleep_range(1000, 2000);
++ timeout--;
++ }
++ if (!timeout)
++ hw_dbg("MNG configuration cycle has not completed.\n");
++
++ return 0;
++}
+--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
++++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
+@@ -34,6 +34,7 @@ s32 igb_write_xmdio_reg(struct e1000_hw
+ s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+ bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+ s32 igb_pll_workaround_i210(struct e1000_hw *hw);
++s32 igb_get_cfg_done_i210(struct e1000_hw *hw);
+
+ #define E1000_STM_OPCODE 0xDB00
+ #define E1000_EEPROM_FLASH_SIZE_WORD 0x11
+--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
++++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
+@@ -66,6 +66,7 @@
+ #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+ #define E1000_PBS 0x01008 /* Packet Buffer Size */
+ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
++#define E1000_EEMNGCTL_I210 0x12030 /* MNG EEprom Control */
+ #define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
+ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+ #define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
--- /dev/null
+From 27cef86ee3d8eb95966b1689cb91115bf933de2f Mon Sep 17 00:00:00 2001
+From: Jan Beulich <JBeulich@suse.com>
+Date: Mon, 19 Oct 2015 04:23:29 -0600
+Subject: [PATCH 013/135] igb: fix NULL derefs due to skipped SR-IOV enabling
+
+[ Upstream commit be06998f96ecb93938ad2cce46c4289bf7cf45bc ]
+
+The combined effect of commits 6423fc3416 ("igb: do not re-init SR-IOV
+during probe") and ceee3450b3 ("igb: make sure SR-IOV init uses the
+right number of queues") causes VFs no longer getting set up, leading
+to NULL pointer dereferences due to the adapter's ->vf_data being NULL
+while ->vfs_allocated_count is non-zero. The first commit not only
+neglected the side effect of igb_sriov_reinit() that the second commit
+tried to account for, but also that of setting IGB_FLAG_HAS_MSIX,
+without which igb_enable_sriov() is effectively a no-op. Calling
+igb_{,re}set_interrupt_capability() as done here seems to address this,
+but I'm not sure whether this is better than simply reverting the other
+two commits.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/igb/igb_main.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -2858,6 +2858,13 @@ static void igb_probe_vfs(struct igb_ada
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+ return;
+
++ /* Of the below we really only want the effect of getting
++ * IGB_FLAG_HAS_MSIX set (if available), without which
++ * igb_enable_sriov() has no effect.
++ */
++ igb_set_interrupt_capability(adapter, true);
++ igb_reset_interrupt_capability(adapter);
++
+ pci_sriov_set_totalvfs(pdev, 7);
+ igb_enable_sriov(pdev, max_vfs);
+
--- /dev/null
+From 280b3ddc982e0cfc9f508d24ad0d465912591c53 Mon Sep 17 00:00:00 2001
+From: Alexander Duyck <aduyck@mirantis.com>
+Date: Tue, 22 Sep 2015 14:35:41 -0700
+Subject: [PATCH 014/135] ixgbe: Fix handling of NAPI budget when multiple
+ queues are enabled per vector
+
+[ Upstream commit 5d6002b7b822c7423e75d4651e6790bfb5642b1b ]
+
+This patch corrects an issue in which the polling routine would increase
+the budget for Rx to at least 1 per queue if multiple queues were present.
+This would result in Rx packets being processed when the budget was 0 which
+is meant to indicate that no Rx can be handled.
+
+Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
+Tested-by: Darin Miller <darin.j.miller@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -2786,7 +2786,8 @@ int ixgbe_poll(struct napi_struct *napi,
+ ixgbe_for_each_ring(ring, q_vector->tx)
+ clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
+
+- if (!ixgbe_qv_lock_napi(q_vector))
++ /* Exit if we are called by netpoll or busy polling is active */
++ if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
+ return budget;
+
+ /* attempt to distribute budget to each queue fairly, but don't allow
--- /dev/null
+From 234b1ae373e7d281952a1de697ab308b044d9137 Mon Sep 17 00:00:00 2001
+From: Dmitriy Vyukov <dvyukov@google.com>
+Date: Tue, 8 Sep 2015 10:52:44 +0200
+Subject: [PATCH 015/135] e1000: fix data race between tx_ring->next_to_clean
+
+[ Upstream commit 9eab46b7cb8d0b0dcf014bf7b25e0e72b9e4d929 ]
+
+e1000_clean_tx_irq cleans buffers and sets tx_ring->next_to_clean,
+then e1000_xmit_frame reuses the cleaned buffers. But there are no
+memory barriers when buffers gets recycled, so the recycled buffers
+can be corrupted.
+
+Use smp_store_release to update tx_ring->next_to_clean and
+smp_load_acquire to read tx_ring->next_to_clean to properly
+hand off buffers from e1000_clean_tx_irq to e1000_xmit_frame.
+
+The data race was found with KernelThreadSanitizer (KTSAN).
+
+Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/e1000/e1000.h | 7 +++++--
+ drivers/net/ethernet/intel/e1000/e1000_main.c | 5 ++++-
+ 2 files changed, 9 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/intel/e1000/e1000.h
++++ b/drivers/net/ethernet/intel/e1000/e1000.h
+@@ -213,8 +213,11 @@ struct e1000_rx_ring {
+ };
+
+ #define E1000_DESC_UNUSED(R) \
+- ((((R)->next_to_clean > (R)->next_to_use) \
+- ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
++({ \
++ unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \
++ unsigned int use = READ_ONCE((R)->next_to_use); \
++ (clean > use ? 0 : (R)->count) + clean - use - 1; \
++})
+
+ #define E1000_RX_DESC_EXT(R, i) \
+ (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -3876,7 +3876,10 @@ static bool e1000_clean_tx_irq(struct e1
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+ }
+
+- tx_ring->next_to_clean = i;
++ /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
++ * which will reuse the cleaned buffers.
++ */
++ smp_store_release(&tx_ring->next_to_clean, i);
+
+ netdev_completed_queue(netdev, pkts_compl, bytes_compl);
+
--- /dev/null
+From 6f962b9a4211413d3159bb652edba114e5b8758b Mon Sep 17 00:00:00 2001
+From: Dmitry Fleytman <dmitry@daynix.com>
+Date: Tue, 13 Oct 2015 12:48:18 +0300
+Subject: [PATCH 016/135] e1000e: fix division by zero on jumbo MTUs
+
+[ Upstream commit b77ac46bbae862dcb3f51296825c940404c69b0f ]
+
+This patch fixes possible division by zero in receive
+interrupt handler when working without adaptive interrupt
+moderation.
+
+The adaptive interrupt moderation mechanism is typically
+disabled on jumbo MTUs.
+
+Signed-off-by: Dmitry Fleytman <dmitry@daynix.com>
+Signed-off-by: Leonid Bloch <leonid@daynix.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/e1000e/netdev.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -1959,8 +1959,10 @@ static irqreturn_t e1000_intr_msix_rx(in
+ * previous interrupt.
+ */
+ if (rx_ring->set_itr) {
+- writel(1000000000 / (rx_ring->itr_val * 256),
+- rx_ring->itr_register);
++ u32 itr = rx_ring->itr_val ?
++ 1000000000 / (rx_ring->itr_val * 256) : 0;
++
++ writel(itr, rx_ring->itr_register);
+ rx_ring->set_itr = 0;
+ }
+
--- /dev/null
+From a9a06362673e662cf7a194d37388b409a4e1abbc Mon Sep 17 00:00:00 2001
+From: Loc Ho <lho@apm.com>
+Date: Thu, 19 Nov 2015 12:20:30 -0700
+Subject: [PATCH 017/135] clk: xgene: Fix divider with non-zero shift value
+
+[ Upstream commit 1382ea631ddddb634850a3795527db0feeff5aaf ]
+
+The X-Gene clock driver missed the divider shift operation when
+set the divider value.
+
+Signed-off-by: Loc Ho <lho@apm.com>
+Fixes: 308964caeebc ("clk: Add APM X-Gene SoC clock driver")
+Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/clk-xgene.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/clk/clk-xgene.c
++++ b/drivers/clk/clk-xgene.c
+@@ -351,7 +351,8 @@ static int xgene_clk_set_rate(struct clk
+ /* Set new divider */
+ data = xgene_clk_read(pclk->param.divider_reg +
+ pclk->param.reg_divider_offset);
+- data &= ~((1 << pclk->param.reg_divider_width) - 1);
++ data &= ~((1 << pclk->param.reg_divider_width) - 1)
++ << pclk->param.reg_divider_shift;
+ data |= divider;
+ xgene_clk_write(data, pclk->param.divider_reg +
+ pclk->param.reg_divider_offset);
--- /dev/null
+From 34411f1bdb573a3cb8eb77133dcbfe8f042ae5e7 Mon Sep 17 00:00:00 2001
+From: Jacob Keller <jacob.e.keller@intel.com>
+Date: Mon, 24 Aug 2015 17:27:24 -0700
+Subject: [PATCH 018/135] fm10k: do not assume VF always has 1 queue
+
+[ Upstream commit 1340181fe435ccb8ca2f996b8680bd9566860619 ]
+
+It is possible that the PF has not yet assigned resources to the VF.
+Although rare, this could result in the VF attempting to read queues it
+does not own and result in FUM or THI faults in the PF. To prevent this,
+check queue 0 before we continue in init_hw_vf.
+
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Tested-by: Krishneil Singh <Krishneil.k.singh@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/fm10k/fm10k_type.h | 1 +
+ drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 7 ++++++-
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+@@ -77,6 +77,7 @@ struct fm10k_hw;
+ #define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10
+
+ #define FM10K_ERR_PARAM -2
++#define FM10K_ERR_NO_RESOURCES -3
+ #define FM10K_ERR_REQUESTS_PENDING -4
+ #define FM10K_ERR_RESET_REQUESTED -5
+ #define FM10K_ERR_DMA_PENDING -6
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+@@ -103,7 +103,12 @@ static s32 fm10k_init_hw_vf(struct fm10k
+ s32 err;
+ u16 i;
+
+- /* assume we always have at least 1 queue */
++ /* verify we have at least 1 queue */
++ if (!~fm10k_read_reg(hw, FM10K_TXQCTL(0)) ||
++ !~fm10k_read_reg(hw, FM10K_RXQCTL(0)))
++ return FM10K_ERR_NO_RESOURCES;
++
++ /* determine how many queues we have */
+ for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) {
+ /* verify the Descriptor cache offsets are increasing */
+ tqdloc = ~fm10k_read_reg(hw, FM10K_TQDLOC(i));
--- /dev/null
+From b518807ec09948b82cdc9ac806f6ec7c4b8704ff Mon Sep 17 00:00:00 2001
+From: Jacob Keller <jacob.e.keller@intel.com>
+Date: Tue, 25 Aug 2015 13:49:11 -0700
+Subject: [PATCH 019/135] fm10k: Correct MTU for jumbo frames
+
+[ Upstream commit 8c7ee6d2cacc7794a91875ef5fd8284b4a900d8c ]
+
+Based on hardware testing, the host interface supports up to 15368 bytes
+as the maximum frame size. To determine the correct MTU, we subtract 8
+for the internal switch tag, 14 for the L2 header, and 4 for the
+appended FCS header, resulting in 15342 bytes of payload for our maximum
+MTU on jumbo frames.
+
+Signed-off-by: Matthew Vick <matthew.vick@intel.com>
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Acked-by: Bruce Allan <bruce.w.allan@intel.com>
+Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/fm10k/fm10k.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
++++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
+@@ -33,7 +33,7 @@
+ #include "fm10k_pf.h"
+ #include "fm10k_vf.h"
+
+-#define FM10K_MAX_JUMBO_FRAME_SIZE 15358 /* Maximum supported size 15K */
++#define FM10K_MAX_JUMBO_FRAME_SIZE 15342 /* Maximum supported size 15K */
+
+ #define MAX_QUEUES FM10K_MAX_QUEUES_PF
+
--- /dev/null
+From d2cefd7c700cd39e4af907a991a440e0acca3c4b Mon Sep 17 00:00:00 2001
+From: Alexander Duyck <aduyck@mirantis.com>
+Date: Tue, 22 Sep 2015 14:35:35 -0700
+Subject: [PATCH 020/135] fm10k: Fix handling of NAPI budget when multiple
+ queues are enabled per vector
+
+[ Upstream commit 9f872986479b6e0543eb5c615e5f9491bb04e5c1 ]
+
+This patch corrects an issue in which the polling routine would increase
+the budget for Rx to at least 1 per queue if multiple queues were present.
+This would result in Rx packets being processed when the budget was 0 which
+is meant to indicate that no Rx can be handled.
+
+Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
+Tested-by: Krishneil Singh <Krishneil.k.singh@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/fm10k/fm10k_main.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+@@ -1428,6 +1428,10 @@ static int fm10k_poll(struct napi_struct
+ fm10k_for_each_ring(ring, q_vector->tx)
+ clean_complete &= fm10k_clean_tx_irq(q_vector, ring);
+
++ /* Handle case where we are called by netpoll with a budget of 0 */
++ if (budget <= 0)
++ return budget;
++
+ /* attempt to distribute budget to each queue fairly, but don't
+ * allow the budget to go below 1 because we'll exit polling
+ */
--- /dev/null
+From 15680d790bfccab097d9d7c1032d2bcd41bc8649 Mon Sep 17 00:00:00 2001
+From: Jacob Keller <jacob.e.keller@intel.com>
+Date: Fri, 16 Oct 2015 10:56:57 -0700
+Subject: [PATCH 021/135] fm10k: reset max_queues on init_hw_vf failure
+
+[ Upstream commit 0e8d5b5975401c83641efd5d4595e6cdbe9e9e2f ]
+
+VF drivers must detect how many queues are available. Previously, the
+driver assumed that each VF has at minimum 1 queue. This assumption is
+incorrect, since it is possible that the PF has not yet assigned the
+queues to the VF by the time the VF checks. To resolve this, we added a
+check first to ensure that the first queue is in fact owned by the VF at
+init_hw_vf time. However, the code flow did not reset hw->mac.max_queues
+to 0. In some cases, such as during reinit flows, we call init_hw_vf
+without clearing the previous value of hw->mac.max_queues. Due to this,
+when init_hw_vf errors out, if its error code is not properly handled
+the VF driver may still believe it has queues which no longer belong to
+it. Fix this by clearing the hw->mac.max_queues on exit due to errors.
+
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Reviewed-by: Bruce Allan <bruce.w.allan@intel.com>
+Tested-by: Krishneil Singh <Krishneil.k.singh@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+@@ -105,8 +105,10 @@ static s32 fm10k_init_hw_vf(struct fm10k
+
+ /* verify we have at least 1 queue */
+ if (!~fm10k_read_reg(hw, FM10K_TXQCTL(0)) ||
+- !~fm10k_read_reg(hw, FM10K_RXQCTL(0)))
+- return FM10K_ERR_NO_RESOURCES;
++ !~fm10k_read_reg(hw, FM10K_RXQCTL(0))) {
++ err = FM10K_ERR_NO_RESOURCES;
++ goto reset_max_queues;
++ }
+
+ /* determine how many queues we have */
+ for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) {
+@@ -124,7 +126,7 @@ static s32 fm10k_init_hw_vf(struct fm10k
+ /* shut down queues we own and reset DMA configuration */
+ err = fm10k_disable_queues_generic(hw, i);
+ if (err)
+- return err;
++ goto reset_max_queues;
+
+ /* record maximum queue count */
+ hw->mac.max_queues = i;
+@@ -134,6 +136,11 @@ static s32 fm10k_init_hw_vf(struct fm10k
+ FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;
+
+ return 0;
++
++reset_max_queues:
++ hw->mac.max_queues = 0;
++
++ return err;
+ }
+
+ /* This structure defines the attibutes to be parsed below */
--- /dev/null
+From 0f22f6d050b281f96bdc3e6664530ded1e7e4dea Mon Sep 17 00:00:00 2001
+From: Jacob Keller <jacob.e.keller@intel.com>
+Date: Fri, 16 Oct 2015 10:56:58 -0700
+Subject: [PATCH 022/135] fm10k: always check init_hw for errors
+
+[ Upstream commit 1343c65f70ee1b1f968a08b30e1836a4e37116cd ]
+
+A recent change modified init_hw such that in some flows the function may fail on
+VF devices. For example, if a VF doesn't yet own its own queues.
+However, many callers of init_hw didn't bother to check the error code.
+Other callers checked but only displayed diagnostic messages without
+actually handling the consequences.
+
+Fix this by (a) always returning and preventing the netdevice from going
+up, and (b) printing the diagnostic in every flow for consistency. This
+should resolve an issue where VF drivers would attempt to come up
+before the PF has finished assigning queues.
+
+In addition, change the dmesg output to explicitly show the actual
+function that failed, instead of combining reset_hw and init_hw into a
+single check, to help for future debugging.
+
+Fixes: 1d568b0f6424 ("fm10k: do not assume VF always has 1 queue")
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Reviewed-by: Bruce Allan <bruce.w.allan@intel.com>
+Tested-by: Krishneil Singh <Krishneil.k.singh@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 34 +++++++++++++++++++++++----
+ 1 file changed, 29 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+@@ -163,9 +163,17 @@ static void fm10k_reinit(struct fm10k_in
+ interface->last_reset = jiffies + (10 * HZ);
+
+ /* reset and initialize the hardware so it is in a known state */
+- err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
+- if (err)
++ err = hw->mac.ops.reset_hw(hw);
++ if (err) {
++ dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err);
++ goto reinit_err;
++ }
++
++ err = hw->mac.ops.init_hw(hw);
++ if (err) {
+ dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);
++ goto reinit_err;
++ }
+
+ /* reassociate interrupts */
+ fm10k_mbx_request_irq(interface);
+@@ -193,6 +201,10 @@ static void fm10k_reinit(struct fm10k_in
+
+ fm10k_iov_resume(interface->pdev);
+
++reinit_err:
++ if (err)
++ netif_device_detach(netdev);
++
+ rtnl_unlock();
+
+ clear_bit(__FM10K_RESETTING, &interface->state);
+@@ -1684,7 +1696,13 @@ static int fm10k_sw_init(struct fm10k_in
+ interface->last_reset = jiffies + (10 * HZ);
+
+ /* reset and initialize the hardware so it is in a known state */
+- err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
++ err = hw->mac.ops.reset_hw(hw);
++ if (err) {
++ dev_err(&pdev->dev, "reset_hw failed: %d\n", err);
++ return err;
++ }
++
++ err = hw->mac.ops.init_hw(hw);
+ if (err) {
+ dev_err(&pdev->dev, "init_hw failed: %d\n", err);
+ return err;
+@@ -2071,8 +2089,10 @@ static int fm10k_resume(struct pci_dev *
+
+ /* reset hardware to known state */
+ err = hw->mac.ops.init_hw(&interface->hw);
+- if (err)
++ if (err) {
++ dev_err(&pdev->dev, "init_hw failed: %d\n", err);
+ return err;
++ }
+
+ /* reset statistics starting values */
+ hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
+@@ -2248,7 +2268,11 @@ static void fm10k_io_resume(struct pci_d
+ int err = 0;
+
+ /* reset hardware to known state */
+- hw->mac.ops.init_hw(&interface->hw);
++ err = hw->mac.ops.init_hw(&interface->hw);
++ if (err) {
++ dev_err(&pdev->dev, "init_hw failed: %d\n", err);
++ return;
++ }
+
+ /* reset statistics starting values */
+ hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
--- /dev/null
+From 5c571b333ac3ff510b59a843675b450e72dc8543 Mon Sep 17 00:00:00 2001
+From: Jacob Keller <jacob.e.keller@intel.com>
+Date: Fri, 16 Oct 2015 10:56:59 -0700
+Subject: [PATCH 023/135] fm10k: reinitialize queuing scheme after calling
+ init_hw
+
+[ Upstream commit 875328e4bce696e85edcda3c4b0ec80fd525e3a3 ]
+
+The init_hw function may fail, and in the case of VFs, it might change
+the number of maximum queues available. Thus, for every flow which
+checks init_hw, we need to ensure that we clear the queue scheme before,
+and initialize it after. The fm10k_io_slot_reset path will end up
+triggering a reset so fm10k_reinit needs this change. The
+fm10k_io_error_detected and fm10k_io_resume also need to properly clear
+and reinitialize the queue scheme.
+
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Reviewed-by: Bruce Allan <bruce.w.allan@intel.com>
+Tested-by: Krishneil Singh <Krishneil.k.singh@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+@@ -159,6 +159,9 @@ static void fm10k_reinit(struct fm10k_in
+
+ fm10k_mbx_free_irq(interface);
+
++ /* free interrupts */
++ fm10k_clear_queueing_scheme(interface);
++
+ /* delay any future reset requests */
+ interface->last_reset = jiffies + (10 * HZ);
+
+@@ -175,6 +178,12 @@ static void fm10k_reinit(struct fm10k_in
+ goto reinit_err;
+ }
+
++ err = fm10k_init_queueing_scheme(interface);
++ if (err) {
++ dev_err(&interface->pdev->dev, "init_queueing_scheme failed: %d\n", err);
++ goto reinit_err;
++ }
++
+ /* reassociate interrupts */
+ fm10k_mbx_request_irq(interface);
+
+@@ -2205,6 +2214,9 @@ static pci_ers_result_t fm10k_io_error_d
+ if (netif_running(netdev))
+ fm10k_close(netdev);
+
++ /* free interrupts */
++ fm10k_clear_queueing_scheme(interface);
++
+ fm10k_mbx_free_irq(interface);
+
+ pci_disable_device(pdev);
+@@ -2277,6 +2289,12 @@ static void fm10k_io_resume(struct pci_d
+ /* reset statistics starting values */
+ hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
+
++ err = fm10k_init_queueing_scheme(interface);
++ if (err) {
++ dev_err(&interface->pdev->dev, "init_queueing_scheme failed: %d\n", err);
++ return;
++ }
++
+ /* reassociate interrupts */
+ fm10k_mbx_request_irq(interface);
+
--- /dev/null
+From bb8414d058ba1c5a49749428f167cd8a28a9a879 Mon Sep 17 00:00:00 2001
+From: Alexander Duyck <aduyck@mirantis.com>
+Date: Tue, 27 Oct 2015 16:59:12 -0700
+Subject: [PATCH 024/135] fm10k: Cleanup MSI-X interrupts in case of failure
+
+[ Upstream commit 587731e684dcf3522215194a02357d26b9bc7277 ]
+
+If the q_vector allocation fails we should free the resources associated
+with the MSI-X vector table.
+
+Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
+Reviewed-by: Bruce Allan <bruce.w.allan@intel.com>
+Tested-by: Krishneil Singh <Krishneil.k.singh@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/fm10k/fm10k_main.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+@@ -1970,8 +1970,10 @@ int fm10k_init_queueing_scheme(struct fm
+
+ /* Allocate memory for queues */
+ err = fm10k_alloc_q_vectors(interface);
+- if (err)
++ if (err) {
++ fm10k_reset_msix_capability(interface);
+ return err;
++ }
+
+ /* Map rings to devices, and map devices to physical queues */
+ fm10k_assign_rings(interface);
--- /dev/null
+From c24ba4c3209f024567e1835cac517c92d7e528eb Mon Sep 17 00:00:00 2001
+From: Alexander Duyck <aduyck@mirantis.com>
+Date: Tue, 27 Oct 2015 16:59:18 -0700
+Subject: [PATCH 025/135] fm10k: Cleanup exception handling for mailbox
+ interrupt
+
+[ Upstream commit e00e23bceba48a8f0c94fefe26948404cbd43d0a ]
+
+This patch addresses two issues.
+
+First is the fact that the fm10k_mbx_free_irq was assuming msix_entries was
+valid and that will not always be the case. As such we need to add a check
+for if it is NULL.
+
+Second is the fact that we weren't freeing the IRQ if the mailbox API
+returned an error on trying to connect.
+
+Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
+Reviewed-by: Bruce Allan <bruce.w.allan@intel.com>
+Tested-by: Krishneil Singh <Krishneil.k.singh@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+@@ -1122,6 +1122,10 @@ void fm10k_mbx_free_irq(struct fm10k_int
+ struct fm10k_hw *hw = &interface->hw;
+ int itr_reg;
+
++ /* no mailbox IRQ to free if MSI-X is not enabled */
++ if (!interface->msix_entries)
++ return;
++
+ /* disconnect the mailbox */
+ hw->mbx.ops.disconnect(hw, &hw->mbx);
+
+@@ -1444,10 +1448,15 @@ int fm10k_mbx_request_irq(struct fm10k_i
+ err = fm10k_mbx_request_irq_pf(interface);
+ else
+ err = fm10k_mbx_request_irq_vf(interface);
++ if (err)
++ return err;
+
+ /* connect mailbox */
+- if (!err)
+- err = hw->mbx.ops.connect(hw, &hw->mbx);
++ err = hw->mbx.ops.connect(hw, &hw->mbx);
++
++ /* if the mailbox failed to connect, then free IRQ */
++ if (err)
++ fm10k_mbx_free_irq(interface);
+
+ return err;
+ }
--- /dev/null
+From c4eae6df9efb902a57ef0282ba6012d05ad53979 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Wed, 2 Dec 2015 17:26:28 -0600
+Subject: [PATCH 026/135] cxlflash: a couple off by one bugs
+
+[ Upstream commit e37390bee6fe7dfbe507a9d50cdc11344b53fa08 ]
+
+The "> MAX_CONTEXT" should be ">= MAX_CONTEXT". Otherwise we go one
+step beyond the end of the cfg->ctx_tbl[] array.
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Manoj Kumar <manoj@linux.vnet.ibm.com>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Acked-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/cxlflash/superpipe.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/cxlflash/superpipe.c
++++ b/drivers/scsi/cxlflash/superpipe.c
+@@ -1380,7 +1380,7 @@ static int cxlflash_disk_attach(struct s
+ }
+
+ ctxid = cxl_process_element(ctx);
+- if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
++ if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
+ dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
+ rc = -EPERM;
+ goto err2;
+@@ -1508,7 +1508,7 @@ static int recover_context(struct cxlfla
+ }
+
+ ctxid = cxl_process_element(ctx);
+- if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
++ if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
+ dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
+ rc = -EPERM;
+ goto err1;
--- /dev/null
+From 8419ef00ea07eaaca57b21fae9f10b79a70fe7b4 Mon Sep 17 00:00:00 2001
+From: Wenwei Tao <ww.tao0320@gmail.com>
+Date: Tue, 12 Jan 2016 07:49:15 +0100
+Subject: [PATCH 027/135] lightnvm: fix bio submission issue
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit 3cd485b1f8e25a6534eb4c542e7eba1b944fbaaf ]
+
+Put bio when submission fails, since we get it
+before submission. And return error when backend
+device driver doesn't provide a submit_io method,
+thus we can end IO properly.
+
+Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
+Signed-off-by: Matias Bjørling <m@bjorling.me>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/lightnvm/gennvm.c | 2 +-
+ drivers/lightnvm/rrpc.c | 4 +++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/lightnvm/gennvm.c
++++ b/drivers/lightnvm/gennvm.c
+@@ -345,7 +345,7 @@ static void gennvm_generic_to_addr_mode(
+ static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+ {
+ if (!dev->ops->submit_io)
+- return 0;
++ return -ENODEV;
+
+ /* Convert address space */
+ gennvm_generic_to_addr_mode(dev, rqd);
+--- a/drivers/lightnvm/rrpc.c
++++ b/drivers/lightnvm/rrpc.c
+@@ -650,11 +650,12 @@ static int rrpc_end_io(struct nvm_rq *rq
+ if (bio_data_dir(rqd->bio) == WRITE)
+ rrpc_end_io_write(rrpc, rrqd, laddr, npages);
+
++ bio_put(rqd->bio);
++
+ if (rrqd->flags & NVM_IOTYPE_GC)
+ return 0;
+
+ rrpc_unlock_rq(rrpc, rqd);
+- bio_put(rqd->bio);
+
+ if (npages > 1)
+ nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+@@ -841,6 +842,7 @@ static int rrpc_submit_io(struct rrpc *r
+ err = nvm_submit_io(rrpc->dev, rqd);
+ if (err) {
+ pr_err("rrpc: I/O submission failed: %d\n", err);
++ bio_put(bio);
+ return NVM_IO_ERR;
+ }
+
--- /dev/null
+From 84f44c75c7e0dc3903563e33428cb23970e91ce7 Mon Sep 17 00:00:00 2001
+From: Chao Yu <chao2.yu@samsung.com>
+Date: Tue, 12 Jan 2016 07:49:16 +0100
+Subject: [PATCH 028/135] lightnvm: fix incorrect nr_free_blocks stat
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit bdded1552085b12d23c9be76147d2e96647a098f ]
+
+When initing bad block list in gennvm_block_bb, once we move bad block
+from free_list to bb_list, we should maintain both stat info
+nr_free_blocks and nr_bad_blocks. So this patch fixes to add missing
+operation related to nr_free_blocks.
+
+Signed-off-by: Chao Yu <chao2.yu@samsung.com>
+Signed-off-by: Matias Bjørling <m@bjorling.me>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/lightnvm/gennvm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/lightnvm/gennvm.c
++++ b/drivers/lightnvm/gennvm.c
+@@ -89,6 +89,7 @@ static int gennvm_block_bb(struct ppa_ad
+
+ list_move_tail(&blk->list, &lun->bb_list);
+ lun->vlun.nr_bad_blocks++;
++ lun->vlun.nr_free_blocks--;
+ }
+
+ return 0;
--- /dev/null
+From 47576e733b8fb27b0631e40543bb85ec1eb40230 Mon Sep 17 00:00:00 2001
+From: Javier Gonzalez <javier@javigon.com>
+Date: Tue, 12 Jan 2016 07:49:17 +0100
+Subject: [PATCH 029/135] lightnvm: add check after mempool allocation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit 3bfbc6adbc5031e8a5907baa5beb27b41637742a ]
+
+The mempool allocation might fail. Make sure to return error when it
+does, instead of causing a kernel panic.
+
+Signed-off-by: Javier Gonzalez <javier@cnexlabs.com>
+Signed-off-by: Matias Bjørling <m@bjorling.me>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/lightnvm/rrpc.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/lightnvm/rrpc.c
++++ b/drivers/lightnvm/rrpc.c
+@@ -287,6 +287,8 @@ static int rrpc_move_valid_pages(struct
+ }
+
+ page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
++ if (!page)
++ return -ENOMEM;
+
+ while ((slot = find_first_zero_bit(rblk->invalid_pages,
+ nr_pgs_per_blk)) < nr_pgs_per_blk) {
--- /dev/null
+From 479a951226470515dcfd473205d1c5ee18e79c01 Mon Sep 17 00:00:00 2001
+From: Wenwei Tao <ww.tao0320@gmail.com>
+Date: Tue, 12 Jan 2016 07:49:18 +0100
+Subject: [PATCH 030/135] lightnvm: unlock rq and free ppa_list on submission
+ fail
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit c27278bddd75a3ee755c8e83c6bcc3fdd7271ef6 ]
+
+When rrpc_write_ppalist_rq and rrpc_read_ppalist_rq succeed, we set up
+rq correctly, but nvm_submit_io may afterward fail since it cannot
+allocate request or nvme_nvm_command, we return error but forget to
+cleanup the previous work.
+
+Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
+Signed-off-by: Matias Bjørling <m@bjorling.me>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/lightnvm/rrpc.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/lightnvm/rrpc.c
++++ b/drivers/lightnvm/rrpc.c
+@@ -845,6 +845,12 @@ static int rrpc_submit_io(struct rrpc *r
+ if (err) {
+ pr_err("rrpc: I/O submission failed: %d\n", err);
+ bio_put(bio);
++ if (!(flags & NVM_IOTYPE_GC)) {
++ rrpc_unlock_rq(rrpc, rqd);
++ if (rqd->nr_pages > 1)
++ nvm_dev_dma_free(rrpc->dev,
++ rqd->ppa_list, rqd->dma_ppa_list);
++ }
+ return NVM_IO_ERR;
+ }
+
--- /dev/null
+From b81c684d48fd1b1a620f39a1b61bc1681525ce45 Mon Sep 17 00:00:00 2001
+From: Wenwei Tao <ww.tao0320@gmail.com>
+Date: Tue, 12 Jan 2016 07:49:25 +0100
+Subject: [PATCH 031/135] lightnvm: fix locking and mempool in rrpc_lun_gc
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit b262924be03d5d2ae735bc9a4b37eb2c613f61f8 ]
+
+This patch fix two issues in rrpc_lun_gc
+
+1. prio_list is protected by rrpc_lun's lock not nvm_lun's, so
+acquire rlun's lock instead of lun's before operate on the list.
+
+2. we delete block from prio_list before allocating gcb, but gcb
+allocation may fail, we end without putting it back to the list,
+this means the block won't get reclaimed in the future. To solve
+this issue, delete block after gcb allocation.
+
+Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
+Signed-off-by: Matias Bjørling <m@bjorling.me>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/lightnvm/rrpc.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/lightnvm/rrpc.c
++++ b/drivers/lightnvm/rrpc.c
+@@ -429,7 +429,7 @@ static void rrpc_lun_gc(struct work_stru
+ if (nr_blocks_need < rrpc->nr_luns)
+ nr_blocks_need = rrpc->nr_luns;
+
+- spin_lock(&lun->lock);
++ spin_lock(&rlun->lock);
+ while (nr_blocks_need > lun->nr_free_blocks &&
+ !list_empty(&rlun->prio_list)) {
+ struct rrpc_block *rblock = block_prio_find_max(rlun);
+@@ -438,16 +438,16 @@ static void rrpc_lun_gc(struct work_stru
+ if (!rblock->nr_invalid_pages)
+ break;
+
++ gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
++ if (!gcb)
++ break;
++
+ list_del_init(&rblock->prio);
+
+ BUG_ON(!block_is_full(rrpc, rblock));
+
+ pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
+
+- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
+- if (!gcb)
+- break;
+-
+ gcb->rrpc = rrpc;
+ gcb->rblk = rblock;
+ INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
+@@ -456,7 +456,7 @@ static void rrpc_lun_gc(struct work_stru
+
+ nr_blocks_need--;
+ }
+- spin_unlock(&lun->lock);
++ spin_unlock(&rlun->lock);
+
+ /* TODO: Hint that request queue can be started again */
+ }
--- /dev/null
+From d946814b5fd3f948d98dc39034a486af65f8bb55 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Matias=20Bj=C3=B8rling?= <m@bjorling.me>
+Date: Tue, 12 Jan 2016 07:49:32 +0100
+Subject: [PATCH 032/135] lightnvm: fix missing grown bad block type
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit b5d4acd4cbf5029a2616084d9e9f392046d53a37 ]
+
+The get/set bad block interface defines good block, factory bad block,
+grown bad block, device reserved block, and host reserved block.
+Unfortunately the grown bad block was missing, leaving the offsets wrong
+for device and host side reserved blocks.
+
+This patch adds the missing type and corrects the offsets.
+
+Signed-off-by: Matias Bjørling <m@bjorling.me>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/lightnvm.h | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/include/linux/lightnvm.h
++++ b/include/linux/lightnvm.h
+@@ -58,8 +58,9 @@ enum {
+ /* Block Types */
+ NVM_BLK_T_FREE = 0x0,
+ NVM_BLK_T_BAD = 0x1,
+- NVM_BLK_T_DEV = 0x2,
+- NVM_BLK_T_HOST = 0x4,
++ NVM_BLK_T_GRWN_BAD = 0x2,
++ NVM_BLK_T_DEV = 0x4,
++ NVM_BLK_T_HOST = 0x8,
+ };
+
+ struct nvm_id_group {
--- /dev/null
+From 949e31ee7225602105e4129b9a093d3fc546f701 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Thu, 3 Dec 2015 09:52:05 -0700
+Subject: [PATCH 033/135] NVMe: fix build with CONFIG_NVM enabled
+
+[ Upstream commit ac02dddec63385ffef1397d3f56cec4108bcafe9 ]
+
+Looks like I didn't test with CONFIG_NVM enabled, and neither did
+the build bot.
+
+Most of this is really weird crazy shit in the lighnvm support, though.
+
+Struct nvme_ns is a structure for the NVM I/O command set, and it has
+no business poking into it. Second this commit:
+
+commit 47b3115ae7b799be8b77b0f024215ad4f68d6460
+Author: Wenwei Tao <ww.tao0320@gmail.com>
+Date: Fri Nov 20 13:47:55 2015 +0100
+
+ nvme: lightnvm: use admin queues for admin cmds
+
+Does even more crazy stuff. If a function gets a request_queue parameter
+passed it'd better use that and not look for another one.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/host/lightnvm.c | 35 ++++++++++++++++-------------------
+ 1 file changed, 16 insertions(+), 19 deletions(-)
+
+--- a/drivers/nvme/host/lightnvm.c
++++ b/drivers/nvme/host/lightnvm.c
+@@ -274,7 +274,6 @@ static int init_grps(struct nvm_id *nvm_
+ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
+ {
+ struct nvme_ns *ns = nvmdev->q->queuedata;
+- struct nvme_dev *dev = ns->dev;
+ struct nvme_nvm_id *nvme_nvm_id;
+ struct nvme_nvm_command c = {};
+ int ret;
+@@ -287,7 +286,7 @@ static int nvme_nvm_identity(struct nvm_
+ if (!nvme_nvm_id)
+ return -ENOMEM;
+
+- ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
++ ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
+ nvme_nvm_id, sizeof(struct nvme_nvm_id));
+ if (ret) {
+ ret = -EIO;
+@@ -312,9 +311,8 @@ static int nvme_nvm_get_l2p_tbl(struct n
+ nvm_l2p_update_fn *update_l2p, void *priv)
+ {
+ struct nvme_ns *ns = nvmdev->q->queuedata;
+- struct nvme_dev *dev = ns->dev;
+ struct nvme_nvm_command c = {};
+- u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
++ u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
+ u32 nlb_pr_rq = len / sizeof(u64);
+ u64 cmd_slba = slba;
+ void *entries;
+@@ -332,10 +330,10 @@ static int nvme_nvm_get_l2p_tbl(struct n
+ c.l2p.slba = cpu_to_le64(cmd_slba);
+ c.l2p.nlb = cpu_to_le32(cmd_nlb);
+
+- ret = nvme_submit_sync_cmd(dev->admin_q,
++ ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
+ (struct nvme_command *)&c, entries, len);
+ if (ret) {
+- dev_err(dev->dev, "L2P table transfer failed (%d)\n",
++ dev_err(ns->ctrl->dev, "L2P table transfer failed (%d)\n",
+ ret);
+ ret = -EIO;
+ goto out;
+@@ -361,7 +359,7 @@ static int nvme_nvm_get_bb_tbl(struct nv
+ {
+ struct request_queue *q = nvmdev->q;
+ struct nvme_ns *ns = q->queuedata;
+- struct nvme_dev *dev = ns->dev;
++ struct nvme_ctrl *ctrl = ns->ctrl;
+ struct nvme_nvm_command c = {};
+ struct nvme_nvm_bb_tbl *bb_tbl;
+ int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
+@@ -375,30 +373,30 @@ static int nvme_nvm_get_bb_tbl(struct nv
+ if (!bb_tbl)
+ return -ENOMEM;
+
+- ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
++ ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
+ bb_tbl, tblsz);
+ if (ret) {
+- dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
++ dev_err(ctrl->dev, "get bad block table failed (%d)\n", ret);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
+ bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
+- dev_err(dev->dev, "bbt format mismatch\n");
++ dev_err(ctrl->dev, "bbt format mismatch\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (le16_to_cpu(bb_tbl->verid) != 1) {
+ ret = -EINVAL;
+- dev_err(dev->dev, "bbt version not supported\n");
++ dev_err(ctrl->dev, "bbt version not supported\n");
+ goto out;
+ }
+
+ if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
+ ret = -EINVAL;
+- dev_err(dev->dev, "bbt unsuspected blocks returned (%u!=%u)",
++ dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
+ le32_to_cpu(bb_tbl->tblks), nr_blocks);
+ goto out;
+ }
+@@ -419,7 +417,6 @@ static int nvme_nvm_set_bb_tbl(struct nv
+ int type)
+ {
+ struct nvme_ns *ns = nvmdev->q->queuedata;
+- struct nvme_dev *dev = ns->dev;
+ struct nvme_nvm_command c = {};
+ int ret = 0;
+
+@@ -429,10 +426,10 @@ static int nvme_nvm_set_bb_tbl(struct nv
+ c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
+ c.set_bb.value = type;
+
+- ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
++ ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
+ NULL, 0);
+ if (ret)
+- dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
++ dev_err(ns->ctrl->dev, "set bad block table failed (%d)\n", ret);
+ return ret;
+ }
+
+@@ -520,9 +517,8 @@ static int nvme_nvm_erase_block(struct n
+ static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
+ {
+ struct nvme_ns *ns = nvmdev->q->queuedata;
+- struct nvme_dev *dev = ns->dev;
+
+- return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
++ return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
+ }
+
+ static void nvme_nvm_destroy_dma_pool(void *pool)
+@@ -580,8 +576,9 @@ void nvme_nvm_unregister(struct request_
+
+ int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
+ {
+- struct nvme_dev *dev = ns->dev;
+- struct pci_dev *pdev = to_pci_dev(dev->dev);
++ struct nvme_ctrl *ctrl = ns->ctrl;
++ /* XXX: this is poking into PCI structures from generic code! */
++ struct pci_dev *pdev = to_pci_dev(ctrl->dev);
+
+ /* QEMU NVMe simulator - PCI ID + Vendor specific bit */
+ if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
--- /dev/null
+From bb7fbb0e302daa3a908d24115be3d76f27b58340 Mon Sep 17 00:00:00 2001
+From: "K. Y. Srinivasan" <kys@microsoft.com>
+Date: Mon, 14 Dec 2015 16:01:32 -0800
+Subject: [PATCH 034/135] Drivers: hv: util: Increase the timeout for util
+ services
+
+[ Upstream commit c0b200cfb0403740171c7527b3ac71d03f82947a ]
+
+Util services such as KVP and FCOPY need assistance from daemon's running
+in user space. Increase the timeout so we don't prematurely terminate
+the transaction in the kernel. Host sets up a 60 second timeout for
+all util driver transactions. The host will retry the transaction if it
+times out. Set the guest timeout at 30 seconds.
+
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hv/hv_fcopy.c | 3 ++-
+ drivers/hv/hv_kvp.c | 3 ++-
+ drivers/hv/hyperv_vmbus.h | 5 +++++
+ 3 files changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/hv/hv_fcopy.c
++++ b/drivers/hv/hv_fcopy.c
+@@ -275,7 +275,8 @@ void hv_fcopy_onchannelcallback(void *co
+ * Send the information to the user-level daemon.
+ */
+ schedule_work(&fcopy_send_work);
+- schedule_delayed_work(&fcopy_timeout_work, 5*HZ);
++ schedule_delayed_work(&fcopy_timeout_work,
++ HV_UTIL_TIMEOUT * HZ);
+ return;
+ }
+ icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
+--- a/drivers/hv/hv_kvp.c
++++ b/drivers/hv/hv_kvp.c
+@@ -668,7 +668,8 @@ void hv_kvp_onchannelcallback(void *cont
+ * user-mode not responding.
+ */
+ schedule_work(&kvp_sendkey_work);
+- schedule_delayed_work(&kvp_timeout_work, 5*HZ);
++ schedule_delayed_work(&kvp_timeout_work,
++ HV_UTIL_TIMEOUT * HZ);
+
+ return;
+
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -31,6 +31,11 @@
+ #include <linux/hyperv.h>
+
+ /*
++ * Timeout for services such as KVP and fcopy.
++ */
++#define HV_UTIL_TIMEOUT 30
++
++/*
+ * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
+ * is set by CPUID(HVCPUID_VERSION_FEATURES).
+ */
--- /dev/null
+From 8670bd6f526d0953e614d6d3bb1652b4a26587c0 Mon Sep 17 00:00:00 2001
+From: Olaf Hering <olaf@aepfle.de>
+Date: Mon, 14 Dec 2015 16:01:33 -0800
+Subject: [PATCH 035/135] Drivers: hv: utils: run polling callback always in
+ interrupt context
+
+[ Upstream commit 3cace4a616108539e2730f8dc21a636474395e0f ]
+
+All channel interrupts are bound to specific VCPUs in the guest
+at the point channel is created. While currently, we invoke the
+polling function on the correct CPU (the CPU to which the channel
+is bound to) in some cases we may run the polling function in
+a non-interrupt context. This potentially can cause an issue as the
+polling function can be interrupted by the channel callback function.
+Fix the issue by running the polling function on the appropriate CPU
+at interrupt level. Additional details of the issue being addressed by
+this patch are given below:
+
+Currently hv_fcopy_onchannelcallback is called from interrupts and also
+via the ->write function of hv_utils. Since the used global variables to
+maintain state are not thread safe the state can get out of sync.
+This affects the variable state as well as the channel inbound buffer.
+
+As suggested by KY adjust hv_poll_channel to always run the given
+callback on the cpu which the channel is bound to. This avoids the need
+for locking because all the util services are single threaded and only
+one transaction is active at any given point in time.
+
+Additionally, remove the context variable, they will always be the same as
+recv_channel.
+
+Signed-off-by: Olaf Hering <olaf@aepfle.de>
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hv/hv_fcopy.c | 34 ++++++++++++----------------------
+ drivers/hv/hv_kvp.c | 28 ++++++++++------------------
+ drivers/hv/hv_snapshot.c | 29 +++++++++++------------------
+ drivers/hv/hyperv_vmbus.h | 6 +-----
+ 4 files changed, 34 insertions(+), 63 deletions(-)
+
+--- a/drivers/hv/hv_fcopy.c
++++ b/drivers/hv/hv_fcopy.c
+@@ -51,7 +51,6 @@ static struct {
+ struct hv_fcopy_hdr *fcopy_msg; /* current message */
+ struct vmbus_channel *recv_channel; /* chn we got the request */
+ u64 recv_req_id; /* request ID. */
+- void *fcopy_context; /* for the channel callback */
+ } fcopy_transaction;
+
+ static void fcopy_respond_to_host(int error);
+@@ -67,6 +66,13 @@ static struct hvutil_transport *hvt;
+ */
+ static int dm_reg_value;
+
++static void fcopy_poll_wrapper(void *channel)
++{
++ /* Transaction is finished, reset the state here to avoid races. */
++ fcopy_transaction.state = HVUTIL_READY;
++ hv_fcopy_onchannelcallback(channel);
++}
++
+ static void fcopy_timeout_func(struct work_struct *dummy)
+ {
+ /*
+@@ -74,13 +80,7 @@ static void fcopy_timeout_func(struct wo
+ * process the pending transaction.
+ */
+ fcopy_respond_to_host(HV_E_FAIL);
+-
+- /* Transaction is finished, reset the state. */
+- if (fcopy_transaction.state > HVUTIL_READY)
+- fcopy_transaction.state = HVUTIL_READY;
+-
+- hv_poll_channel(fcopy_transaction.fcopy_context,
+- hv_fcopy_onchannelcallback);
++ hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
+ }
+
+ static int fcopy_handle_handshake(u32 version)
+@@ -108,9 +108,7 @@ static int fcopy_handle_handshake(u32 ve
+ return -EINVAL;
+ }
+ pr_debug("FCP: userspace daemon ver. %d registered\n", version);
+- fcopy_transaction.state = HVUTIL_READY;
+- hv_poll_channel(fcopy_transaction.fcopy_context,
+- hv_fcopy_onchannelcallback);
++ hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
+ return 0;
+ }
+
+@@ -227,15 +225,8 @@ void hv_fcopy_onchannelcallback(void *co
+ int util_fw_version;
+ int fcopy_srv_version;
+
+- if (fcopy_transaction.state > HVUTIL_READY) {
+- /*
+- * We will defer processing this callback once
+- * the current transaction is complete.
+- */
+- fcopy_transaction.fcopy_context = context;
++ if (fcopy_transaction.state > HVUTIL_READY)
+ return;
+- }
+- fcopy_transaction.fcopy_context = NULL;
+
+ vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+ &requestid);
+@@ -305,9 +296,8 @@ static int fcopy_on_msg(void *msg, int l
+ if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
+ fcopy_transaction.state = HVUTIL_USERSPACE_RECV;
+ fcopy_respond_to_host(*val);
+- fcopy_transaction.state = HVUTIL_READY;
+- hv_poll_channel(fcopy_transaction.fcopy_context,
+- hv_fcopy_onchannelcallback);
++ hv_poll_channel(fcopy_transaction.recv_channel,
++ fcopy_poll_wrapper);
+ }
+
+ return 0;
+--- a/drivers/hv/hv_kvp.c
++++ b/drivers/hv/hv_kvp.c
+@@ -66,7 +66,6 @@ static struct {
+ struct hv_kvp_msg *kvp_msg; /* current message */
+ struct vmbus_channel *recv_channel; /* chn we got the request */
+ u64 recv_req_id; /* request ID. */
+- void *kvp_context; /* for the channel callback */
+ } kvp_transaction;
+
+ /*
+@@ -94,6 +93,13 @@ static struct hvutil_transport *hvt;
+ */
+ #define HV_DRV_VERSION "3.1"
+
++static void kvp_poll_wrapper(void *channel)
++{
++ /* Transaction is finished, reset the state here to avoid races. */
++ kvp_transaction.state = HVUTIL_READY;
++ hv_kvp_onchannelcallback(channel);
++}
++
+ static void
+ kvp_register(int reg_value)
+ {
+@@ -121,12 +127,7 @@ static void kvp_timeout_func(struct work
+ */
+ kvp_respond_to_host(NULL, HV_E_FAIL);
+
+- /* Transaction is finished, reset the state. */
+- if (kvp_transaction.state > HVUTIL_READY)
+- kvp_transaction.state = HVUTIL_READY;
+-
+- hv_poll_channel(kvp_transaction.kvp_context,
+- hv_kvp_onchannelcallback);
++ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
+ }
+
+ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
+@@ -218,9 +219,7 @@ static int kvp_on_msg(void *msg, int len
+ */
+ if (cancel_delayed_work_sync(&kvp_timeout_work)) {
+ kvp_respond_to_host(message, error);
+- kvp_transaction.state = HVUTIL_READY;
+- hv_poll_channel(kvp_transaction.kvp_context,
+- hv_kvp_onchannelcallback);
++ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
+ }
+
+ return 0;
+@@ -596,15 +595,8 @@ void hv_kvp_onchannelcallback(void *cont
+ int util_fw_version;
+ int kvp_srv_version;
+
+- if (kvp_transaction.state > HVUTIL_READY) {
+- /*
+- * We will defer processing this callback once
+- * the current transaction is complete.
+- */
+- kvp_transaction.kvp_context = context;
++ if (kvp_transaction.state > HVUTIL_READY)
+ return;
+- }
+- kvp_transaction.kvp_context = NULL;
+
+ vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
+ &requestid);
+--- a/drivers/hv/hv_snapshot.c
++++ b/drivers/hv/hv_snapshot.c
+@@ -53,7 +53,6 @@ static struct {
+ struct vmbus_channel *recv_channel; /* chn we got the request */
+ u64 recv_req_id; /* request ID. */
+ struct hv_vss_msg *msg; /* current message */
+- void *vss_context; /* for the channel callback */
+ } vss_transaction;
+
+
+@@ -74,6 +73,13 @@ static void vss_timeout_func(struct work
+ static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
+ static DECLARE_WORK(vss_send_op_work, vss_send_op);
+
++static void vss_poll_wrapper(void *channel)
++{
++ /* Transaction is finished, reset the state here to avoid races. */
++ vss_transaction.state = HVUTIL_READY;
++ hv_vss_onchannelcallback(channel);
++}
++
+ /*
+ * Callback when data is received from user mode.
+ */
+@@ -86,12 +92,7 @@ static void vss_timeout_func(struct work
+ pr_warn("VSS: timeout waiting for daemon to reply\n");
+ vss_respond_to_host(HV_E_FAIL);
+
+- /* Transaction is finished, reset the state. */
+- if (vss_transaction.state > HVUTIL_READY)
+- vss_transaction.state = HVUTIL_READY;
+-
+- hv_poll_channel(vss_transaction.vss_context,
+- hv_vss_onchannelcallback);
++ hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
+ }
+
+ static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
+@@ -138,9 +139,8 @@ static int vss_on_msg(void *msg, int len
+ if (cancel_delayed_work_sync(&vss_timeout_work)) {
+ vss_respond_to_host(vss_msg->error);
+ /* Transaction is finished, reset the state. */
+- vss_transaction.state = HVUTIL_READY;
+- hv_poll_channel(vss_transaction.vss_context,
+- hv_vss_onchannelcallback);
++ hv_poll_channel(vss_transaction.recv_channel,
++ vss_poll_wrapper);
+ }
+ } else {
+ /* This is a spurious call! */
+@@ -238,15 +238,8 @@ void hv_vss_onchannelcallback(void *cont
+ struct icmsg_hdr *icmsghdrp;
+ struct icmsg_negotiate *negop = NULL;
+
+- if (vss_transaction.state > HVUTIL_READY) {
+- /*
+- * We will defer processing this callback once
+- * the current transaction is complete.
+- */
+- vss_transaction.vss_context = context;
++ if (vss_transaction.state > HVUTIL_READY)
+ return;
+- }
+- vss_transaction.vss_context = NULL;
+
+ vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+ &requestid);
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -764,11 +764,7 @@ static inline void hv_poll_channel(struc
+ if (!channel)
+ return;
+
+- if (channel->target_cpu != smp_processor_id())
+- smp_call_function_single(channel->target_cpu,
+- cb, channel, true);
+- else
+- cb(channel);
++ smp_call_function_single(channel->target_cpu, cb, channel, true);
+ }
+
+ enum hvutil_device_state {
--- /dev/null
+From 4c3d90dbac2951d370a2bbb2673191cd526da3ab Mon Sep 17 00:00:00 2001
+From: Olaf Hering <olaf@aepfle.de>
+Date: Mon, 14 Dec 2015 16:01:34 -0800
+Subject: [PATCH 036/135] tools: hv: report ENOSPC errors in hv_fcopy_daemon
+
+[ Upstream commit b4ed5d1682c6613988c2eb1de55df5ac9988afcc ]
+
+Currently some "Unspecified error 0x80004005" is reported on the Windows
+side if something fails. Handle the ENOSPC case and return
+ERROR_DISK_FULL, which allows at least Copy-VMFile to report a
+meaningful error.
+
+Signed-off-by: Olaf Hering <olaf@aepfle.de>
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/uapi/linux/hyperv.h | 1 +
+ tools/hv/hv_fcopy_daemon.c | 20 +++++++++++++++++---
+ 2 files changed, 18 insertions(+), 3 deletions(-)
+
+--- a/include/uapi/linux/hyperv.h
++++ b/include/uapi/linux/hyperv.h
+@@ -313,6 +313,7 @@ enum hv_kvp_exchg_pool {
+ #define HV_INVALIDARG 0x80070057
+ #define HV_GUID_NOTFOUND 0x80041002
+ #define HV_ERROR_ALREADY_EXISTS 0x80070050
++#define HV_ERROR_DISK_FULL 0x80070070
+
+ #define ADDR_FAMILY_NONE 0x00
+ #define ADDR_FAMILY_IPV4 0x01
+--- a/tools/hv/hv_fcopy_daemon.c
++++ b/tools/hv/hv_fcopy_daemon.c
+@@ -37,12 +37,14 @@
+
+ static int target_fd;
+ static char target_fname[W_MAX_PATH];
++static unsigned long long filesize;
+
+ static int hv_start_fcopy(struct hv_start_fcopy *smsg)
+ {
+ int error = HV_E_FAIL;
+ char *q, *p;
+
++ filesize = 0;
+ p = (char *)smsg->path_name;
+ snprintf(target_fname, sizeof(target_fname), "%s/%s",
+ (char *)smsg->path_name, (char *)smsg->file_name);
+@@ -98,14 +100,26 @@ done:
+ static int hv_copy_data(struct hv_do_fcopy *cpmsg)
+ {
+ ssize_t bytes_written;
++ int ret = 0;
+
+ bytes_written = pwrite(target_fd, cpmsg->data, cpmsg->size,
+ cpmsg->offset);
+
+- if (bytes_written != cpmsg->size)
+- return HV_E_FAIL;
++ filesize += cpmsg->size;
++ if (bytes_written != cpmsg->size) {
++ switch (errno) {
++ case ENOSPC:
++ ret = HV_ERROR_DISK_FULL;
++ break;
++ default:
++ ret = HV_E_FAIL;
++ break;
++ }
++ syslog(LOG_ERR, "pwrite failed to write %llu bytes: %ld (%s)",
++ filesize, (long)bytes_written, strerror(errno));
++ }
+
+- return 0;
++ return ret;
+ }
+
+ static int hv_copy_finished(void)
--- /dev/null
+From 9435330bd885600bb666af6bb111834a8e475ba5 Mon Sep 17 00:00:00 2001
+From: Olaf Hering <olaf@aepfle.de>
+Date: Mon, 14 Dec 2015 16:01:36 -0800
+Subject: [PATCH 037/135] Drivers: hv: util: catch allocation errors
+
+[ Upstream commit cdc0c0c94e4e6dfa371d497a3130f83349b6ead6 ]
+
+Catch allocation errors in hvutil_transport_send.
+
+Fixes: 14b50f80c32d ('Drivers: hv: util: introduce hv_utils_transport abstraction')
+
+Signed-off-by: Olaf Hering <olaf@aepfle.de>
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hv/hv_utils_transport.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/hv/hv_utils_transport.c
++++ b/drivers/hv/hv_utils_transport.c
+@@ -204,9 +204,12 @@ int hvutil_transport_send(struct hvutil_
+ goto out_unlock;
+ }
+ hvt->outmsg = kzalloc(len, GFP_KERNEL);
+- memcpy(hvt->outmsg, msg, len);
+- hvt->outmsg_len = len;
+- wake_up_interruptible(&hvt->outmsg_q);
++ if (hvt->outmsg) {
++ memcpy(hvt->outmsg, msg, len);
++ hvt->outmsg_len = len;
++ wake_up_interruptible(&hvt->outmsg_q);
++ } else
++ ret = -ENOMEM;
+ out_unlock:
+ mutex_unlock(&hvt->outmsg_lock);
+ return ret;
--- /dev/null
+From b43e0d6819e7723e5b43da02b039eb6d62970f1f Mon Sep 17 00:00:00 2001
+From: Andrey Smetanin <asmetanin@virtuozzo.com>
+Date: Mon, 14 Dec 2015 16:01:38 -0800
+Subject: [PATCH 038/135] drivers/hv: cleanup synic msrs if vmbus connect
+ failed
+
+[ Upstream commit 17efbee8ba02ef00d3b270998978f8a1a90f1d92 ]
+
+Before vmbus_connect() synic is setup per vcpu - this means
+hypervisor receives writes at synic msr's and probably allocate
+hypervisor resources per synic setup.
+
+If vmbus_connect() failed for some reason it's necessary to clean up
+synic setup by call hv_synic_cleanup() at each vcpu to get a chance
+to free allocated resources by hypervisor per synic.
+
+This patch does appropriate cleanup in case of vmbus_connect() failure.
+
+Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
+Signed-off-by: Denis V. Lunev <den@openvz.org>
+Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+CC: "K. Y. Srinivasan" <kys@microsoft.com>
+CC: Haiyang Zhang <haiyangz@microsoft.com>
+CC: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hv/vmbus_drv.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -870,7 +870,7 @@ static int vmbus_bus_init(int irq)
+ on_each_cpu(hv_synic_init, NULL, 1);
+ ret = vmbus_connect();
+ if (ret)
+- goto err_alloc;
++ goto err_connect;
+
+ if (vmbus_proto_version > VERSION_WIN7)
+ cpu_hotplug_disable();
+@@ -888,6 +888,8 @@ static int vmbus_bus_init(int irq)
+
+ return 0;
+
++err_connect:
++ on_each_cpu(hv_synic_cleanup, NULL, 1);
+ err_alloc:
+ hv_synic_free();
+ hv_remove_vmbus_irq();
--- /dev/null
+From ad43527212d81be98c9194972dd8adaeb405189b Mon Sep 17 00:00:00 2001
+From: Olaf Hering <olaf@aepfle.de>
+Date: Mon, 14 Dec 2015 16:01:42 -0800
+Subject: [PATCH 039/135] Drivers: hv: vss: run only on supported host versions
+
+[ Upstream commit ed9ba608e4851144af8c7061cbb19f751c73e998 ]
+
+The Backup integration service on WS2012 apparently has trouble
+negotiating with a guest which does not support the provided util version.
+Currently the VSS driver supports only version 5/0. A WS2012 offers only
+version 1/x and 3/x, and vmbus_prep_negotiate_resp correctly returns an
+empty icframe_vercnt/icmsg_vercnt. But the host ignores that and
+continues to send ICMSGTYPE_NEGOTIATE messages. The result are weird
+errors during boot and general misbehaviour.
+
+Check the Windows version to work around the host bug, skip hv_vss_init
+on WS2012 and older.
+
+Signed-off-by: Olaf Hering <olaf@aepfle.de>
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hv/hv_snapshot.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/hv/hv_snapshot.c
++++ b/drivers/hv/hv_snapshot.c
+@@ -331,6 +331,11 @@ static void vss_on_reset(void)
+ int
+ hv_vss_init(struct hv_util_service *srv)
+ {
++ if (vmbus_proto_version < VERSION_WIN8_1) {
++ pr_warn("Integration service 'Backup (volume snapshot)'"
++ " not supported on this host version.\n");
++ return -ENOTSUPP;
++ }
+ recv_buffer = srv->recv_buffer;
+
+ /*
--- /dev/null
+From 9b7eb397cb7c022112ea1a076cff8e1bcffe4311 Mon Sep 17 00:00:00 2001
+From: Dexuan Cui <decui@microsoft.com>
+Date: Mon, 14 Dec 2015 16:01:47 -0800
+Subject: [PATCH 040/135] Drivers: hv: vmbus: serialize process_chn_event() and
+ vmbus_close_internal()
+
+[ Upstream commit 63d55b2aeb5e4faa170316fee73c3c47ea9268c7 ]
+
+process_chn_event(), running in the tasklet, can race with
+vmbus_close_internal() in the case of SMP guest, e.g., when the former is
+accessing channel->inbound.ring_buffer, the latter could be freeing the
+ring_buffer pages.
+
+To resolve the race, we can serialize them by disabling the tasklet when
+the latter is running here.
+
+Signed-off-by: Dexuan Cui <decui@microsoft.com>
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hv/channel.c | 21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -28,6 +28,7 @@
+ #include <linux/module.h>
+ #include <linux/hyperv.h>
+ #include <linux/uio.h>
++#include <linux/interrupt.h>
+
+ #include "hyperv_vmbus.h"
+
+@@ -496,8 +497,21 @@ static void reset_channel_cb(void *arg)
+ static int vmbus_close_internal(struct vmbus_channel *channel)
+ {
+ struct vmbus_channel_close_channel *msg;
++ struct tasklet_struct *tasklet;
+ int ret;
+
++ /*
++ * process_chn_event(), running in the tasklet, can race
++ * with vmbus_close_internal() in the case of SMP guest, e.g., when
++ * the former is accessing channel->inbound.ring_buffer, the latter
++ * could be freeing the ring_buffer pages.
++ *
++ * To resolve the race, we can serialize them by disabling the
++ * tasklet when the latter is running here.
++ */
++ tasklet = hv_context.event_dpc[channel->target_cpu];
++ tasklet_disable(tasklet);
++
+ channel->state = CHANNEL_OPEN_STATE;
+ channel->sc_creation_callback = NULL;
+ /* Stop callback and cancel the timer asap */
+@@ -525,7 +539,7 @@ static int vmbus_close_internal(struct v
+ * If we failed to post the close msg,
+ * it is perhaps better to leak memory.
+ */
+- return ret;
++ goto out;
+ }
+
+ /* Tear down the gpadl for the channel's ring buffer */
+@@ -538,7 +552,7 @@ static int vmbus_close_internal(struct v
+ * If we failed to teardown gpadl,
+ * it is perhaps better to leak memory.
+ */
+- return ret;
++ goto out;
+ }
+ }
+
+@@ -555,6 +569,9 @@ static int vmbus_close_internal(struct v
+ if (channel->rescind)
+ hv_process_channel_removal(channel,
+ channel->offermsg.child_relid);
++out:
++ tasklet_enable(tasklet);
++
+ return ret;
+ }
+
--- /dev/null
+From a62048110b92194ad40236f5fd728942d9994565 Mon Sep 17 00:00:00 2001
+From: Dexuan Cui <decui@microsoft.com>
+Date: Mon, 14 Dec 2015 16:01:49 -0800
+Subject: [PATCH 041/135] Drivers: hv: vmbus: fix rescind-offer handling for
+ device without a driver
+
+[ Upstream commit 34c6801e3310ad286c7bb42bc88d42926b8f99bf ]
+
+In the path vmbus_onoffer_rescind() -> vmbus_device_unregister() ->
+device_unregister() -> ... -> __device_release_driver(), we can see for a
+device without a driver loaded: dev->driver is NULL, so
+dev->bus->remove(dev), namely vmbus_remove(), isn't invoked.
+
+As a result, vmbus_remove() -> hv_process_channel_removal() isn't invoked
+and some cleanups(like sending a CHANNELMSG_RELID_RELEASED message to the
+host) aren't done.
+
+We can demo the issue this way:
+1. rmmod hv_utils;
+2. disable the Heartbeat Integration Service in Hyper-V Manager and lsvmbus
+shows the device disappears.
+3. re-enable the Heartbeat in Hyper-V Manager and modprobe hv_utils, but
+lsvmbus shows the device can't appear again.
+This is because, the host thinks the VM hasn't released the relid, so can't
+re-offer the device to the VM.
+
+We can fix the issue by moving hv_process_channel_removal()
+from vmbus_close_internal() to vmbus_device_release(), since the latter is
+always invoked on device_unregister(), whether or not the dev has a driver
+loaded.
+
+Signed-off-by: Dexuan Cui <decui@microsoft.com>
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hv/channel.c | 6 ------
+ drivers/hv/channel_mgmt.c | 6 +++---
+ drivers/hv/vmbus_drv.c | 15 +++------------
+ 3 files changed, 6 insertions(+), 21 deletions(-)
+
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -563,12 +563,6 @@ static int vmbus_close_internal(struct v
+ free_pages((unsigned long)channel->ringbuffer_pages,
+ get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+
+- /*
+- * If the channel has been rescinded; process device removal.
+- */
+- if (channel->rescind)
+- hv_process_channel_removal(channel,
+- channel->offermsg.child_relid);
+ out:
+ tasklet_enable(tasklet);
+
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -191,6 +191,8 @@ void hv_process_channel_removal(struct v
+ if (channel == NULL)
+ return;
+
++ BUG_ON(!channel->rescind);
++
+ if (channel->target_cpu != get_cpu()) {
+ put_cpu();
+ smp_call_function_single(channel->target_cpu,
+@@ -230,9 +232,7 @@ void vmbus_free_channels(void)
+
+ list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
+ listentry) {
+- /* if we don't set rescind to true, vmbus_close_internal()
+- * won't invoke hv_process_channel_removal().
+- */
++ /* hv_process_channel_removal() needs this */
+ channel->rescind = true;
+
+ vmbus_device_unregister(channel->device_obj);
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -603,23 +603,11 @@ static int vmbus_remove(struct device *c
+ {
+ struct hv_driver *drv;
+ struct hv_device *dev = device_to_hv_device(child_device);
+- u32 relid = dev->channel->offermsg.child_relid;
+
+ if (child_device->driver) {
+ drv = drv_to_hv_drv(child_device->driver);
+ if (drv->remove)
+ drv->remove(dev);
+- else {
+- hv_process_channel_removal(dev->channel, relid);
+- pr_err("remove not set for driver %s\n",
+- dev_name(child_device));
+- }
+- } else {
+- /*
+- * We don't have a driver for this device; deal with the
+- * rescind message by removing the channel.
+- */
+- hv_process_channel_removal(dev->channel, relid);
+ }
+
+ return 0;
+@@ -654,7 +642,10 @@ static void vmbus_shutdown(struct device
+ static void vmbus_device_release(struct device *device)
+ {
+ struct hv_device *hv_dev = device_to_hv_device(device);
++ struct vmbus_channel *channel = hv_dev->channel;
+
++ hv_process_channel_removal(channel,
++ channel->offermsg.child_relid);
+ kfree(hv_dev);
+
+ }
--- /dev/null
+From 1e890bc033d06da1bae241e5cd6d29dae98131e6 Mon Sep 17 00:00:00 2001
+From: Vaibhav Jain <vaibhav@linux.vnet.ibm.com>
+Date: Mon, 16 Nov 2015 09:33:45 +0530
+Subject: [PATCH 042/135] cxl: Fix possible idr warning when contexts are
+ released
+
+[ Upstream commit 1b5df59e50874b9034c0fa389cd52b65f1f93292 ]
+
+An idr warning is reported when a context is release after the capi card
+is unbound from the cxl driver via sysfs. Below are the steps to
+reproduce:
+
+1. Create multiple afu contexts in an user-space application using libcxl.
+2. Unbind capi card from cxl using command of form
+ echo <capi-card-pci-addr> > /sys/bus/pci/drivers/cxl-pci/unbind
+3. Exit/kill the application owning afu contexts.
+
+After above steps a warning message is usually seen in the kernel logs
+of the form "idr_remove called for id=<context-id> which is not
+allocated."
+
+This is caused by the function cxl_release_afu which destroys the
+contexts_idr table. So when a context is released, no entry for context pe
+is found in the contexts_idr table and idr code prints this warning.
+
+This patch fixes this issue by increasing & decreasing the ref-count on
+the afu device when a context is initialized or when it's freed
+respectively. This prevents the afu from being released until all the
+afu contexts have been released. The patch introduces two new functions
+namely cxl_afu_get/put that manage the ref-count on the afu device.
+
+Also the patch removes code inside cxl_dev_context_init that increases ref
+on the afu device as its guaranteed to be alive during this function.
+
+Reported-by: Ian Munsie <imunsie@au1.ibm.com>
+Signed-off-by: Vaibhav Jain <vaibhav@linux.vnet.ibm.com>
+Acked-by: Ian Munsie <imunsie@au1.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/cxl/api.c | 4 ----
+ drivers/misc/cxl/context.c | 9 +++++++++
+ drivers/misc/cxl/cxl.h | 12 ++++++++++++
+ drivers/misc/cxl/file.c | 19 +++++++++++--------
+ 4 files changed, 32 insertions(+), 12 deletions(-)
+
+--- a/drivers/misc/cxl/api.c
++++ b/drivers/misc/cxl/api.c
+@@ -25,7 +25,6 @@ struct cxl_context *cxl_dev_context_init
+
+ afu = cxl_pci_to_afu(dev);
+
+- get_device(&afu->dev);
+ ctx = cxl_context_alloc();
+ if (IS_ERR(ctx)) {
+ rc = PTR_ERR(ctx);
+@@ -61,7 +60,6 @@ err_mapping:
+ err_ctx:
+ kfree(ctx);
+ err_dev:
+- put_device(&afu->dev);
+ return ERR_PTR(rc);
+ }
+ EXPORT_SYMBOL_GPL(cxl_dev_context_init);
+@@ -87,8 +85,6 @@ int cxl_release_context(struct cxl_conte
+ if (ctx->status >= STARTED)
+ return -EBUSY;
+
+- put_device(&ctx->afu->dev);
+-
+ cxl_context_free(ctx);
+
+ return 0;
+--- a/drivers/misc/cxl/context.c
++++ b/drivers/misc/cxl/context.c
+@@ -97,6 +97,12 @@ int cxl_context_init(struct cxl_context
+ ctx->pe = i;
+ ctx->elem = &ctx->afu->spa[i];
+ ctx->pe_inserted = false;
++
++ /*
++ * take a ref on the afu so that it stays alive at-least till
++ * this context is reclaimed inside reclaim_ctx.
++ */
++ cxl_afu_get(afu);
+ return 0;
+ }
+
+@@ -278,6 +284,9 @@ static void reclaim_ctx(struct rcu_head
+ if (ctx->irq_bitmap)
+ kfree(ctx->irq_bitmap);
+
++ /* Drop ref to the afu device taken during cxl_context_init */
++ cxl_afu_put(ctx->afu);
++
+ kfree(ctx);
+ }
+
+--- a/drivers/misc/cxl/cxl.h
++++ b/drivers/misc/cxl/cxl.h
+@@ -403,6 +403,18 @@ struct cxl_afu {
+ bool enabled;
+ };
+
++/* AFU refcount management */
++static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
++{
++
++ return (get_device(&afu->dev) == NULL) ? NULL : afu;
++}
++
++static inline void cxl_afu_put(struct cxl_afu *afu)
++{
++ put_device(&afu->dev);
++}
++
+
+ struct cxl_irq_name {
+ struct list_head list;
+--- a/drivers/misc/cxl/file.c
++++ b/drivers/misc/cxl/file.c
+@@ -67,7 +67,13 @@ static int __afu_open(struct inode *inod
+ spin_unlock(&adapter->afu_list_lock);
+ goto err_put_adapter;
+ }
+- get_device(&afu->dev);
++
++ /*
++ * taking a ref to the afu so that it doesn't go away
++ * for rest of the function. This ref is released before
++ * we return.
++ */
++ cxl_afu_get(afu);
+ spin_unlock(&adapter->afu_list_lock);
+
+ if (!afu->current_mode)
+@@ -90,13 +96,12 @@ static int __afu_open(struct inode *inod
+ file->private_data = ctx;
+ cxl_ctx_get();
+
+- /* Our ref on the AFU will now hold the adapter */
+- put_device(&adapter->dev);
+-
+- return 0;
++ /* indicate success */
++ rc = 0;
+
+ err_put_afu:
+- put_device(&afu->dev);
++ /* release the ref taken earlier */
++ cxl_afu_put(afu);
+ err_put_adapter:
+ put_device(&adapter->dev);
+ return rc;
+@@ -131,8 +136,6 @@ int afu_release(struct inode *inode, str
+ mutex_unlock(&ctx->mapping_lock);
+ }
+
+- put_device(&ctx->afu->dev);
+-
+ /*
+ * At this this point all bottom halfs have finished and we should be
+ * getting no more IRQs from the hardware for this context. Once it's
--- /dev/null
+From fd58cf7d092e9bbaa0e29239de77b4653b859fa5 Mon Sep 17 00:00:00 2001
+From: Vaibhav Jain <vaibhav@linux.vnet.ibm.com>
+Date: Tue, 24 Nov 2015 16:26:18 +0530
+Subject: [PATCH 043/135] cxl: Fix DSI misses when the context owning task
+ exits
+
+[ Upstream commit 7b8ad495d59280b634a7b546f4cdf58cf4d65f61 ]
+
+Presently when a user-space process issues CXL_IOCTL_START_WORK ioctl we
+store the pid of the current task_struct and use it to get pointer to
+the mm_struct of the process, while processing page or segment faults
+from the capi card. However this causes issues when the thread that had
+originally issued the start-work ioctl exits in which case the stored
+pid is no more valid and the cxl driver is unable to handle faults as
+the mm_struct corresponding to process is no more accessible.
+
+This patch fixes this issue by using the mm_struct of the next alive
+task in the thread group. This is done by iterating over all the tasks
+in the thread group starting from thread group leader and calling
+get_task_mm on each one of them. When a valid mm_struct is obtained the
+pid of the associated task is stored in the context replacing the
+exiting one for handling future faults.
+
+The patch introduces a new function named get_mem_context that checks if
+the current task pointed to by ctx->pid is dead? If yes it performs the
+steps described above. Also a new variable cxl_context.glpid is
+introduced which stores the pid of the thread group leader associated
+with the context owning task.
+
+Reported-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
+Reported-by: Frank Haverkamp <HAVERKAM@de.ibm.com>
+Suggested-by: Ian Munsie <imunsie@au1.ibm.com>
+Signed-off-by: Vaibhav Jain <vaibhav@linux.vnet.ibm.com>
+Acked-by: Ian Munsie <imunsie@au1.ibm.com>
+Reviewed-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+Reviewed-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/cxl/api.c | 2
+ drivers/misc/cxl/context.c | 6 +-
+ drivers/misc/cxl/cxl.h | 3 +
+ drivers/misc/cxl/fault.c | 129 +++++++++++++++++++++++++++++++++------------
+ drivers/misc/cxl/file.c | 6 +-
+ 5 files changed, 109 insertions(+), 37 deletions(-)
+
+--- a/drivers/misc/cxl/api.c
++++ b/drivers/misc/cxl/api.c
+@@ -172,7 +172,7 @@ int cxl_start_context(struct cxl_context
+
+ if (task) {
+ ctx->pid = get_task_pid(task, PIDTYPE_PID);
+- get_pid(ctx->pid);
++ ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
+ kernel = false;
+ }
+
+--- a/drivers/misc/cxl/context.c
++++ b/drivers/misc/cxl/context.c
+@@ -42,7 +42,7 @@ int cxl_context_init(struct cxl_context
+ spin_lock_init(&ctx->sste_lock);
+ ctx->afu = afu;
+ ctx->master = master;
+- ctx->pid = NULL; /* Set in start work ioctl */
++ ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
+ mutex_init(&ctx->mapping_lock);
+ ctx->mapping = mapping;
+
+@@ -217,7 +217,11 @@ int __detach_context(struct cxl_context
+ WARN_ON(cxl_detach_process(ctx) &&
+ cxl_adapter_link_ok(ctx->afu->adapter));
+ flush_work(&ctx->fault_work); /* Only needed for dedicated process */
++
++ /* release the reference to the group leader and mm handling pid */
+ put_pid(ctx->pid);
++ put_pid(ctx->glpid);
++
+ cxl_ctx_put();
+ return 0;
+ }
+--- a/drivers/misc/cxl/cxl.h
++++ b/drivers/misc/cxl/cxl.h
+@@ -445,6 +445,9 @@ struct cxl_context {
+ unsigned int sst_size, sst_lru;
+
+ wait_queue_head_t wq;
++ /* pid of the group leader associated with the pid */
++ struct pid *glpid;
++ /* use mm context associated with this pid for ds faults */
+ struct pid *pid;
+ spinlock_t lock; /* Protects pending_irq_mask, pending_fault and fault_addr */
+ /* Only used in PR mode */
+--- a/drivers/misc/cxl/fault.c
++++ b/drivers/misc/cxl/fault.c
+@@ -166,13 +166,92 @@ static void cxl_handle_page_fault(struct
+ cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
+ }
+
++/*
++ * Returns the mm_struct corresponding to the context ctx via ctx->pid
++ * In case the task has exited we use the task group leader accessible
++ * via ctx->glpid to find the next task in the thread group that has a
++ * valid mm_struct associated with it. If a task with valid mm_struct
++ * is found the ctx->pid is updated to use the task struct for subsequent
++ * translations. In case no valid mm_struct is found in the task group to
++ * service the fault a NULL is returned.
++ */
++static struct mm_struct *get_mem_context(struct cxl_context *ctx)
++{
++ struct task_struct *task = NULL;
++ struct mm_struct *mm = NULL;
++ struct pid *old_pid = ctx->pid;
++
++ if (old_pid == NULL) {
++ pr_warn("%s: Invalid context for pe=%d\n",
++ __func__, ctx->pe);
++ return NULL;
++ }
++
++ task = get_pid_task(old_pid, PIDTYPE_PID);
++
++ /*
++ * pid_alive may look racy but this saves us from costly
++ * get_task_mm when the task is a zombie. In worst case
++ * we may think a task is alive, which is about to die
++ * but get_task_mm will return NULL.
++ */
++ if (task != NULL && pid_alive(task))
++ mm = get_task_mm(task);
++
++ /* release the task struct that was taken earlier */
++ if (task)
++ put_task_struct(task);
++ else
++ pr_devel("%s: Context owning pid=%i for pe=%i dead\n",
++ __func__, pid_nr(old_pid), ctx->pe);
++
++ /*
++ * If we couldn't find the mm context then use the group
++ * leader to iterate over the task group and find a task
++ * that gives us mm_struct.
++ */
++ if (unlikely(mm == NULL && ctx->glpid != NULL)) {
++
++ rcu_read_lock();
++ task = pid_task(ctx->glpid, PIDTYPE_PID);
++ if (task)
++ do {
++ mm = get_task_mm(task);
++ if (mm) {
++ ctx->pid = get_task_pid(task,
++ PIDTYPE_PID);
++ break;
++ }
++ task = next_thread(task);
++ } while (task && !thread_group_leader(task));
++ rcu_read_unlock();
++
++ /* check if we switched pid */
++ if (ctx->pid != old_pid) {
++ if (mm)
++ pr_devel("%s:pe=%i switch pid %i->%i\n",
++ __func__, ctx->pe, pid_nr(old_pid),
++ pid_nr(ctx->pid));
++ else
++ pr_devel("%s:Cannot find mm for pid=%i\n",
++ __func__, pid_nr(old_pid));
++
++ /* drop the reference to older pid */
++ put_pid(old_pid);
++ }
++ }
++
++ return mm;
++}
++
++
++
+ void cxl_handle_fault(struct work_struct *fault_work)
+ {
+ struct cxl_context *ctx =
+ container_of(fault_work, struct cxl_context, fault_work);
+ u64 dsisr = ctx->dsisr;
+ u64 dar = ctx->dar;
+- struct task_struct *task = NULL;
+ struct mm_struct *mm = NULL;
+
+ if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
+@@ -195,17 +274,17 @@ void cxl_handle_fault(struct work_struct
+ "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);
+
+ if (!ctx->kernel) {
+- if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
+- pr_devel("cxl_handle_fault unable to get task %i\n",
+- pid_nr(ctx->pid));
++
++ mm = get_mem_context(ctx);
++ /* indicates all the thread in task group have exited */
++ if (mm == NULL) {
++ pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
++ __func__, ctx->pe, pid_nr(ctx->pid));
+ cxl_ack_ae(ctx);
+ return;
+- }
+- if (!(mm = get_task_mm(task))) {
+- pr_devel("cxl_handle_fault unable to get mm %i\n",
+- pid_nr(ctx->pid));
+- cxl_ack_ae(ctx);
+- goto out;
++ } else {
++ pr_devel("Handling page fault for pe=%d pid=%i\n",
++ ctx->pe, pid_nr(ctx->pid));
+ }
+ }
+
+@@ -218,33 +297,22 @@ void cxl_handle_fault(struct work_struct
+
+ if (mm)
+ mmput(mm);
+-out:
+- if (task)
+- put_task_struct(task);
+ }
+
+ static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
+ {
+- int rc;
+- struct task_struct *task;
+ struct mm_struct *mm;
+
+- if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
+- pr_devel("cxl_prefault_one unable to get task %i\n",
+- pid_nr(ctx->pid));
+- return;
+- }
+- if (!(mm = get_task_mm(task))) {
++ mm = get_mem_context(ctx);
++ if (mm == NULL) {
+ pr_devel("cxl_prefault_one unable to get mm %i\n",
+ pid_nr(ctx->pid));
+- put_task_struct(task);
+ return;
+ }
+
+- rc = cxl_fault_segment(ctx, mm, ea);
++ cxl_fault_segment(ctx, mm, ea);
+
+ mmput(mm);
+- put_task_struct(task);
+ }
+
+ static u64 next_segment(u64 ea, u64 vsid)
+@@ -263,18 +331,13 @@ static void cxl_prefault_vma(struct cxl_
+ struct copro_slb slb;
+ struct vm_area_struct *vma;
+ int rc;
+- struct task_struct *task;
+ struct mm_struct *mm;
+
+- if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
+- pr_devel("cxl_prefault_vma unable to get task %i\n",
+- pid_nr(ctx->pid));
+- return;
+- }
+- if (!(mm = get_task_mm(task))) {
++ mm = get_mem_context(ctx);
++ if (mm == NULL) {
+ pr_devel("cxl_prefault_vm unable to get mm %i\n",
+ pid_nr(ctx->pid));
+- goto out1;
++ return;
+ }
+
+ down_read(&mm->mmap_sem);
+@@ -295,8 +358,6 @@ static void cxl_prefault_vma(struct cxl_
+ up_read(&mm->mmap_sem);
+
+ mmput(mm);
+-out1:
+- put_task_struct(task);
+ }
+
+ void cxl_prefault(struct cxl_context *ctx, u64 wed)
+--- a/drivers/misc/cxl/file.c
++++ b/drivers/misc/cxl/file.c
+@@ -201,8 +201,12 @@ static long afu_ioctl_start_work(struct
+ * where a process (master, some daemon, etc) has opened the chardev on
+ * behalf of another process, so the AFU's mm gets bound to the process
+ * that performs this ioctl and not the process that opened the file.
++ * Also we grab the PID of the group leader so that if the task that
++ * has performed the attach operation exits the mm context of the
++ * process is still accessible.
+ */
+- ctx->pid = get_pid(get_task_pid(current, PIDTYPE_PID));
++ ctx->pid = get_task_pid(current, PIDTYPE_PID);
++ ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
+
+ trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
+
--- /dev/null
+From a8d537cb6e93d02620573e0da8381d8186afb5f4 Mon Sep 17 00:00:00 2001
+From: Manoj Kumar <manoj@linux.vnet.ibm.com>
+Date: Mon, 14 Dec 2015 15:07:02 -0600
+Subject: [PATCH 044/135] cxlflash: Fix to resolve cmd leak after host reset
+
+[ Upstream commit ee91e332a6e6e9b939f60f6e1bd72fb2def5290d ]
+
+After a few iterations of resetting the card, either during EEH
+recovery, or a host_reset the following is seen in the logs. cxlflash
+0008:00: cxlflash_queuecommand: could not get a free command
+
+At every reset of the card, the commands that are outstanding are being
+leaked. No effort is being made to reap these commands. A few more
+resets later, the above error message floods the logs and the card is
+rendered totally unusable as no free commands are available.
+
+Iterated through the 'cmd' queue and printed out the 'free' counter and
+found that on each reset certain commands were in-use and stayed in-use
+through subsequent resets.
+
+To resolve this issue, when the card is reset, reap all the commands
+that are active/outstanding.
+
+Signed-off-by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>
+Acked-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
+Reviewed-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/cxlflash/main.c | 19 +++++++++++++++++--
+ 1 file changed, 17 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/cxlflash/main.c
++++ b/drivers/scsi/cxlflash/main.c
+@@ -632,15 +632,30 @@ static void free_mem(struct cxlflash_cfg
+ * @cfg: Internal structure associated with the host.
+ *
+ * Safe to call with AFU in a partially allocated/initialized state.
++ *
++ * Cleans up all state associated with the command queue, and unmaps
++ * the MMIO space.
++ *
++ * - complete() will take care of commands we initiated (they'll be checked
++ * in as part of the cleanup that occurs after the completion)
++ *
++ * - cmd_checkin() will take care of entries that we did not initiate and that
++ * have not (and will not) complete because they are sitting on a [now stale]
++ * hardware queue
+ */
+ static void stop_afu(struct cxlflash_cfg *cfg)
+ {
+ int i;
+ struct afu *afu = cfg->afu;
++ struct afu_cmd *cmd;
+
+ if (likely(afu)) {
+- for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
+- complete(&afu->cmd[i].cevent);
++ for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
++ cmd = &afu->cmd[i];
++ complete(&cmd->cevent);
++ if (!atomic_read(&cmd->free))
++ cmd_checkin(cmd);
++ }
+
+ if (likely(afu->afu_map)) {
+ cxl_psa_unmap((void __iomem *)afu->afu_map);
--- /dev/null
+From 3446cec1bd5569a331080820ae23f6ed0f20c664 Mon Sep 17 00:00:00 2001
+From: Manoj Kumar <manoj@linux.vnet.ibm.com>
+Date: Mon, 14 Dec 2015 15:07:23 -0600
+Subject: [PATCH 045/135] cxlflash: Resolve oops in wait_port_offline
+
+[ Upstream commit b45cdbaf9f7f0486847c52f60747fb108724652a ]
+
+If an async error interrupt is generated, and the error requires the FC
+link to be reset, it cannot be performed in the interrupt context. So a
+work element is scheduled to complete the link reset in a process
+context. If either an EEH event or an escalation occurs in between when
+the interrupt is generated and the scheduled work is started, the MMIO
+space may no longer be available. This will cause an oops in the worker
+thread.
+
+[ 606.806583] NIP kthread_data+0x28/0x40
+[ 606.806633] LR wq_worker_sleeping+0x30/0x100
+[ 606.806694] Call Trace:
+[ 606.806721] 0x50 (unreliable)
+[ 606.806796] wq_worker_sleeping+0x30/0x100
+[ 606.806884] __schedule+0x69c/0x8a0
+[ 606.806959] schedule+0x44/0xc0
+[ 606.807034] do_exit+0x770/0xb90
+[ 606.807109] die+0x300/0x460
+[ 606.807185] bad_page_fault+0xd8/0x150
+[ 606.807259] handle_page_fault+0x2c/0x30
+[ 606.807338] wait_port_offline.constprop.12+0x60/0x130 [cxlflash]
+
+To prevent the problem state area from being unmapped, when there is
+pending work, a mapcount (using the kref mechanism) is held. The
+mapcount is released only when the work is completed. The last
+reference release is tied to the unmapping service.
+
+Signed-off-by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>
+Acked-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
+Reviewed-by: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/cxlflash/common.h | 2 ++
+ drivers/scsi/cxlflash/main.c | 27 ++++++++++++++++++++++++---
+ 2 files changed, 26 insertions(+), 3 deletions(-)
+
+--- a/drivers/scsi/cxlflash/common.h
++++ b/drivers/scsi/cxlflash/common.h
+@@ -165,6 +165,8 @@ struct afu {
+ struct sisl_host_map __iomem *host_map; /* MC host map */
+ struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
+
++ struct kref mapcount;
++
+ ctx_hndl_t ctx_hndl; /* master's context handle */
+ u64 *hrrq_start;
+ u64 *hrrq_end;
+--- a/drivers/scsi/cxlflash/main.c
++++ b/drivers/scsi/cxlflash/main.c
+@@ -368,6 +368,7 @@ out:
+
+ no_room:
+ afu->read_room = true;
++ kref_get(&cfg->afu->mapcount);
+ schedule_work(&cfg->work_q);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+@@ -473,6 +474,16 @@ out:
+ return rc;
+ }
+
++static void afu_unmap(struct kref *ref)
++{
++ struct afu *afu = container_of(ref, struct afu, mapcount);
++
++ if (likely(afu->afu_map)) {
++ cxl_psa_unmap((void __iomem *)afu->afu_map);
++ afu->afu_map = NULL;
++ }
++}
++
+ /**
+ * cxlflash_driver_info() - information handler for this host driver
+ * @host: SCSI host associated with device.
+@@ -503,6 +514,7 @@ static int cxlflash_queuecommand(struct
+ ulong lock_flags;
+ short lflag = 0;
+ int rc = 0;
++ int kref_got = 0;
+
+ dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
+ "cdb=(%08X-%08X-%08X-%08X)\n",
+@@ -547,6 +559,9 @@ static int cxlflash_queuecommand(struct
+ goto out;
+ }
+
++ kref_get(&cfg->afu->mapcount);
++ kref_got = 1;
++
+ cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.port_sel = port_sel;
+ cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
+@@ -587,6 +602,8 @@ static int cxlflash_queuecommand(struct
+ }
+
+ out:
++ if (kref_got)
++ kref_put(&afu->mapcount, afu_unmap);
+ pr_devel("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+ }
+@@ -661,6 +678,7 @@ static void stop_afu(struct cxlflash_cfg
+ cxl_psa_unmap((void __iomem *)afu->afu_map);
+ afu->afu_map = NULL;
+ }
++ kref_put(&afu->mapcount, afu_unmap);
+ }
+ }
+
+@@ -746,8 +764,8 @@ static void cxlflash_remove(struct pci_d
+ scsi_remove_host(cfg->host);
+ /* fall through */
+ case INIT_STATE_AFU:
+- term_afu(cfg);
+ cancel_work_sync(&cfg->work_q);
++ term_afu(cfg);
+ case INIT_STATE_PCI:
+ pci_release_regions(cfg->dev);
+ pci_disable_device(pdev);
+@@ -1331,6 +1349,7 @@ static irqreturn_t cxlflash_async_err_ir
+ __func__, port);
+ cfg->lr_state = LINK_RESET_REQUIRED;
+ cfg->lr_port = port;
++ kref_get(&cfg->afu->mapcount);
+ schedule_work(&cfg->work_q);
+ }
+
+@@ -1351,6 +1370,7 @@ static irqreturn_t cxlflash_async_err_ir
+
+ if (info->action & SCAN_HOST) {
+ atomic_inc(&cfg->scan_host_needed);
++ kref_get(&cfg->afu->mapcount);
+ schedule_work(&cfg->work_q);
+ }
+ }
+@@ -1746,6 +1766,7 @@ static int init_afu(struct cxlflash_cfg
+ rc = -ENOMEM;
+ goto err1;
+ }
++ kref_init(&afu->mapcount);
+
+ /* No byte reverse on reading afu_version or string will be backwards */
+ reg = readq(&afu->afu_map->global.regs.afu_version);
+@@ -1780,8 +1801,7 @@ out:
+ return rc;
+
+ err2:
+- cxl_psa_unmap((void __iomem *)afu->afu_map);
+- afu->afu_map = NULL;
++ kref_put(&afu->mapcount, afu_unmap);
+ err1:
+ term_mc(cfg, UNDO_START);
+ goto out;
+@@ -2354,6 +2374,7 @@ static void cxlflash_worker_thread(struc
+
+ if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
+ scsi_scan_host(cfg->host);
++ kref_put(&afu->mapcount, afu_unmap);
+ }
+
+ /**
--- /dev/null
+From ecfb10e113f6b097bd095ee231c0efae8112ca0d Mon Sep 17 00:00:00 2001
+From: Manoj Kumar <manoj@linux.vnet.ibm.com>
+Date: Mon, 14 Dec 2015 15:07:43 -0600
+Subject: [PATCH 046/135] cxlflash: Enable device id for future IBM CXL adapter
+
+[ Upstream commit a2746fb16e41b7c8f02aa4d2605ecce97abbebbd ]
+
+This drop enables a future card with a device id of 0x0600 to be
+recognized by the cxlflash driver.
+
+As per the design, the Accelerator Function Unit (AFU) for this new IBM
+CXL Flash Adapter retains the same host interface as the previous
+generation. For the early prototypes of the new card, the driver with
+this change behaves exactly as the driver prior to this behaved with the
+earlier generation card. Therefore, no card specific programming has
+been added. These card specific changes can be staged in later if
+needed.
+
+Signed-off-by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>
+Acked-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
+Reviewed-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/cxlflash/main.c | 3 +++
+ drivers/scsi/cxlflash/main.h | 4 ++--
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/cxlflash/main.c
++++ b/drivers/scsi/cxlflash/main.c
+@@ -2309,6 +2309,7 @@ static struct scsi_host_template driver_
+ * Device dependent values
+ */
+ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
++static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS };
+
+ /*
+ * PCI device binding table
+@@ -2316,6 +2317,8 @@ static struct dev_dependent_vals dev_cor
+ static struct pci_device_id cxlflash_pci_table[] = {
+ {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
++ {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
+ {}
+ };
+
+--- a/drivers/scsi/cxlflash/main.h
++++ b/drivers/scsi/cxlflash/main.h
+@@ -24,8 +24,8 @@
+ #define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
+ #define CXLFLASH_DRIVER_DATE "(August 13, 2015)"
+
+-#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
+-#define CXLFLASH_SUBS_DEV_ID 0x04F0
++#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
++#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600
+
+ /* Since there is only one target, make it 0 */
+ #define CXLFLASH_TARGET 0
--- /dev/null
+From 97da3d4e080bd62b8e81fcad0bfb821e35be8f31 Mon Sep 17 00:00:00 2001
+From: Brian Norris <computersforpeace@gmail.com>
+Date: Fri, 8 Jan 2016 10:30:09 -0800
+Subject: [PATCH 047/135] cxl: fix build for GCC 4.6.x
+
+[ Upstream commit aa09545589ceeff884421d8eb38d04963190afbe ]
+
+GCC 4.6.3 does not support -Wno-unused-const-variable. Instead, use the
+kbuild infrastructure that checks if this options exists.
+
+Fixes: 2cd55c68c0a4 ("cxl: Fix build failure due to -Wunused-variable behaviour change")
+Suggested-by: Michal Marek <mmarek@suse.com>
+Suggested-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/cxl/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/misc/cxl/Makefile
++++ b/drivers/misc/cxl/Makefile
+@@ -1,4 +1,4 @@
+-ccflags-y := -Werror -Wno-unused-const-variable
++ccflags-y := -Werror $(call cc-disable-warning, unused-const-variable)
+
+ cxl-y += main.o file.o irq.o fault.o native.o
+ cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
--- /dev/null
+From 0f234983774874603308c15698704af8a978397c Mon Sep 17 00:00:00 2001
+From: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
+Date: Mon, 7 Dec 2015 16:03:32 -0600
+Subject: [PATCH 048/135] cxl: Enable PCI device ID for future IBM CXL adapter
+
+[ Upstream commit 68adb7bfd66504e97364651fb7dac3f9c8aa8561 ]
+
+Add support for future IBM Coherent Accelerator (CXL) device
+with ID of 0x0601.
+
+Signed-off-by: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
+Reviewed-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/cxl/pci.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/misc/cxl/pci.c
++++ b/drivers/misc/cxl/pci.c
+@@ -138,6 +138,7 @@ static const struct pci_device_id cxl_pc
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
++ { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
+ { PCI_DEVICE_CLASS(0x120000, ~0), },
+
+ { }
--- /dev/null
+From 5f7eb50385ad993baa7e08814b59305ac85a9000 Mon Sep 17 00:00:00 2001
+From: James Smart <james.smart@avagotech.com>
+Date: Wed, 16 Dec 2015 18:11:52 -0500
+Subject: [PATCH 049/135] lpfc: Fix FCF Infinite loop in
+ lpfc_sli4_fcf_rr_next_index_get.
+
+[ Upstream commit f5cb5304eb26d307c9b30269fb0e007e0b262b7d ]
+
+Fix FCF Infinite loop in lpfc_sli4_fcf_rr_next_index_get.
+
+Signed-off-by: Dick Kennedy <dick.kennedy@avagotech.com>
+Signed-off-by: James Smart <james.smart@avagotech.com>
+Reviewed-by: Hannes Reinicke <hare@suse.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/lpfc/lpfc_sli.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -16173,7 +16173,7 @@ fail_fcf_read:
+ }
+
+ /**
+- * lpfc_check_next_fcf_pri
++ * lpfc_check_next_fcf_pri_level
+ * phba pointer to the lpfc_hba struct for this port.
+ * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
+ * routine when the rr_bmask is empty. The FCF indecies are put into the
+@@ -16329,8 +16329,12 @@ next_priority:
+
+ if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
+ phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
+- LPFC_FCF_FLOGI_FAILED)
++ LPFC_FCF_FLOGI_FAILED) {
++ if (list_is_singular(&phba->fcf.fcf_pri_list))
++ return LPFC_FCOE_FCF_NEXT_NONE;
++
+ goto next_priority;
++ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2845 Get next roundrobin failover FCF (x%x)\n",
--- /dev/null
+From 03fd0ccf08442441bc213b14250d4c6f9175472d Mon Sep 17 00:00:00 2001
+From: James Smart <james.smart@avagotech.com>
+Date: Wed, 16 Dec 2015 18:11:53 -0500
+Subject: [PATCH 050/135] lpfc: Fix the FLOGI discovery logic to comply with
+ T11 standards
+
+[ Upstream commit d6de08cc46269899988b4f40acc7337279693d4b ]
+
+Fix the FLOGI discovery logic to comply with T11 standards
+
+We weren't properly setting fabric parameters, such as R_A_TOV and E_D_TOV,
+when we registered the vfi object in default configs and pt2pt configs.
+Revise to now pass service params with the values to the firmware and
+ensure they are reset on link bounce. Required reworking the call sequence
+in the discovery threads.
+
+Signed-off-by: Dick Kennedy <dick.kennedy@avagotech.com>
+Signed-off-by: James Smart <james.smart@avagotech.com>
+Reviewed-by: Hannes Reinicke <hare@suse.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/lpfc/lpfc_crtn.h | 1
+ drivers/scsi/lpfc/lpfc_els.c | 342 ++++++++++++++++---------------------
+ drivers/scsi/lpfc/lpfc_hbadisc.c | 12 -
+ drivers/scsi/lpfc/lpfc_nportdisc.c | 124 ++++++++-----
+ 4 files changed, 240 insertions(+), 239 deletions(-)
+
+--- a/drivers/scsi/lpfc/lpfc_crtn.h
++++ b/drivers/scsi/lpfc/lpfc_crtn.h
+@@ -72,6 +72,7 @@ void lpfc_cancel_all_vport_retry_delay_t
+ void lpfc_retry_pport_discovery(struct lpfc_hba *);
+ void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
+
++void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+ void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+ void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -455,9 +455,9 @@ int
+ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
+ {
+ struct lpfc_hba *phba = vport->phba;
+- LPFC_MBOXQ_t *mboxq;
++ LPFC_MBOXQ_t *mboxq = NULL;
+ struct lpfc_nodelist *ndlp;
+- struct lpfc_dmabuf *dmabuf;
++ struct lpfc_dmabuf *dmabuf = NULL;
+ int rc = 0;
+
+ /* move forward in case of SLI4 FC port loopback test and pt2pt mode */
+@@ -471,25 +471,33 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vp
+ }
+ }
+
+- dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+- if (!dmabuf) {
++ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
++ if (!mboxq) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+- dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
+- if (!dmabuf->virt) {
+- rc = -ENOMEM;
+- goto fail_free_dmabuf;
+- }
+
+- mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+- if (!mboxq) {
+- rc = -ENOMEM;
+- goto fail_free_coherent;
++ /* Supply CSP's only if we are fabric connect or pt-to-pt connect */
++ if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
++ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
++ if (!dmabuf) {
++ rc = -ENOMEM;
++ goto fail;
++ }
++ dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
++ if (!dmabuf->virt) {
++ rc = -ENOMEM;
++ goto fail;
++ }
++ memcpy(dmabuf->virt, &phba->fc_fabparam,
++ sizeof(struct serv_parm));
+ }
++
+ vport->port_state = LPFC_FABRIC_CFG_LINK;
+- memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
+- lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
++ if (dmabuf)
++ lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
++ else
++ lpfc_reg_vfi(mboxq, vport, 0);
+
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
+ mboxq->vport = vport;
+@@ -497,17 +505,19 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vp
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ rc = -ENXIO;
+- goto fail_free_mbox;
++ goto fail;
+ }
+ return 0;
+
+-fail_free_mbox:
+- mempool_free(mboxq, phba->mbox_mem_pool);
+-fail_free_coherent:
+- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+-fail_free_dmabuf:
+- kfree(dmabuf);
+ fail:
++ if (mboxq)
++ mempool_free(mboxq, phba->mbox_mem_pool);
++ if (dmabuf) {
++ if (dmabuf->virt)
++ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
++ kfree(dmabuf);
++ }
++
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0289 Issue Register VFI failed: Err %d\n", rc);
+@@ -711,9 +721,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_v
+ * For FC we need to do some special processing because of the SLI
+ * Port's default settings of the Common Service Parameters.
+ */
+- if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) {
++ if ((phba->sli_rev == LPFC_SLI_REV4) &&
++ (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
+ /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+- if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed)
++ if (fabric_param_changed)
+ lpfc_unregister_fcf_prep(phba);
+
+ /* This should just update the VFI CSPs*/
+@@ -824,13 +835,21 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vp
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
++ vport->fc_flag |= FC_PT2PT;
+ spin_unlock_irq(shost->host_lock);
+
+- phba->fc_edtov = FF_DEF_EDTOV;
+- phba->fc_ratov = FF_DEF_RATOV;
++ /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
++ if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
++ lpfc_unregister_fcf_prep(phba);
++
++ spin_lock_irq(shost->host_lock);
++ vport->fc_flag &= ~FC_VFI_REGISTERED;
++ spin_unlock_irq(shost->host_lock);
++ phba->fc_topology_changed = 0;
++ }
++
+ rc = memcmp(&vport->fc_portname, &sp->portName,
+ sizeof(vport->fc_portname));
+- memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+ if (rc >= 0) {
+ /* This side will initiate the PLOGI */
+@@ -839,38 +858,14 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vp
+ spin_unlock_irq(shost->host_lock);
+
+ /*
+- * N_Port ID cannot be 0, set our to LocalID the other
+- * side will be RemoteID.
++ * N_Port ID cannot be 0, set our Id to LocalID
++ * the other side will be RemoteID.
+ */
+
+ /* not equal */
+ if (rc)
+ vport->fc_myDID = PT2PT_LocalID;
+
+- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+- if (!mbox)
+- goto fail;
+-
+- lpfc_config_link(phba, mbox);
+-
+- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+- mbox->vport = vport;
+- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+- if (rc == MBX_NOT_FINISHED) {
+- mempool_free(mbox, phba->mbox_mem_pool);
+- goto fail;
+- }
+-
+- /*
+- * For SLI4, the VFI/VPI are registered AFTER the
+- * Nport with the higher WWPN sends the PLOGI with
+- * an assigned NPortId.
+- */
+-
+- /* not equal */
+- if ((phba->sli_rev == LPFC_SLI_REV4) && rc)
+- lpfc_issue_reg_vfi(vport);
+-
+ /* Decrement ndlp reference count indicating that ndlp can be
+ * safely released when other references to it are done.
+ */
+@@ -912,29 +907,20 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vp
+ /* If we are pt2pt with another NPort, force NPIV off! */
+ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+
+- spin_lock_irq(shost->host_lock);
+- vport->fc_flag |= FC_PT2PT;
+- spin_unlock_irq(shost->host_lock);
+- /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+- if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
+- lpfc_unregister_fcf_prep(phba);
++ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
++ if (!mbox)
++ goto fail;
+
+- /* The FC_VFI_REGISTERED flag will get clear in the cmpl
+- * handler for unreg_vfi, but if we don't force the
+- * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
+- * built with the update bit set instead of just the vp bit to
+- * change the Nport ID. We need to have the vp set and the
+- * Upd cleared on topology changes.
+- */
+- spin_lock_irq(shost->host_lock);
+- vport->fc_flag &= ~FC_VFI_REGISTERED;
+- spin_unlock_irq(shost->host_lock);
+- phba->fc_topology_changed = 0;
+- lpfc_issue_reg_vfi(vport);
++ lpfc_config_link(phba, mbox);
++
++ mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
++ mbox->vport = vport;
++ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
++ if (rc == MBX_NOT_FINISHED) {
++ mempool_free(mbox, phba->mbox_mem_pool);
++ goto fail;
+ }
+
+- /* Start discovery - this should just do CLEAR_LA */
+- lpfc_disc_start(vport);
+ return 0;
+ fail:
+ return -ENXIO;
+@@ -1157,6 +1143,7 @@ flogifail:
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ spin_unlock_irq(&phba->hbalock);
++
+ lpfc_nlp_put(ndlp);
+
+ if (!lpfc_error_lost_link(irsp)) {
+@@ -3898,6 +3885,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vpor
+ IOCB_t *oldcmd;
+ struct lpfc_iocbq *elsiocb;
+ uint8_t *pcmd;
++ struct serv_parm *sp;
+ uint16_t cmdsize;
+ int rc;
+ ELS_PKT *els_pkt_ptr;
+@@ -3927,6 +3915,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vpor
+ "Issue ACC: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ break;
++ case ELS_CMD_FLOGI:
+ case ELS_CMD_PLOGI:
+ cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+@@ -3944,10 +3933,34 @@ lpfc_els_rsp_acc(struct lpfc_vport *vpor
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t);
+- memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
++ sp = (struct serv_parm *)pcmd;
++
++ if (flag == ELS_CMD_FLOGI) {
++ /* Copy the received service parameters back */
++ memcpy(sp, &phba->fc_fabparam,
++ sizeof(struct serv_parm));
++
++ /* Clear the F_Port bit */
++ sp->cmn.fPort = 0;
++
++ /* Mark all class service parameters as invalid */
++ sp->cls1.classValid = 0;
++ sp->cls2.classValid = 0;
++ sp->cls3.classValid = 0;
++ sp->cls4.classValid = 0;
++
++ /* Copy our worldwide names */
++ memcpy(&sp->portName, &vport->fc_sparam.portName,
++ sizeof(struct lpfc_name));
++ memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
++ sizeof(struct lpfc_name));
++ } else {
++ memcpy(pcmd, &vport->fc_sparam,
++ sizeof(struct serv_parm));
++ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+- "Issue ACC PLOGI: did:x%x flg:x%x",
++ "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ break;
+ case ELS_CMD_PRLO:
+@@ -5739,7 +5752,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vp
+ IOCB_t *icmd = &cmdiocb->iocb;
+ struct serv_parm *sp;
+ LPFC_MBOXQ_t *mbox;
+- struct ls_rjt stat;
+ uint32_t cmd, did;
+ int rc;
+ uint32_t fc_flag = 0;
+@@ -5765,135 +5777,92 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vp
+ return 1;
+ }
+
+- if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
+- /* For a FLOGI we accept, then if our portname is greater
+- * then the remote portname we initiate Nport login.
+- */
++ (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
+
+- rc = memcmp(&vport->fc_portname, &sp->portName,
+- sizeof(struct lpfc_name));
+
+- if (!rc) {
+- if (phba->sli_rev < LPFC_SLI_REV4) {
+- mbox = mempool_alloc(phba->mbox_mem_pool,
+- GFP_KERNEL);
+- if (!mbox)
+- return 1;
+- lpfc_linkdown(phba);
+- lpfc_init_link(phba, mbox,
+- phba->cfg_topology,
+- phba->cfg_link_speed);
+- mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+- mbox->vport = vport;
+- rc = lpfc_sli_issue_mbox(phba, mbox,
+- MBX_NOWAIT);
+- lpfc_set_loopback_flag(phba);
+- if (rc == MBX_NOT_FINISHED)
+- mempool_free(mbox, phba->mbox_mem_pool);
+- return 1;
+- } else {
+- /* abort the flogi coming back to ourselves
+- * due to external loopback on the port.
+- */
+- lpfc_els_abort_flogi(phba);
+- return 0;
+- }
+- } else if (rc > 0) { /* greater than */
+- spin_lock_irq(shost->host_lock);
+- vport->fc_flag |= FC_PT2PT_PLOGI;
+- spin_unlock_irq(shost->host_lock);
++ /*
++ * If our portname is greater than the remote portname,
++ * then we initiate Nport login.
++ */
+
+- /* If we have the high WWPN we can assign our own
+- * myDID; otherwise, we have to WAIT for a PLOGI
+- * from the remote NPort to find out what it
+- * will be.
+- */
+- vport->fc_myDID = PT2PT_LocalID;
+- } else
+- vport->fc_myDID = PT2PT_RemoteID;
++ rc = memcmp(&vport->fc_portname, &sp->portName,
++ sizeof(struct lpfc_name));
+
+- /*
+- * The vport state should go to LPFC_FLOGI only
+- * AFTER we issue a FLOGI, not receive one.
++ if (!rc) {
++ if (phba->sli_rev < LPFC_SLI_REV4) {
++ mbox = mempool_alloc(phba->mbox_mem_pool,
++ GFP_KERNEL);
++ if (!mbox)
++ return 1;
++ lpfc_linkdown(phba);
++ lpfc_init_link(phba, mbox,
++ phba->cfg_topology,
++ phba->cfg_link_speed);
++ mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
++ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
++ mbox->vport = vport;
++ rc = lpfc_sli_issue_mbox(phba, mbox,
++ MBX_NOWAIT);
++ lpfc_set_loopback_flag(phba);
++ if (rc == MBX_NOT_FINISHED)
++ mempool_free(mbox, phba->mbox_mem_pool);
++ return 1;
++ }
++
++ /* abort the flogi coming back to ourselves
++ * due to external loopback on the port.
+ */
++ lpfc_els_abort_flogi(phba);
++ return 0;
++
++ } else if (rc > 0) { /* greater than */
+ spin_lock_irq(shost->host_lock);
+- fc_flag = vport->fc_flag;
+- port_state = vport->port_state;
+- vport->fc_flag |= FC_PT2PT;
+- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
++ vport->fc_flag |= FC_PT2PT_PLOGI;
+ spin_unlock_irq(shost->host_lock);
+- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+- "3311 Rcv Flogi PS x%x new PS x%x "
+- "fc_flag x%x new fc_flag x%x\n",
+- port_state, vport->port_state,
+- fc_flag, vport->fc_flag);
+
+- /*
+- * We temporarily set fc_myDID to make it look like we are
+- * a Fabric. This is done just so we end up with the right
+- * did / sid on the FLOGI ACC rsp.
++ /* If we have the high WWPN we can assign our own
++ * myDID; otherwise, we have to WAIT for a PLOGI
++ * from the remote NPort to find out what it
++ * will be.
+ */
+- did = vport->fc_myDID;
+- vport->fc_myDID = Fabric_DID;
+-
++ vport->fc_myDID = PT2PT_LocalID;
+ } else {
+- /* Reject this request because invalid parameters */
+- stat.un.b.lsRjtRsvd0 = 0;
+- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+- stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+- stat.un.b.vendorUnique = 0;
+-
+- /*
+- * We temporarily set fc_myDID to make it look like we are
+- * a Fabric. This is done just so we end up with the right
+- * did / sid on the FLOGI LS_RJT rsp.
+- */
+- did = vport->fc_myDID;
+- vport->fc_myDID = Fabric_DID;
+-
+- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+- NULL);
++ vport->fc_myDID = PT2PT_RemoteID;
++ }
+
+- /* Now lets put fc_myDID back to what its supposed to be */
+- vport->fc_myDID = did;
++ /*
++ * The vport state should go to LPFC_FLOGI only
++ * AFTER we issue a FLOGI, not receive one.
++ */
++ spin_lock_irq(shost->host_lock);
++ fc_flag = vport->fc_flag;
++ port_state = vport->port_state;
++ vport->fc_flag |= FC_PT2PT;
++ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
++ spin_unlock_irq(shost->host_lock);
++ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
++ "3311 Rcv Flogi PS x%x new PS x%x "
++ "fc_flag x%x new fc_flag x%x\n",
++ port_state, vport->port_state,
++ fc_flag, vport->fc_flag);
+
+- return 1;
+- }
++ /*
++ * We temporarily set fc_myDID to make it look like we are
++ * a Fabric. This is done just so we end up with the right
++ * did / sid on the FLOGI ACC rsp.
++ */
++ did = vport->fc_myDID;
++ vport->fc_myDID = Fabric_DID;
+
+- /* send our FLOGI first */
+- if (vport->port_state < LPFC_FLOGI) {
+- vport->fc_myDID = 0;
+- lpfc_initial_flogi(vport);
+- vport->fc_myDID = Fabric_DID;
+- }
++ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+ /* Send back ACC */
+- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
++ lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
+
+ /* Now lets put fc_myDID back to what its supposed to be */
+ vport->fc_myDID = did;
+
+- if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
+-
+- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+- if (!mbox)
+- goto fail;
+-
+- lpfc_config_link(phba, mbox);
+-
+- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+- mbox->vport = vport;
+- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+- if (rc == MBX_NOT_FINISHED) {
+- mempool_free(mbox, phba->mbox_mem_pool);
+- goto fail;
+- }
+- }
+-
+ return 0;
+-fail:
+- return 1;
+ }
+
+ /**
+@@ -7345,7 +7314,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *p
+
+ /* reject till our FLOGI completes */
+ if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
+- (cmd != ELS_CMD_FLOGI)) {
++ (cmd != ELS_CMD_FLOGI)) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
+ goto lsrjt;
+@@ -7381,6 +7350,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *p
+ rjt_exp = LSEXP_NOTHING_MORE;
+ break;
+ }
++
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ if (!(phba->pport->fc_flag & FC_PT2PT) ||
+ (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -1083,7 +1083,7 @@ out:
+ }
+
+
+-static void
++void
+ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ {
+ struct lpfc_vport *vport = pmb->vport;
+@@ -1113,8 +1113,10 @@ lpfc_mbx_cmpl_local_config_link(struct l
+ /* Start discovery by sending a FLOGI. port_state is identically
+ * LPFC_FLOGI while waiting for FLOGI cmpl
+ */
+- if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
++ if (vport->port_state != LPFC_FLOGI)
+ lpfc_initial_flogi(vport);
++ else if (vport->fc_flag & FC_PT2PT)
++ lpfc_disc_start(vport);
+ return;
+
+ out:
+@@ -2963,8 +2965,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *p
+
+ out_free_mem:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+- kfree(dmabuf);
++ if (dmabuf) {
++ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
++ kfree(dmabuf);
++ }
+ return;
+ }
+
+--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
+@@ -280,38 +280,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport,
+ uint32_t *lp;
+ IOCB_t *icmd;
+ struct serv_parm *sp;
++ uint32_t ed_tov;
+ LPFC_MBOXQ_t *mbox;
+ struct ls_rjt stat;
+ int rc;
+
+ memset(&stat, 0, sizeof (struct ls_rjt));
+- if (vport->port_state <= LPFC_FDISC) {
+- /* Before responding to PLOGI, check for pt2pt mode.
+- * If we are pt2pt, with an outstanding FLOGI, abort
+- * the FLOGI and resend it first.
+- */
+- if (vport->fc_flag & FC_PT2PT) {
+- lpfc_els_abort_flogi(phba);
+- if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
+- /* If the other side is supposed to initiate
+- * the PLOGI anyway, just ACC it now and
+- * move on with discovery.
+- */
+- phba->fc_edtov = FF_DEF_EDTOV;
+- phba->fc_ratov = FF_DEF_RATOV;
+- /* Start discovery - this should just do
+- CLEAR_LA */
+- lpfc_disc_start(vport);
+- } else
+- lpfc_initial_flogi(vport);
+- } else {
+- stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+- stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+- ndlp, NULL);
+- return 0;
+- }
+- }
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+ sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+@@ -404,30 +378,46 @@ lpfc_rcv_plogi(struct lpfc_vport *vport,
+ /* Check for Nport to NPort pt2pt protocol */
+ if ((vport->fc_flag & FC_PT2PT) &&
+ !(vport->fc_flag & FC_PT2PT_PLOGI)) {
+-
+ /* rcv'ed PLOGI decides what our NPortId will be */
+ vport->fc_myDID = icmd->un.rcvels.parmRo;
+- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+- if (mbox == NULL)
+- goto out;
+- lpfc_config_link(phba, mbox);
+- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+- mbox->vport = vport;
+- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+- if (rc == MBX_NOT_FINISHED) {
+- mempool_free(mbox, phba->mbox_mem_pool);
+- goto out;
++
++ ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
++ if (sp->cmn.edtovResolution) {
++ /* E_D_TOV ticks are in nanoseconds */
++ ed_tov = (phba->fc_edtov + 999999) / 1000000;
+ }
++
+ /*
+- * For SLI4, the VFI/VPI are registered AFTER the
+- * Nport with the higher WWPN sends us a PLOGI with
+- * our assigned NPortId.
++ * For pt-to-pt, use the larger EDTOV
++ * RATOV = 2 * EDTOV
+ */
++ if (ed_tov > phba->fc_edtov)
++ phba->fc_edtov = ed_tov;
++ phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
++
++ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
++
++ /* Issue config_link / reg_vfi to account for updated TOV's */
++
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_issue_reg_vfi(vport);
++ else {
++ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
++ if (mbox == NULL)
++ goto out;
++ lpfc_config_link(phba, mbox);
++ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
++ mbox->vport = vport;
++ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
++ if (rc == MBX_NOT_FINISHED) {
++ mempool_free(mbox, phba->mbox_mem_pool);
++ goto out;
++ }
++ }
+
+ lpfc_can_disctmo(vport);
+ }
++
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto out;
+@@ -1038,7 +1028,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_
+ uint32_t *lp;
+ IOCB_t *irsp;
+ struct serv_parm *sp;
++ uint32_t ed_tov;
+ LPFC_MBOXQ_t *mbox;
++ int rc;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ rspiocb = cmdiocb->context_un.rsp_iocb;
+@@ -1053,6 +1045,16 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_
+ if (irsp->ulpStatus)
+ goto out;
+
++ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
++ if (!mbox) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
++ "0133 PLOGI: no memory for reg_login "
++ "Data: x%x x%x x%x x%x\n",
++ ndlp->nlp_DID, ndlp->nlp_state,
++ ndlp->nlp_flag, ndlp->nlp_rpi);
++ goto out;
++ }
++
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+@@ -1094,14 +1096,38 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_
+ ndlp->nlp_maxframe =
+ ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
+
+- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+- if (!mbox) {
+- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+- "0133 PLOGI: no memory for reg_login "
+- "Data: x%x x%x x%x x%x\n",
+- ndlp->nlp_DID, ndlp->nlp_state,
+- ndlp->nlp_flag, ndlp->nlp_rpi);
+- goto out;
++ if ((vport->fc_flag & FC_PT2PT) &&
++ (vport->fc_flag & FC_PT2PT_PLOGI)) {
++ ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
++ if (sp->cmn.edtovResolution) {
++ /* E_D_TOV ticks are in nanoseconds */
++ ed_tov = (phba->fc_edtov + 999999) / 1000000;
++ }
++
++ /*
++ * Use the larger EDTOV
++ * RATOV = 2 * EDTOV for pt-to-pt
++ */
++ if (ed_tov > phba->fc_edtov)
++ phba->fc_edtov = ed_tov;
++ phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
++
++ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
++
++ /* Issue config_link / reg_vfi to account for updated TOV's */
++ if (phba->sli_rev == LPFC_SLI_REV4) {
++ lpfc_issue_reg_vfi(vport);
++ } else {
++ lpfc_config_link(phba, mbox);
++
++ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
++ mbox->vport = vport;
++ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
++ if (rc == MBX_NOT_FINISHED) {
++ mempool_free(mbox, phba->mbox_mem_pool);
++ goto out;
++ }
++ }
+ }
+
+ lpfc_unreg_rpi(vport, ndlp);
--- /dev/null
+From 3e671fc5ed60d40e93f74aa58e890a63430c3c03 Mon Sep 17 00:00:00 2001
+From: James Smart <james.smart@avagotech.com>
+Date: Wed, 16 Dec 2015 18:11:55 -0500
+Subject: [PATCH 051/135] lpfc: Fix RegLogin failed error seen on Lancer FC
+ during port bounce
+
+[ Upstream commit 4b7789b71c916f79a3366da080101014473234c3 ]
+
+Fix RegLogin failed error seen on Lancer FC during port bounce
+
+Fix the statemachine and ref counting.
+
+Signed-off-by: Dick Kennedy <dick.kennedy@avagotech.com>
+Signed-off-by: James Smart <james.smart@avagotech.com>
+Reviewed-by: Hannes Reinicke <hare@suse.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/lpfc/lpfc_els.c | 14 +++++++++-----
+ drivers/scsi/lpfc/lpfc_hbadisc.c | 8 ++++----
+ drivers/scsi/lpfc/lpfc_nportdisc.c | 3 +++
+ 3 files changed, 16 insertions(+), 9 deletions(-)
+
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -3779,14 +3779,17 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba,
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_REG_LOGIN_ISSUE);
+ }
++
++ ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+ != MBX_NOT_FINISHED)
+ goto out;
+- else
+- /* Decrement the ndlp reference count we
+- * set for this failed mailbox command.
+- */
+- lpfc_nlp_put(ndlp);
++
++ /* Decrement the ndlp reference count we
++ * set for this failed mailbox command.
++ */
++ lpfc_nlp_put(ndlp);
++ ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+
+ /* ELS rsp: Cannot issue reg_login for <NPortid> */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+@@ -3843,6 +3846,7 @@ out:
+ * the routine lpfc_els_free_iocb.
+ */
+ cmdiocb->context1 = NULL;
++
+ }
+
+ lpfc_els_free_iocb(phba, cmdiocb);
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -3452,10 +3452,10 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+ spin_unlock_irq(shost->host_lock);
+- } else
+- /* Good status, call state machine */
+- lpfc_disc_state_machine(vport, ndlp, pmb,
+- NLP_EVT_CMPL_REG_LOGIN);
++ }
++
++ /* Call state machine */
++ lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
+@@ -2325,6 +2325,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_
+ if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+ ndlp->nlp_flag |= NLP_RPI_REGISTERED;
++ if (ndlp->nlp_flag & NLP_LOGO_ACC) {
++ lpfc_unreg_rpi(vport, ndlp);
++ }
+ } else {
+ if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
+ lpfc_drop_node(vport, ndlp);
--- /dev/null
+From eb953f2af619294fe16f591f6d7e929e93e1db1c Mon Sep 17 00:00:00 2001
+From: James Smart <james.smart@avagotech.com>
+Date: Wed, 16 Dec 2015 18:11:56 -0500
+Subject: [PATCH 052/135] lpfc: Fix driver crash when module parameter
+ lpfc_fcp_io_channel set to 16
+
+[ Upstream commit 6690e0d4fc5cccf74534abe0c9f9a69032bc02f0 ]
+
+Fix driver crash when module parameter lpfc_fcp_io_channel set to 16
+
+Signed-off-by: Dick Kennedy <dick.kennedy@avagotech.com>
+Signed-off-by: James Smart <james.smart@avagotech.com>
+Reviewed-by: Hannes Reinicke <hare@suse.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/lpfc/lpfc_init.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -8834,9 +8834,12 @@ found:
+ * already mapped to this phys_id.
+ */
+ if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
+- chann[saved_chann] =
+- cpup->channel_id;
+- saved_chann++;
++ if (saved_chann <=
++ LPFC_FCP_IO_CHAN_MAX) {
++ chann[saved_chann] =
++ cpup->channel_id;
++ saved_chann++;
++ }
+ goto out;
+ }
+
--- /dev/null
+From defa518102495b4e0c2c96b55f021657c9cc6432 Mon Sep 17 00:00:00 2001
+From: James Smart <james.smart@avagotech.com>
+Date: Wed, 16 Dec 2015 18:11:57 -0500
+Subject: [PATCH 053/135] lpfc: Fix crash in fcp command completion path.
+
+[ Upstream commit c90261dcd86e4eb5c9c1627fde037e902db8aefa ]
+
+Fix crash in fcp command completion path.
+
+Missed null check.
+
+Signed-off-by: Dick Kennedy <dick.kennedy@avagotech.com>
+Signed-off-by: James Smart <james.smart@avagotech.com>
+Reviewed-by: Hannes Reinicke <hare@suse.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/lpfc/lpfc_scsi.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -3908,9 +3908,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba
+ uint32_t logit = LOG_FCP;
+
+ /* Sanity check on return of outstanding command */
+- if (!(lpfc_cmd->pCmd))
+- return;
+ cmd = lpfc_cmd->pCmd;
++ if (!cmd)
++ return;
+ shost = cmd->device->host;
+
+ lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
--- /dev/null
+From 5ed87df593cdbb140123d01b291f88cc9091f476 Mon Sep 17 00:00:00 2001
+From: James Smart <james.smart@avagotech.com>
+Date: Wed, 16 Dec 2015 18:11:59 -0500
+Subject: [PATCH 054/135] lpfc: Fix RDP Speed reporting.
+
+[ Upstream commit 81e7517723fc17396ba91f59312b3177266ddbda ]
+
+Fix RDP Speed reporting.
+
+Signed-off-by: Dick Kennedy <dick.kennedy@avagotech.com>
+Signed-off-by: James Smart <james.smart@avagotech.com>
+Reviewed-by: Hannes Reinicke <hare@suse.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/lpfc/lpfc_els.c | 17 +++++++----------
+ 1 file changed, 7 insertions(+), 10 deletions(-)
+
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -4698,28 +4698,25 @@ lpfc_rdp_res_speed(struct fc_rdp_port_sp
+
+ desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
+
+- switch (phba->sli4_hba.link_state.speed) {
+- case LPFC_FC_LA_SPEED_1G:
++ switch (phba->fc_linkspeed) {
++ case LPFC_LINK_SPEED_1GHZ:
+ rdp_speed = RDP_PS_1GB;
+ break;
+- case LPFC_FC_LA_SPEED_2G:
++ case LPFC_LINK_SPEED_2GHZ:
+ rdp_speed = RDP_PS_2GB;
+ break;
+- case LPFC_FC_LA_SPEED_4G:
++ case LPFC_LINK_SPEED_4GHZ:
+ rdp_speed = RDP_PS_4GB;
+ break;
+- case LPFC_FC_LA_SPEED_8G:
++ case LPFC_LINK_SPEED_8GHZ:
+ rdp_speed = RDP_PS_8GB;
+ break;
+- case LPFC_FC_LA_SPEED_10G:
++ case LPFC_LINK_SPEED_10GHZ:
+ rdp_speed = RDP_PS_10GB;
+ break;
+- case LPFC_FC_LA_SPEED_16G:
++ case LPFC_LINK_SPEED_16GHZ:
+ rdp_speed = RDP_PS_16GB;
+ break;
+- case LPFC_FC_LA_SPEED_32G:
+- rdp_speed = RDP_PS_32GB;
+- break;
+ default:
+ rdp_speed = RDP_PS_UNKNOWN;
+ break;
--- /dev/null
+From 73e0304c829778d91116e22f78358d5b2ce07dbd Mon Sep 17 00:00:00 2001
+From: James Smart <james.smart@avagotech.com>
+Date: Wed, 16 Dec 2015 18:12:00 -0500
+Subject: [PATCH 055/135] lpfc: Fix RDP ACC being too long.
+
+[ Upstream commit eb8d68c9930f7f9c8f3f4a6059b051b32077a735 ]
+
+Fix RDP ACC being too long.
+
+Signed-off-by: Dick Kennedy <dick.kennedy@avagotech.com>
+Signed-off-by: James Smart <james.smart@avagotech.com>
+Reviewed-by: Hannes Reinicke <hare@suse.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/lpfc/lpfc_els.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -4792,6 +4792,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp = rdp_context->ndlp;
+ struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_iocbq *elsiocb;
++ struct ulp_bde64 *bpl;
+ IOCB_t *icmd;
+ uint8_t *pcmd;
+ struct ls_rjt *stat;
+@@ -4801,6 +4802,8 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba,
+
+ if (status != SUCCESS)
+ goto error;
++
++ /* This will change once we know the true size of the RDP payload */
+ cmdsize = sizeof(struct fc_rdp_res_frame);
+
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
+@@ -4841,6 +4844,13 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba,
+
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+
++ /* Now that we know the true size of the payload, update the BPL */
++ bpl = (struct ulp_bde64 *)
++ (((struct lpfc_dmabuf *)(elsiocb->context3))->virt);
++ bpl->tus.f.bdeSize = (fec_size + RDP_DESC_PAYLOAD_SIZE + 8);
++ bpl->tus.f.bdeFlags = 0;
++ bpl->tus.w = le32_to_cpu(bpl->tus.w);
++
+ phba->fc_stat.elsXmitACC++;
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
--- /dev/null
+From f67fd277e0e1e9b57bf42dc1a56a10b82ee3a5f2 Mon Sep 17 00:00:00 2001
+From: James Smart <james.smart@avagotech.com>
+Date: Wed, 16 Dec 2015 18:12:03 -0500
+Subject: [PATCH 056/135] lpfc: Fix mbox reuse in PLOGI completion
+
+[ Upstream commit 01c73bbcd7cc4f31f45a1b0caeacdba46acd9c9c ]
+
+Fix mbox reuse in PLOGI completion. Moved allocations so that buffer
+properly init'd.
+
+Signed-off-by: Dick Kennedy <dick.kennedy@avagotech.com>
+Signed-off-by: James Smart <james.smart@avagotech.com>
+Reviewed-by: Hannes Reinicke <hare@suse.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/lpfc/lpfc_nportdisc.c | 31 +++++++++++++++++++++----------
+ 1 file changed, 21 insertions(+), 10 deletions(-)
+
+--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
+@@ -1045,16 +1045,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_
+ if (irsp->ulpStatus)
+ goto out;
+
+- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+- if (!mbox) {
+- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+- "0133 PLOGI: no memory for reg_login "
+- "Data: x%x x%x x%x x%x\n",
+- ndlp->nlp_DID, ndlp->nlp_state,
+- ndlp->nlp_flag, ndlp->nlp_rpi);
+- goto out;
+- }
+-
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+@@ -1118,6 +1108,17 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_issue_reg_vfi(vport);
+ } else {
++ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
++ if (!mbox) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
++ "0133 PLOGI: no memory "
++ "for config_link "
++ "Data: x%x x%x x%x x%x\n",
++ ndlp->nlp_DID, ndlp->nlp_state,
++ ndlp->nlp_flag, ndlp->nlp_rpi);
++ goto out;
++ }
++
+ lpfc_config_link(phba, mbox);
+
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+@@ -1132,6 +1133,16 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_
+
+ lpfc_unreg_rpi(vport, ndlp);
+
++ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
++ if (!mbox) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
++ "0018 PLOGI: no memory for reg_login "
++ "Data: x%x x%x x%x x%x\n",
++ ndlp->nlp_DID, ndlp->nlp_state,
++ ndlp->nlp_flag, ndlp->nlp_rpi);
++ goto out;
++ }
++
+ if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+ (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
+ switch (ndlp->nlp_DID) {
--- /dev/null
+From 1a7ae138e2131e9828f50eb577dc964efbef0d68 Mon Sep 17 00:00:00 2001
+From: James Smart <james.smart@avagotech.com>
+Date: Wed, 16 Dec 2015 18:12:04 -0500
+Subject: [PATCH 057/135] lpfc: Fix external loopback failure.
+
+[ Upstream commit 4360ca9c24388e44cb0e14861a62fff43cf225c0 ]
+
+Fix external loopback failure.
+
+Rx sequence reassembly was incorrect.
+
+Signed-off-by: Dick Kennedy <dick.kennedy@avagotech.com>
+Signed-off-by: James Smart <james.smart@avagotech.com>
+Reviewed-by: Hannes Reinicke <hare@suse.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/lpfc/lpfc_sli.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -14842,10 +14842,12 @@ lpfc_fc_frame_add(struct lpfc_vport *vpo
+ struct lpfc_dmabuf *h_buf;
+ struct hbq_dmabuf *seq_dmabuf = NULL;
+ struct hbq_dmabuf *temp_dmabuf = NULL;
++ uint8_t found = 0;
+
+ INIT_LIST_HEAD(&dmabuf->dbuf.list);
+ dmabuf->time_stamp = jiffies;
+ new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
++
+ /* Use the hdr_buf to find the sequence that this frame belongs to */
+ list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
+ temp_hdr = (struct fc_frame_header *)h_buf->virt;
+@@ -14885,7 +14887,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vpo
+ return seq_dmabuf;
+ }
+ /* find the correct place in the sequence to insert this frame */
+- list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
++ d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
++ while (!found) {
+ temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
+ /*
+@@ -14895,9 +14898,17 @@ lpfc_fc_frame_add(struct lpfc_vport *vpo
+ if (be16_to_cpu(new_hdr->fh_seq_cnt) >
+ be16_to_cpu(temp_hdr->fh_seq_cnt)) {
+ list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
+- return seq_dmabuf;
++ found = 1;
++ break;
+ }
++
++ if (&d_buf->list == &seq_dmabuf->dbuf.list)
++ break;
++ d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
+ }
++
++ if (found)
++ return seq_dmabuf;
+ return NULL;
+ }
+
--- /dev/null
+From 9c19e97673f6e6644b6236d8f5c40b8d5eb37ad7 Mon Sep 17 00:00:00 2001
+From: Ursula Braun <ubraun@linux.vnet.ibm.com>
+Date: Fri, 11 Dec 2015 12:27:55 +0100
+Subject: [PATCH 058/135] qeth: initialize net_device with carrier off
+
+[ Upstream commit e5ebe63214d44d4dcf43df02edf3613e04d671b9 ]
+
+/sys/class/net/<interface>/operstate for an active qeth network
+interface often shows "unknown", which translates to "state UNKNOWN"
+in the output of "ip link show". It is caused by a missing initialization
+of the __LINK_STATE_NOCARRIER bit in the net_device state field.
+This patch adds a netif_carrier_off() invocation when creating the
+net_device for a qeth device.
+
+Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
+Acked-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+Reference-ID: Bugzilla 133209
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/net/qeth_l2_main.c | 1 +
+ drivers/s390/net/qeth_l3_main.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -1127,6 +1127,7 @@ static int qeth_l2_setup_netdev(struct q
+ qeth_l2_request_initial_mac(card);
+ SET_NETDEV_DEV(card->dev, &card->gdev->dev);
+ netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
++ netif_carrier_off(card->dev);
+ return register_netdev(card->dev);
+ }
+
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -3220,6 +3220,7 @@ static int qeth_l3_setup_netdev(struct q
+
+ SET_NETDEV_DEV(card->dev, &card->gdev->dev);
+ netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
++ netif_carrier_off(card->dev);
+ return register_netdev(card->dev);
+ }
+
--- /dev/null
+From b25aac2e34a9783d2fde366d9ce12a8c0d525f50 Mon Sep 17 00:00:00 2001
+From: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Date: Mon, 25 Jan 2016 10:30:27 +0100
+Subject: [PATCH 059/135] s390/cio: fix measurement characteristics memleak
+
+[ Upstream commit 0d9bfe9123cfde59bf5c2e375b59d2a7d5061c4c ]
+
+Measurement characteristics are allocated during channel path
+registration but not freed during deregistration. Fix this by
+embedding these characteristics inside struct channel_path.
+
+Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Reviewed-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/cio/chp.c | 6 +++---
+ drivers/s390/cio/chp.h | 2 +-
+ drivers/s390/cio/chsc.c | 16 ++--------------
+ 3 files changed, 6 insertions(+), 18 deletions(-)
+
+--- a/drivers/s390/cio/chp.c
++++ b/drivers/s390/cio/chp.c
+@@ -139,11 +139,11 @@ static ssize_t chp_measurement_chars_rea
+
+ device = container_of(kobj, struct device, kobj);
+ chp = to_channelpath(device);
+- if (!chp->cmg_chars)
++ if (chp->cmg == -1)
+ return 0;
+
+- return memory_read_from_buffer(buf, count, &off,
+- chp->cmg_chars, sizeof(struct cmg_chars));
++ return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
++ sizeof(chp->cmg_chars));
+ }
+
+ static struct bin_attribute chp_measurement_chars_attr = {
+--- a/drivers/s390/cio/chp.h
++++ b/drivers/s390/cio/chp.h
+@@ -48,7 +48,7 @@ struct channel_path {
+ /* Channel-measurement related stuff: */
+ int cmg;
+ int shared;
+- void *cmg_chars;
++ struct cmg_chars cmg_chars;
+ };
+
+ /* Return channel_path struct for given chpid. */
+--- a/drivers/s390/cio/chsc.c
++++ b/drivers/s390/cio/chsc.c
+@@ -967,22 +967,19 @@ static void
+ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
+ struct cmg_chars *chars)
+ {
+- struct cmg_chars *cmg_chars;
+ int i, mask;
+
+- cmg_chars = chp->cmg_chars;
+ for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
+ mask = 0x80 >> (i + 3);
+ if (cmcv & mask)
+- cmg_chars->values[i] = chars->values[i];
++ chp->cmg_chars.values[i] = chars->values[i];
+ else
+- cmg_chars->values[i] = 0;
++ chp->cmg_chars.values[i] = 0;
+ }
+ }
+
+ int chsc_get_channel_measurement_chars(struct channel_path *chp)
+ {
+- struct cmg_chars *cmg_chars;
+ int ccode, ret;
+
+ struct {
+@@ -1006,11 +1003,6 @@ int chsc_get_channel_measurement_chars(s
+ u32 data[NR_MEASUREMENT_CHARS];
+ } __attribute__ ((packed)) *scmc_area;
+
+- chp->cmg_chars = NULL;
+- cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
+- if (!cmg_chars)
+- return -ENOMEM;
+-
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scmc_area = chsc_page;
+@@ -1042,14 +1034,10 @@ int chsc_get_channel_measurement_chars(s
+ /* No cmg-dependent data. */
+ goto out;
+ }
+- chp->cmg_chars = cmg_chars;
+ chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
+ (struct cmg_chars *) &scmc_area->data);
+ out:
+ spin_unlock_irq(&chsc_page_lock);
+- if (!chp->cmg_chars)
+- kfree(cmg_chars);
+-
+ return ret;
+ }
+
--- /dev/null
+From 3e0c8a647d389d4b8bc7fe244d2d1160dcc7ff98 Mon Sep 17 00:00:00 2001
+From: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Date: Mon, 25 Jan 2016 10:31:33 +0100
+Subject: [PATCH 060/135] s390/cio: ensure consistent measurement state
+
+[ Upstream commit 61f0bfcf8020f02eb09adaef96745d1c1d1b3623 ]
+
+Make sure that in all cases where we could not obtain measurement
+characteristics the associated fields are set to invalid values.
+
+Note: without this change the "shared" capability of a channel path
+for which we could not obtain the measurement characteristics was
+incorrectly displayed as 0 (not shared). We will now correctly
+report "unknown" in this case.
+
+Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Reviewed-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/cio/chp.c | 13 +++++--------
+ drivers/s390/cio/chsc.c | 12 ++++++++----
+ 2 files changed, 13 insertions(+), 12 deletions(-)
+
+--- a/drivers/s390/cio/chp.c
++++ b/drivers/s390/cio/chp.c
+@@ -466,14 +466,11 @@ int chp_new(struct chp_id chpid)
+ ret = -ENODEV;
+ goto out_free;
+ }
+- /* Get channel-measurement characteristics. */
+- if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
+- ret = chsc_get_channel_measurement_chars(chp);
+- if (ret)
+- goto out_free;
+- } else {
+- chp->cmg = -1;
+- }
++
++ ret = chsc_get_channel_measurement_chars(chp);
++ if (ret)
++ goto out_free;
++
+ dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
+
+ /* make it known to the system */
+--- a/drivers/s390/cio/chsc.c
++++ b/drivers/s390/cio/chsc.c
+@@ -1003,6 +1003,12 @@ int chsc_get_channel_measurement_chars(s
+ u32 data[NR_MEASUREMENT_CHARS];
+ } __attribute__ ((packed)) *scmc_area;
+
++ chp->shared = -1;
++ chp->cmg = -1;
++
++ if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
++ return 0;
++
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scmc_area = chsc_page;
+@@ -1023,11 +1029,9 @@ int chsc_get_channel_measurement_chars(s
+ scmc_area->response.code);
+ goto out;
+ }
+- if (scmc_area->not_valid) {
+- chp->cmg = -1;
+- chp->shared = -1;
++ if (scmc_area->not_valid)
+ goto out;
+- }
++
+ chp->cmg = scmc_area->cmg;
+ chp->shared = scmc_area->shared;
+ if (chp->cmg != 2 && chp->cmg != 3) {
--- /dev/null
+From fa6df6413bbe96f3cfc0196dd60368aff4e809ef Mon Sep 17 00:00:00 2001
+From: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Date: Mon, 25 Jan 2016 10:32:51 +0100
+Subject: [PATCH 061/135] s390/cio: update measurement characteristics
+
+[ Upstream commit 9f3d6d7a40a178b8a5b5274f4e55fec8c30147c9 ]
+
+Per channel path measurement characteristics are obtained during channel
+path registration. However if some properties of a channel path change
+we don't update the measurement characteristics.
+
+Make sure to update the characteristics when we change the properties of
+a channel path or receive a notification from FW about such a change.
+
+Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Reviewed-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/cio/chp.c | 12 +++++-------
+ drivers/s390/cio/chsc.c | 17 +++++++++++++++--
+ 2 files changed, 20 insertions(+), 9 deletions(-)
+
+--- a/drivers/s390/cio/chp.c
++++ b/drivers/s390/cio/chp.c
+@@ -416,7 +416,8 @@ static void chp_release(struct device *d
+ * chp_update_desc - update channel-path description
+ * @chp - channel-path
+ *
+- * Update the channel-path description of the specified channel-path.
++ * Update the channel-path description of the specified channel-path
++ * including channel measurement related information.
+ * Return zero on success, non-zero otherwise.
+ */
+ int chp_update_desc(struct channel_path *chp)
+@@ -428,8 +429,10 @@ int chp_update_desc(struct channel_path
+ return rc;
+
+ rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
++ if (rc)
++ return rc;
+
+- return rc;
++ return chsc_get_channel_measurement_chars(chp);
+ }
+
+ /**
+@@ -466,11 +469,6 @@ int chp_new(struct chp_id chpid)
+ ret = -ENODEV;
+ goto out_free;
+ }
+-
+- ret = chsc_get_channel_measurement_chars(chp);
+- if (ret)
+- goto out_free;
+-
+ dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
+
+ /* make it known to the system */
+--- a/drivers/s390/cio/chsc.c
++++ b/drivers/s390/cio/chsc.c
+@@ -14,6 +14,7 @@
+ #include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/device.h>
++#include <linux/mutex.h>
+ #include <linux/pci.h>
+
+ #include <asm/cio.h>
+@@ -224,8 +225,9 @@ out_unreg:
+
+ void chsc_chp_offline(struct chp_id chpid)
+ {
+- char dbf_txt[15];
++ struct channel_path *chp = chpid_to_chp(chpid);
+ struct chp_link link;
++ char dbf_txt[15];
+
+ sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
+ CIO_TRACE_EVENT(2, dbf_txt);
+@@ -236,6 +238,11 @@ void chsc_chp_offline(struct chp_id chpi
+ link.chpid = chpid;
+ /* Wait until previous actions have settled. */
+ css_wait_for_slow_path();
++
++ mutex_lock(&chp->lock);
++ chp_update_desc(chp);
++ mutex_unlock(&chp->lock);
++
+ for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
+ }
+
+@@ -690,8 +697,9 @@ static void chsc_process_crw(struct crw
+
+ void chsc_chp_online(struct chp_id chpid)
+ {
+- char dbf_txt[15];
++ struct channel_path *chp = chpid_to_chp(chpid);
+ struct chp_link link;
++ char dbf_txt[15];
+
+ sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
+ CIO_TRACE_EVENT(2, dbf_txt);
+@@ -701,6 +709,11 @@ void chsc_chp_online(struct chp_id chpid
+ link.chpid = chpid;
+ /* Wait until previous actions have settled. */
+ css_wait_for_slow_path();
++
++ mutex_lock(&chp->lock);
++ chp_update_desc(chp);
++ mutex_unlock(&chp->lock);
++
+ for_each_subchannel_staged(__s390_process_res_acc, NULL,
+ &link);
+ css_schedule_reprobe();
--- /dev/null
+From 301709c17bc50faa7f2c77797e454f3286d8ba39 Mon Sep 17 00:00:00 2001
+From: Nicholas Krause <xerofoify@gmail.com>
+Date: Tue, 5 Jan 2016 14:32:54 -0500
+Subject: [PATCH 062/135] megaraid: Fix possible NULL pointer dereference in
+ mraid_mm_ioctl
+
+[ Upstream commit 7296f62f0322d808362b21064deb34f20799c20d ]
+
+This adds the needed check after the call to the function
+mraid_mm_alloc_kioc in order to make sure that this function has not
+returned NULL and therefore makes sure we do not dereference a NULL
+pointer if one is returned by mraid_mm_alloc_kioc. Furthermore, add
+needed comments explaining that this function call can return NULL if
+the list head is empty for the pointer passed in order to allow future
+users to understand this required pointer check.
+
+Signed-off-by: Nicholas Krause <xerofoify@gmail.com>
+Acked-by: Sumit Saxena <sumit.saxena@avagotech.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/megaraid/megaraid_mm.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/scsi/megaraid/megaraid_mm.c
++++ b/drivers/scsi/megaraid/megaraid_mm.c
+@@ -179,8 +179,12 @@ mraid_mm_ioctl(struct file *filep, unsig
+
+ /*
+ * The following call will block till a kioc is available
++ * or return NULL if the list head is empty for the pointer
++ * of type mraid_mmapt passed to mraid_mm_alloc_kioc
+ */
+ kioc = mraid_mm_alloc_kioc(adp);
++ if (!kioc)
++ return -ENXIO;
+
+ /*
+ * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
--- /dev/null
+From b3659ad8b1a778e2e5a2845c40512b2598141159 Mon Sep 17 00:00:00 2001
+From: Sumit Saxena <sumit.saxena@avagotech.com>
+Date: Thu, 28 Jan 2016 21:04:22 +0530
+Subject: [PATCH 063/135] megaraid_sas: Do not allow PCI access during OCR
+
+[ Upstream commit 11c71cb4ab7cd901b9d6f0ff267c102778c1c8ef ]
+
+This patch will do synchronization between OCR function and AEN function
+using "reset_mutex" lock. reset_mutex will be acquired only in the
+first half of the AEN function which issues a DCMD. Second half of the
+function which calls SCSI API (scsi_add_device/scsi_remove_device)
+should be out of reset_mutex to avoid deadlock between scsi_eh thread
+and driver.
+
+During chip reset (inside OCR function), there should not be any PCI
+access and AEN function (which is called in delayed context) may be
+firing DCMDs (doing PCI writes) when chip reset is happening in parallel
+which will cause FW fault. This patch will solve the problem by making
+AEN thread and OCR thread mutually exclusive.
+
+Signed-off-by: Sumit Saxena <sumit.saxena@avagotech.com>
+Signed-off-by: Kashyap Desai <kashyap.desai@avagotech.com>
+Reviewed-by: Tomas Henzl <thenzl@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/megaraid/megaraid_sas.h | 2
+ drivers/scsi/megaraid/megaraid_sas_base.c | 254 +++++++++---------------------
+ 2 files changed, 82 insertions(+), 174 deletions(-)
+
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -1083,6 +1083,8 @@ struct megasas_ctrl_info {
+
+ #define VD_EXT_DEBUG 0
+
++#define SCAN_PD_CHANNEL 0x1
++#define SCAN_VD_CHANNEL 0x2
+
+ enum MR_SCSI_CMD_TYPE {
+ READ_WRITE_LDIO = 0,
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -5476,7 +5476,6 @@ static int megasas_probe_one(struct pci_
+ spin_lock_init(&instance->hba_lock);
+ spin_lock_init(&instance->completion_lock);
+
+- mutex_init(&instance->aen_mutex);
+ mutex_init(&instance->reset_mutex);
+
+ /*
+@@ -6443,10 +6442,10 @@ static int megasas_mgmt_ioctl_aen(struct
+ }
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+- mutex_lock(&instance->aen_mutex);
++ mutex_lock(&instance->reset_mutex);
+ error = megasas_register_aen(instance, aen.seq_num,
+ aen.class_locale_word);
+- mutex_unlock(&instance->aen_mutex);
++ mutex_unlock(&instance->reset_mutex);
+ return error;
+ }
+
+@@ -6648,6 +6647,7 @@ megasas_aen_polling(struct work_struct *
+ int i, j, doscan = 0;
+ u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
+ int error;
++ u8 dcmd_ret = 0;
+
+ if (!instance) {
+ printk(KERN_ERR "invalid instance!\n");
+@@ -6660,16 +6660,7 @@ megasas_aen_polling(struct work_struct *
+ wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+ /* Don't run the event workqueue thread if OCR is running */
+- for (i = 0; i < wait_time; i++) {
+- if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
+- break;
+- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+- dev_notice(&instance->pdev->dev, "%s waiting for "
+- "controller reset to finish for scsi%d\n",
+- __func__, instance->host->host_no);
+- }
+- msleep(1000);
+- }
++ mutex_lock(&instance->reset_mutex);
+
+ instance->ev = NULL;
+ host = instance->host;
+@@ -6677,212 +6668,127 @@ megasas_aen_polling(struct work_struct *
+ megasas_decode_evt(instance);
+
+ switch (le32_to_cpu(instance->evt_detail->code)) {
+- case MR_EVT_PD_INSERTED:
+- if (megasas_get_pd_list(instance) == 0) {
+- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+- for (j = 0;
+- j < MEGASAS_MAX_DEV_PER_CHANNEL;
+- j++) {
+-
+- pd_index =
+- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+-
+- sdev1 = scsi_device_lookup(host, i, j, 0);
+-
+- if (instance->pd_list[pd_index].driveState
+- == MR_PD_STATE_SYSTEM) {
+- if (!sdev1)
+- scsi_add_device(host, i, j, 0);
+-
+- if (sdev1)
+- scsi_device_put(sdev1);
+- }
+- }
+- }
+- }
+- doscan = 0;
+- break;
+
++ case MR_EVT_PD_INSERTED:
+ case MR_EVT_PD_REMOVED:
+- if (megasas_get_pd_list(instance) == 0) {
+- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+- for (j = 0;
+- j < MEGASAS_MAX_DEV_PER_CHANNEL;
+- j++) {
+-
+- pd_index =
+- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+-
+- sdev1 = scsi_device_lookup(host, i, j, 0);
+-
+- if (instance->pd_list[pd_index].driveState
+- == MR_PD_STATE_SYSTEM) {
+- if (sdev1)
+- scsi_device_put(sdev1);
+- } else {
+- if (sdev1) {
+- scsi_remove_device(sdev1);
+- scsi_device_put(sdev1);
+- }
+- }
+- }
+- }
+- }
+- doscan = 0;
++ dcmd_ret = megasas_get_pd_list(instance);
++ if (dcmd_ret == 0)
++ doscan = SCAN_PD_CHANNEL;
+ break;
+
+ case MR_EVT_LD_OFFLINE:
+ case MR_EVT_CFG_CLEARED:
+ case MR_EVT_LD_DELETED:
+- if (!instance->requestorId ||
+- megasas_get_ld_vf_affiliation(instance, 0)) {
+- if (megasas_ld_list_query(instance,
+- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+- megasas_get_ld_list(instance);
+- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+- for (j = 0;
+- j < MEGASAS_MAX_DEV_PER_CHANNEL;
+- j++) {
+-
+- ld_index =
+- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+-
+- sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+-
+- if (instance->ld_ids[ld_index]
+- != 0xff) {
+- if (sdev1)
+- scsi_device_put(sdev1);
+- } else {
+- if (sdev1) {
+- scsi_remove_device(sdev1);
+- scsi_device_put(sdev1);
+- }
+- }
+- }
+- }
+- doscan = 0;
+- }
+- break;
+ case MR_EVT_LD_CREATED:
+ if (!instance->requestorId ||
+- megasas_get_ld_vf_affiliation(instance, 0)) {
+- if (megasas_ld_list_query(instance,
+- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+- megasas_get_ld_list(instance);
+- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+- for (j = 0;
+- j < MEGASAS_MAX_DEV_PER_CHANNEL;
+- j++) {
+- ld_index =
+- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+-
+- sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+-
+- if (instance->ld_ids[ld_index]
+- != 0xff) {
+- if (!sdev1)
+- scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+- }
+- if (sdev1)
+- scsi_device_put(sdev1);
+- }
+- }
+- doscan = 0;
+- }
++ (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
++ dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
++
++ if (dcmd_ret == 0)
++ doscan = SCAN_VD_CHANNEL;
++
+ break;
++
+ case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+ case MR_EVT_FOREIGN_CFG_IMPORTED:
+ case MR_EVT_LD_STATE_CHANGE:
+- doscan = 1;
++ dcmd_ret = megasas_get_pd_list(instance);
++
++ if (dcmd_ret != 0)
++ break;
++
++ if (!instance->requestorId ||
++ (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
++ dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
++
++ if (dcmd_ret != 0)
++ break;
++
++ doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
++ dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
++ instance->host->host_no);
+ break;
++
+ case MR_EVT_CTRL_PROP_CHANGED:
+- megasas_get_ctrl_info(instance);
+- break;
++ dcmd_ret = megasas_get_ctrl_info(instance);
++ break;
+ default:
+ doscan = 0;
+ break;
+ }
+ } else {
+ dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
++ mutex_unlock(&instance->reset_mutex);
+ kfree(ev);
+ return;
+ }
+
+- if (doscan) {
+- dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
+- instance->host->host_no);
+- if (megasas_get_pd_list(instance) == 0) {
+- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+- pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+- sdev1 = scsi_device_lookup(host, i, j, 0);
+- if (instance->pd_list[pd_index].driveState ==
+- MR_PD_STATE_SYSTEM) {
+- if (!sdev1) {
+- scsi_add_device(host, i, j, 0);
+- }
+- if (sdev1)
+- scsi_device_put(sdev1);
+- } else {
+- if (sdev1) {
+- scsi_remove_device(sdev1);
+- scsi_device_put(sdev1);
+- }
++ mutex_unlock(&instance->reset_mutex);
++
++ if (doscan & SCAN_PD_CHANNEL) {
++ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
++ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
++ pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
++ sdev1 = scsi_device_lookup(host, i, j, 0);
++ if (instance->pd_list[pd_index].driveState ==
++ MR_PD_STATE_SYSTEM) {
++ if (!sdev1)
++ scsi_add_device(host, i, j, 0);
++ else
++ scsi_device_put(sdev1);
++ } else {
++ if (sdev1) {
++ scsi_remove_device(sdev1);
++ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
++ }
+
+- if (!instance->requestorId ||
+- megasas_get_ld_vf_affiliation(instance, 0)) {
+- if (megasas_ld_list_query(instance,
+- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+- megasas_get_ld_list(instance);
+- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
+- j++) {
+- ld_index =
+- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+-
+- sdev1 = scsi_device_lookup(host,
+- MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+- if (instance->ld_ids[ld_index]
+- != 0xff) {
+- if (!sdev1)
+- scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+- else
+- scsi_device_put(sdev1);
+- } else {
+- if (sdev1) {
+- scsi_remove_device(sdev1);
+- scsi_device_put(sdev1);
+- }
++ if (doscan & SCAN_VD_CHANNEL) {
++ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
++ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
++ ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
++ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
++ if (instance->ld_ids[ld_index] != 0xff) {
++ if (!sdev1)
++ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
++ else
++ scsi_device_put(sdev1);
++ } else {
++ if (sdev1) {
++ scsi_remove_device(sdev1);
++ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ }
+
+- if (instance->aen_cmd != NULL) {
+- kfree(ev);
+- return ;
+- }
+-
+- seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
++ if (dcmd_ret == 0)
++ seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
++ else
++ seq_num = instance->last_seq_num;
+
+ /* Register AEN with FW for latest sequence number plus 1 */
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+- mutex_lock(&instance->aen_mutex);
++
++ if (instance->aen_cmd != NULL) {
++ kfree(ev);
++ return;
++ }
++
++ mutex_lock(&instance->reset_mutex);
+ error = megasas_register_aen(instance, seq_num,
+ class_locale.word);
+- mutex_unlock(&instance->aen_mutex);
+-
+ if (error)
+- dev_err(&instance->pdev->dev, "register aen failed error %x\n", error);
++ dev_err(&instance->pdev->dev,
++ "register aen failed error %x\n", error);
+
++ mutex_unlock(&instance->reset_mutex);
+ kfree(ev);
+ }
+
--- /dev/null
+From 1e089e2cf0c34407a09bbf8e75ca34082cb9d85f Mon Sep 17 00:00:00 2001
+From: Sumit Saxena <sumit.saxena@avagotech.com>
+Date: Thu, 28 Jan 2016 21:14:26 +0530
+Subject: [PATCH 064/135] megaraid_sas: Fix SMAP issue
+
+[ Upstream commit ea1c928bb6051ec4ccf24826898aa2361eaa71e5 ]
+
+Inside compat IOCTL hook of driver, driver was using wrong address of
+ioc->frame.raw which leads sense_ioc_ptr to be calculated wrongly and
+failing IOCTL.
+
+Signed-off-by: Sumit Saxena <sumit.saxena@avagotech.com>
+Reviewed-by: Tomas Henzl <thenzl@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/megaraid/megaraid_sas_base.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -6476,9 +6476,9 @@ static int megasas_mgmt_compat_ioctl_fw(
+ int i;
+ int error = 0;
+ compat_uptr_t ptr;
+- unsigned long local_raw_ptr;
+ u32 local_sense_off;
+ u32 local_sense_len;
++ u32 user_sense_off;
+
+ if (clear_user(ioc, sizeof(*ioc)))
+ return -EFAULT;
+@@ -6496,17 +6496,16 @@ static int megasas_mgmt_compat_ioctl_fw(
+ * sense_len is not null, so prepare the 64bit value under
+ * the same condition.
+ */
+- if (get_user(local_raw_ptr, ioc->frame.raw) ||
+- get_user(local_sense_off, &ioc->sense_off) ||
+- get_user(local_sense_len, &ioc->sense_len))
++ if (get_user(local_sense_off, &ioc->sense_off) ||
++ get_user(local_sense_len, &ioc->sense_len) ||
++ get_user(user_sense_off, &cioc->sense_off))
+ return -EFAULT;
+
+-
+ if (local_sense_len) {
+ void __user **sense_ioc_ptr =
+- (void __user **)((u8*)local_raw_ptr + local_sense_off);
++ (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
+ compat_uptr_t *sense_cioc_ptr =
+- (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
++ (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
+ if (get_user(ptr, sense_cioc_ptr) ||
+ put_user(compat_ptr(ptr), sense_ioc_ptr))
+ return -EFAULT;
--- /dev/null
+From adc397ce2b832fc83021f4a3adaa8e0b0ee0b8ea Mon Sep 17 00:00:00 2001
+From: Tomas Henzl <thenzl@redhat.com>
+Date: Mon, 1 Feb 2016 15:12:04 +0100
+Subject: [PATCH 065/135] megaraid_sas: Add an i/o barrier
+
+[ Upstream commit b99dbe56d511eb07de33bfa1b99ac5a6ff76ae08 ]
+
+A barrier should be added to ensure proper ordering of memory mapped
+writes.
+
+Signed-off-by: Tomas Henzl <thenzl@redhat.com>
+Reviewed-by: Kashyap Desai <kashyap.desai@broadcom.com>
+Acked-by: Kashyap Desai <kashyap.desai@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/megaraid/megaraid_sas_base.c | 1 +
+ drivers/scsi/megaraid/megaraid_sas_fusion.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -735,6 +735,7 @@ megasas_fire_cmd_skinny(struct megasas_i
+ &(regs)->inbound_high_queue_port);
+ writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
+ &(regs)->inbound_low_queue_port);
++ mmiowb();
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ }
+
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -201,6 +201,7 @@ megasas_fire_cmd_fusion(struct megasas_i
+ &instance->reg_set->inbound_low_queue_port);
+ writel(le32_to_cpu(req_desc->u.high),
+ &instance->reg_set->inbound_high_queue_port);
++ mmiowb();
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ #endif
+ }
--- /dev/null
+From 61b5ea043d4a2e1223b2ce5d7c65fbf619f8a04e Mon Sep 17 00:00:00 2001
+From: Stefan Agner <stefan@agner.ch>
+Date: Mon, 23 Nov 2015 14:45:07 -0800
+Subject: [PATCH 066/135] pwm: fsl-ftm: Fix clock enable/disable when using PM
+
+[ Upstream commit 816aec2325e620b6454474372a21f90a8740cb28 ]
+
+A FTM PWM instance enables/disables three clocks: The bus clock, the
+counter clock and the PWM clock. The bus clock gets enabled on
+pwm_request, whereas the counter and PWM clocks will be enabled upon
+pwm_enable.
+
+The driver has three closely related issues when enabling/disabling
+clocks during suspend/resume:
+- The three clocks are not treated differently in regards to the
+ individual PWM state enabled/requested. This can lead to clocks
+ getting disabled which have not been enabled in the first place
+ (a PWM channel which only has been requested going through
+ suspend/resume).
+
+- When entering suspend, the current behavior relies on the
+ FTM_OUTMASK register: If a PWM output is unmasked, the driver
+ assumes the clocks are enabled. However, some PWM instances
+ have only 2 channels connected (e.g. Vybrid's FTM1). In that case,
+ the FTM_OUTMASK reads 0x3 if all channels are disabled, even if
+ the code wrote 0xff to it before. For those PWM instances, the
+ current approach to detect enabled PWM signals does not work.
+
+- A third issue applies to the bus clock only, which can get enabled
+ multiple times (once for each PWM channel of a PWM chip). This is
+ fine, however when entering suspend mode, the clock only gets
+ disabled once.
+
+This change introduces a different approach by relying on the enable
+and prepared counters of the clock framework and using the frameworks
+PWM signal states to address all three issues.
+
+Clocks get disabled during suspend and back enabled on resume
+regarding to the PWM channels individual state (requested/enabled).
+
+Since we do not count the clock enables in the driver, this change no
+longer clears the Status and Control registers Clock Source Selection
+(FTM_SC[CLKS]). However, since we disable the selected clock anyway,
+and we explicitly select the clock source on reenabling a PWM channel
+this approach should not make a difference in practice.
+
+Signed-off-by: Stefan Agner <stefan@agner.ch>
+Signed-off-by: Thierry Reding <thierry.reding@gmail.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pwm/pwm-fsl-ftm.c | 58 +++++++++++++++++++---------------------------
+ 1 file changed, 25 insertions(+), 33 deletions(-)
+
+--- a/drivers/pwm/pwm-fsl-ftm.c
++++ b/drivers/pwm/pwm-fsl-ftm.c
+@@ -80,7 +80,6 @@ struct fsl_pwm_chip {
+
+ struct mutex lock;
+
+- unsigned int use_count;
+ unsigned int cnt_select;
+ unsigned int clk_ps;
+
+@@ -300,9 +299,6 @@ static int fsl_counter_clock_enable(stru
+ {
+ int ret;
+
+- if (fpc->use_count++ != 0)
+- return 0;
+-
+ /* select counter clock source */
+ regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK,
+ FTM_SC_CLK(fpc->cnt_select));
+@@ -334,25 +330,6 @@ static int fsl_pwm_enable(struct pwm_chi
+ return ret;
+ }
+
+-static void fsl_counter_clock_disable(struct fsl_pwm_chip *fpc)
+-{
+- /*
+- * already disabled, do nothing
+- */
+- if (fpc->use_count == 0)
+- return;
+-
+- /* there are still users, so can't disable yet */
+- if (--fpc->use_count > 0)
+- return;
+-
+- /* no users left, disable PWM counter clock */
+- regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK, 0);
+-
+- clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
+- clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
+-}
+-
+ static void fsl_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+ {
+ struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
+@@ -362,7 +339,8 @@ static void fsl_pwm_disable(struct pwm_c
+ regmap_update_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm),
+ BIT(pwm->hwpwm));
+
+- fsl_counter_clock_disable(fpc);
++ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
++ clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
+
+ regmap_read(fpc->regmap, FTM_OUTMASK, &val);
+ if ((val & 0xFF) == 0xFF)
+@@ -492,17 +470,24 @@ static int fsl_pwm_remove(struct platfor
+ static int fsl_pwm_suspend(struct device *dev)
+ {
+ struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
+- u32 val;
++ int i;
+
+ regcache_cache_only(fpc->regmap, true);
+ regcache_mark_dirty(fpc->regmap);
+
+- /* read from cache */
+- regmap_read(fpc->regmap, FTM_OUTMASK, &val);
+- if ((val & 0xFF) != 0xFF) {
++ for (i = 0; i < fpc->chip.npwm; i++) {
++ struct pwm_device *pwm = &fpc->chip.pwms[i];
++
++ if (!test_bit(PWMF_REQUESTED, &pwm->flags))
++ continue;
++
++ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
++
++ if (!pwm_is_enabled(pwm))
++ continue;
++
+ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
+ clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
+- clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
+ }
+
+ return 0;
+@@ -511,12 +496,19 @@ static int fsl_pwm_suspend(struct device
+ static int fsl_pwm_resume(struct device *dev)
+ {
+ struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
+- u32 val;
++ int i;
++
++ for (i = 0; i < fpc->chip.npwm; i++) {
++ struct pwm_device *pwm = &fpc->chip.pwms[i];
++
++ if (!test_bit(PWMF_REQUESTED, &pwm->flags))
++ continue;
+
+- /* read from cache */
+- regmap_read(fpc->regmap, FTM_OUTMASK, &val);
+- if ((val & 0xFF) != 0xFF) {
+ clk_prepare_enable(fpc->clk[FSL_PWM_CLK_SYS]);
++
++ if (!pwm_is_enabled(pwm))
++ continue;
++
+ clk_prepare_enable(fpc->clk[fpc->cnt_select]);
+ clk_prepare_enable(fpc->clk[FSL_PWM_CLK_CNTEN]);
+ }
--- /dev/null
+From a0bc0fcd472114c1a817cbdb16ab90b32e8e4540 Mon Sep 17 00:00:00 2001
+From: Vladimir Zapolskiy <vz@mleia.com>
+Date: Sun, 6 Dec 2015 13:31:59 +0200
+Subject: [PATCH 067/135] pwm: lpc32xx: correct number of PWM channels from 2
+ to 1
+
+[ Upstream commit ebe1fca35038df28b5c183e8486863e765364ec1 ]
+
+LPC32xx SoC has two independent PWM controllers, they have different
+clock parents, clock gates and even slightly different controls, and
+each of these two PWM controllers has one output channel. Due to
+almost similar controls arranged in a row it is incorrectly set that
+there is one PWM controller with two channels, fix this problem, which
+at the moment prevents separate configuration of different clock
+parents and gates for both PWM controllers.
+
+The change makes previous PWM device node description incompatible
+with this update.
+
+Signed-off-by: Vladimir Zapolskiy <vz@mleia.com>
+Signed-off-by: Thierry Reding <thierry.reding@gmail.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pwm/pwm-lpc32xx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/pwm/pwm-lpc32xx.c
++++ b/drivers/pwm/pwm-lpc32xx.c
+@@ -134,7 +134,7 @@ static int lpc32xx_pwm_probe(struct plat
+
+ lpc32xx->chip.dev = &pdev->dev;
+ lpc32xx->chip.ops = &lpc32xx_pwm_ops;
+- lpc32xx->chip.npwm = 2;
++ lpc32xx->chip.npwm = 1;
+ lpc32xx->chip.base = -1;
+
+ ret = pwmchip_add(&lpc32xx->chip);
--- /dev/null
+From 923f8ee01265e83f788fa96898a9b7befbf83b59 Mon Sep 17 00:00:00 2001
+From: Vladimir Zapolskiy <vz@mleia.com>
+Date: Sun, 6 Dec 2015 13:32:01 +0200
+Subject: [PATCH 068/135] pwm: lpc32xx: fix and simplify duty cycle and period
+ calculations
+
+[ Upstream commit 5a9fc9c666d5d759699cf5495bda85f1da0d747e ]
+
+The change fixes a problem, if duty_ns is too small in comparison
+to period_ns (as a valid corner case duty_ns is 0 ns), then due to
+PWM_DUTY() macro applied on a value the result is overflowed over 8
+bits, and instead of the highest bitfield duty cycle value 0xff the
+invalid duty cycle bitfield value 0x00 is written.
+
+For reference the LPC32xx spec defines PWMx_DUTY bitfield description
+is this way and it seems to be correct:
+
+ [Low]/[High] = [PWM_DUTY]/[256-PWM_DUTY], where 0 < PWM_DUTY <= 255.
+
+In addition according to my oscilloscope measurements LPC32xx PWM is
+"tristate" in sense that it produces a wave with floating min/max
+voltage levels for different duty cycle values, for corner cases:
+
+ PWM_DUTY == 0x01 => signal is in range from -1.05v to 0v
+ ....
+ PWM_DUTY == 0x80 => signal is in range from -0.75v to +0.75v
+ ....
+ PWM_DUTY == 0xff => signal is in range from 0v to +1.05v
+
+ PWM_DUTY == 0x00 => signal is around 0v, PWM is off
+
+Due to this peculiarity on very long period ranges (less than 1KHz)
+and odd pre-divider values PWM generated wave does not remind a
+clock shape signal, but rather a heartbeat shape signal with positive
+and negative peaks, so I would recommend to use high-speed HCLK clock
+as a PWM parent clock and avoid using RTC clock as a parent.
+
+The change corrects PWM output in corner cases and prevents any
+possible overflows in calculation of values for PWM_DUTY and
+PWM_RELOADV bitfields, thus helper macro definitions may be removed.
+
+Signed-off-by: Vladimir Zapolskiy <vz@mleia.com>
+Signed-off-by: Thierry Reding <thierry.reding@gmail.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pwm/pwm-lpc32xx.c | 51 ++++++++++++++++------------------------------
+ 1 file changed, 18 insertions(+), 33 deletions(-)
+
+--- a/drivers/pwm/pwm-lpc32xx.c
++++ b/drivers/pwm/pwm-lpc32xx.c
+@@ -24,9 +24,7 @@ struct lpc32xx_pwm_chip {
+ void __iomem *base;
+ };
+
+-#define PWM_ENABLE (1 << 31)
+-#define PWM_RELOADV(x) (((x) & 0xFF) << 8)
+-#define PWM_DUTY(x) ((x) & 0xFF)
++#define PWM_ENABLE BIT(31)
+
+ #define to_lpc32xx_pwm_chip(_chip) \
+ container_of(_chip, struct lpc32xx_pwm_chip, chip)
+@@ -38,40 +36,27 @@ static int lpc32xx_pwm_config(struct pwm
+ unsigned long long c;
+ int period_cycles, duty_cycles;
+ u32 val;
++ c = clk_get_rate(lpc32xx->clk);
+
+- c = clk_get_rate(lpc32xx->clk) / 256;
+- c = c * period_ns;
+- do_div(c, NSEC_PER_SEC);
+-
+- /* Handle high and low extremes */
+- if (c == 0)
+- c = 1;
+- if (c > 255)
+- c = 0; /* 0 set division by 256 */
+- period_cycles = c;
+-
+- /* The duty-cycle value is as follows:
+- *
+- * DUTY-CYCLE HIGH LEVEL
+- * 1 99.9%
+- * 25 90.0%
+- * 128 50.0%
+- * 220 10.0%
+- * 255 0.1%
+- * 0 0.0%
+- *
+- * In other words, the register value is duty-cycle % 256 with
+- * duty-cycle in the range 1-256.
+- */
+- c = 256 * duty_ns;
+- do_div(c, period_ns);
+- if (c > 255)
+- c = 255;
+- duty_cycles = 256 - c;
++ /* The highest acceptable divisor is 256, which is represented by 0 */
++ period_cycles = div64_u64(c * period_ns,
++ (unsigned long long)NSEC_PER_SEC * 256);
++ if (!period_cycles)
++ period_cycles = 1;
++ if (period_cycles > 255)
++ period_cycles = 0;
++
++ /* Compute 256 x #duty/period value and care for corner cases */
++ duty_cycles = div64_u64((unsigned long long)(period_ns - duty_ns) * 256,
++ period_ns);
++ if (!duty_cycles)
++ duty_cycles = 1;
++ if (duty_cycles > 255)
++ duty_cycles = 255;
+
+ val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val &= ~0xFFFF;
+- val |= PWM_RELOADV(period_cycles) | PWM_DUTY(duty_cycles);
++ val |= (period_cycles << 8) | duty_cycles;
+ writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+
+ return 0;
--- /dev/null
+From 2b7915f04074318e3c739d2d3efafe77b7ee211e Mon Sep 17 00:00:00 2001
+From: Tirumalesh Chalamarla <tchalamarla@caviumnetworks.com>
+Date: Thu, 4 Feb 2016 10:45:25 -0800
+Subject: [PATCH 069/135] irqchip/gic-v3: Make sure read from ICC_IAR1_EL1 is
+ visible on redistributor
+
+[ Upstream commit 1a1ebd5fb1e203ee8cc73508cc7a38ac4b804596 ]
+
+The ARM GICv3 specification mentions the need for dsb after a read
+from the ICC_IAR1_EL1 register:
+
+ 4.1.1 Physical CPU Interface:
+ The effects of reading ICC_IAR0_EL1 and ICC_IAR1_EL1
+ on the state of a returned INTID are not guaranteed
+ to be visible until after the execution of a DSB.
+
+Not having this could result in missed interrupts, so let's add the
+required barrier.
+
+[Marc: fixed commit message]
+
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Tirumalesh Chalamarla <tchalamarla@caviumnetworks.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/arch_gicv3.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/include/asm/arch_gicv3.h
++++ b/arch/arm64/include/asm/arch_gicv3.h
+@@ -103,6 +103,7 @@ static inline u64 gic_read_iar_common(vo
+ u64 irqstat;
+
+ asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
++ dsb(sy);
+ return irqstat;
+ }
+
--- /dev/null
+From 415f5ac691232d6011ff6716348ff40e28a65c12 Mon Sep 17 00:00:00 2001
+From: Tirumalesh Chalamarla <tchalamarla@caviumnetworks.com>
+Date: Wed, 10 Feb 2016 10:46:53 -0800
+Subject: [PATCH 070/135] arm64: KVM: Configure TCR_EL2.PS at runtime
+
+[ Upstream commit 3c5b1d92b3b02be07873d611a27950addff544d3 ]
+
+Setting TCR_EL2.PS to 40 bits is wrong on systems with
+less than 40 bits of physical addresses, and breaks KVM on systems
+where the RAM is above 40 bits.
+
+This patch uses ID_AA64MMFR0_EL1.PARange to set TCR_EL2.PS dynamically,
+just like we already do for VTCR_EL2.PS.
+
+[Marc: rewrote commit message, patch tidy up]
+
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Tirumalesh Chalamarla <tchalamarla@caviumnetworks.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_arm.h | 2 --
+ arch/arm64/kvm/hyp-init.S | 13 ++++++++-----
+ 2 files changed, 8 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -107,8 +107,6 @@
+ #define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
+ TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
+
+-#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B)
+-
+ /* VTCR_EL2 Registers bits */
+ #define VTCR_EL2_RES1 (1 << 31)
+ #define VTCR_EL2_PS_MASK (7 << 16)
+--- a/arch/arm64/kvm/hyp-init.S
++++ b/arch/arm64/kvm/hyp-init.S
+@@ -64,7 +64,7 @@ __do_hyp_init:
+ mrs x4, tcr_el1
+ ldr x5, =TCR_EL2_MASK
+ and x4, x4, x5
+- ldr x5, =TCR_EL2_FLAGS
++ mov x5, #TCR_EL2_RES1
+ orr x4, x4, x5
+
+ #ifndef CONFIG_ARM64_VA_BITS_48
+@@ -85,15 +85,18 @@ __do_hyp_init:
+ ldr_l x5, idmap_t0sz
+ bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+ #endif
+- msr tcr_el2, x4
+-
+- ldr x4, =VTCR_EL2_FLAGS
+ /*
+ * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
+- * VTCR_EL2.
++ * TCR_EL2 and VTCR_EL2.
+ */
+ mrs x5, ID_AA64MMFR0_EL1
+ bfi x4, x5, #16, #3
++
++ msr tcr_el2, x4
++
++ ldr x4, =VTCR_EL2_FLAGS
++ bfi x4, x5, #16, #3
++
+ msr vtcr_el2, x4
+
+ mrs x4, mair_el1
--- /dev/null
+From fc5f88e18ba4c8db7532ec36804817a7e1dc64ae Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Fri, 5 Feb 2016 16:30:39 +0000
+Subject: [PATCH 071/135] net: cavium: liquidio: fix check for in progress flag
+
+[ Upstream commit 19a6d156a7bd080f3a855a40a4a08ab475e34b4a ]
+
+smatch detected a suspicious looking bitop condition:
+
+drivers/net/ethernet/cavium/liquidio/lio_main.c:2529
+ handle_timestamp() warn: suspicious bitop condition
+
+(skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) is always non-zero,
+so the logic is definitely not correct. Use & to mask the correct
+bit.
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
++++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
+@@ -2526,7 +2526,7 @@ static void handle_timestamp(struct octe
+
+ octeon_swap_8B_data(&resp->timestamp, 1);
+
+- if (unlikely((skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) != 0)) {
++ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
+ struct skb_shared_hwtstamps ts;
+ u64 ns = resp->timestamp;
+
--- /dev/null
+From 18e9ca3f5e05c1435421bc94f821db774df4c78d Mon Sep 17 00:00:00 2001
+From: Tomas Henzl <thenzl@redhat.com>
+Date: Wed, 23 Dec 2015 14:21:47 +0100
+Subject: [PATCH 072/135] mpt3sas: A correction in unmap_resources
+
+[ Upstream commit 5f985d88bac34e7f3b4403118eab072902a0b392 ]
+
+It might happen that we try to free an already freed pointer.
+
+Reported-by: Maurizio Lombardi <mlombard@redhat.com>
+Signed-off-by: Tomas Henzl <thenzl@redhat.com>
+Acked-by: Chaitra P B <chaitra.basappa@avagotech.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/mpt3sas/mpt3sas_base.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -2020,8 +2020,10 @@ mpt3sas_base_unmap_resources(struct MPT3
+ _base_free_irq(ioc);
+ _base_disable_msix(ioc);
+
+- if (ioc->msix96_vector)
++ if (ioc->msix96_vector) {
+ kfree(ioc->replyPostRegisterIndex);
++ ioc->replyPostRegisterIndex = NULL;
++ }
+
+ if (ioc->chip_phys) {
+ iounmap(ioc->chip);
--- /dev/null
+From 6b229d64e5b93e67a6c29c5149f72236a1bfe42b Mon Sep 17 00:00:00 2001
+From: Suganath prabu Subramani <suganath-prabu.subramani@avagotech.com>
+Date: Thu, 28 Jan 2016 12:07:06 +0530
+Subject: [PATCH 073/135] mpt3sas: Fix for Asynchronous completion of timedout
+ IO and task abort of timedout IO.
+
+[ Upstream commit 03d1fb3a65783979f23bd58b5a0387e6992d9e26 ]
+
+Track msix of each IO and use the same msix for issuing abort to timed
+out IO. With this driver will process IO's reply first followed by TM.
+
+Signed-off-by: Suganath prabu Subramani <suganath-prabu.subramani@avagotech.com>
+Signed-off-by: Chaitra P B <chaitra.basappa@avagotech.com>
+Reviewed-by: Tomas Henzl <thenzl@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/mpt3sas/mpt3sas_base.c | 20 +++++++++++---------
+ drivers/scsi/mpt3sas/mpt3sas_base.h | 5 ++++-
+ drivers/scsi/mpt3sas/mpt3sas_ctl.c | 2 +-
+ drivers/scsi/mpt3sas/mpt3sas_scsih.c | 12 +++++++++---
+ 4 files changed, 25 insertions(+), 14 deletions(-)
+
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -2242,6 +2242,12 @@ mpt3sas_base_get_reply_virt_addr(struct
+ return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
+ }
+
++static inline u8
++_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
++{
++ return ioc->cpu_msix_table[raw_smp_processor_id()];
++}
++
+ /**
+ * mpt3sas_base_get_smid - obtain a free smid from internal queue
+ * @ioc: per adapter object
+@@ -2302,6 +2308,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3
+ request->scmd = scmd;
+ request->cb_idx = cb_idx;
+ smid = request->smid;
++ request->msix_io = _base_get_msix_index(ioc);
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+@@ -2424,12 +2431,6 @@ _base_writeq(__u64 b, volatile void __io
+ }
+ #endif
+
+-static inline u8
+-_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
+-{
+- return ioc->cpu_msix_table[raw_smp_processor_id()];
+-}
+-
+ /**
+ * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
+ * @ioc: per adapter object
+@@ -2483,18 +2484,19 @@ mpt3sas_base_put_smid_fast_path(struct M
+ * mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+- *
++ * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
+ * Return nothing.
+ */
+ void
+-mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
++mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
++ u16 msix_task)
+ {
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.HighPriority.RequestFlags =
+ MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+- descriptor.HighPriority.MSIxIndex = 0;
++ descriptor.HighPriority.MSIxIndex = msix_task;
+ descriptor.HighPriority.SMID = cpu_to_le16(smid);
+ descriptor.HighPriority.LMID = 0;
+ descriptor.HighPriority.Reserved1 = 0;
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
+@@ -643,6 +643,7 @@ struct chain_tracker {
+ * @cb_idx: callback index
+ * @direct_io: To indicate whether I/O is direct (WARPDRIVE)
+ * @tracker_list: list of free request (ioc->free_list)
++ * @msix_io: IO's msix
+ */
+ struct scsiio_tracker {
+ u16 smid;
+@@ -651,6 +652,7 @@ struct scsiio_tracker {
+ u8 direct_io;
+ struct list_head chain_list;
+ struct list_head tracker_list;
++ u16 msix_io;
+ };
+
+ /**
+@@ -1213,7 +1215,8 @@ void mpt3sas_base_put_smid_scsi_io(struc
+ u16 handle);
+ void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle);
+-void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid);
++void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc,
++ u16 smid, u16 msix_task);
+ void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+ void mpt3sas_base_initialize_callback_handler(void);
+ u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
+--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+@@ -817,7 +817,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPT
+ tm_request->DevHandle));
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+- mpt3sas_base_put_smid_hi_priority(ioc, smid);
++ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ break;
+ }
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -2193,6 +2193,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_AD
+ unsigned long timeleft;
+ struct scsiio_tracker *scsi_lookup = NULL;
+ int rc;
++ u16 msix_task = 0;
+
+ if (m_type == TM_MUTEX_ON)
+ mutex_lock(&ioc->tm_cmds.mutex);
+@@ -2256,7 +2257,12 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_AD
+ int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
+ mpt3sas_scsih_set_tm_flag(ioc, handle);
+ init_completion(&ioc->tm_cmds.done);
+- mpt3sas_base_put_smid_hi_priority(ioc, smid);
++ if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
++ (scsi_lookup->msix_io < ioc->reply_queue_count))
++ msix_task = scsi_lookup->msix_io;
++ else
++ msix_task = 0;
++ mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
+ timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
+ if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+@@ -3151,7 +3157,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+- mpt3sas_base_put_smid_hi_priority(ioc, smid);
++ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
+
+ out:
+@@ -3332,7 +3338,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+- mpt3sas_base_put_smid_hi_priority(ioc, smid);
++ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ }
+
+ /**
--- /dev/null
+From 7cad8862ae0c0cba7a848567ea229282a9c67b15 Mon Sep 17 00:00:00 2001
+From: Anjali Singhai Jain <anjali.singhai@intel.com>
+Date: Wed, 9 Dec 2015 15:50:24 -0800
+Subject: [PATCH 074/135] i40e/i40evf: Fix RSS rx-flow-hash configuration
+ through ethtool
+
+[ Upstream commit 6e35c04cf633e55648acb9ccabff42aa37bd4044 ]
+
+This patch fixes the Hash PCTYPE enable for X722 since it supports
+a broader selection of PCTYPES for TCP and UDP.
+
+This patch also fixes a bug in XL710, X710, X722 support for RSS,
+as of now we cannot reduce the (4)tuple for RSS for TCP/IPv4/IPV6 or
+UDP/IPv4/IPv6 packets since this requires a product feature change
+that comes in a later release.
+
+A VF should never be allowed to change the tuples for RSS for any
+PCTYPE since that's a global setting for the device in case of i40e
+devices.
+
+Change-ID: I0ee7203c9b24813260f58f3220798bc9d9ac4a12
+Signed-off-by: Anjali Singhai Jain <anjali.singhai@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 14 ++-----
+ drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 40 ++++-----------------
+ 2 files changed, 12 insertions(+), 42 deletions(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -2164,8 +2164,7 @@ static int i40e_set_rss_hash_opt(struct
+ case TCP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+- break;
++ return -EINVAL;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+@@ -2176,8 +2175,7 @@ static int i40e_set_rss_hash_opt(struct
+ case TCP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+- break;
++ return -EINVAL;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+@@ -2188,9 +2186,7 @@ static int i40e_set_rss_hash_opt(struct
+ case UDP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
+- break;
++ return -EINVAL;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
+@@ -2202,9 +2198,7 @@ static int i40e_set_rss_hash_opt(struct
+ case UDP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
+- break;
++ return -EINVAL;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
+--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
++++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+@@ -477,54 +477,30 @@ static int i40evf_set_rss_hash_opt(struc
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+- case 0:
+- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+- break;
+- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
++ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+- break;
+- default:
++ else
+ return -EINVAL;
+- }
+ break;
+ case TCP_V6_FLOW:
+- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+- case 0:
+- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+- break;
+- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
++ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+- break;
+- default:
++ else
+ return -EINVAL;
+- }
+ break;
+ case UDP_V4_FLOW:
+- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+- case 0:
+- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
+- break;
+- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
++ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
+- break;
+- default:
++ } else {
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+- case 0:
+- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
+- break;
+- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
++ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
+- break;
+- default:
++ } else {
+ return -EINVAL;
+ }
+ break;
--- /dev/null
+From 935e2f261c2d316a83df220e37d3caf38a208ac3 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 15 Jan 2016 17:41:09 +0000
+Subject: [PATCH 075/135] hrtimer: Catch illegal clockids
+
+[ Upstream commit 9006a01829a50cfd6bbd4980910ed46e895e93d7 ]
+
+It is way too easy to take any random clockid and feed it to
+the hrtimer subsystem. At best, it gets mapped to a monotonic
+base, but it would be better to just catch illegal values as
+early as possible.
+
+This patch does exactly that, mapping illegal clockids to an
+illegal base index, and panicing when we detect the illegal
+condition.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Cc: Tomasz Nowicki <tn@semihalf.com>
+Cc: Christoffer Dall <christoffer.dall@linaro.org>
+Link: http://lkml.kernel.org/r/1452879670-16133-3-git-send-email-marc.zyngier@arm.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/time/hrtimer.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -94,6 +94,9 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base,
+ };
+
+ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
++ /* Make sure we catch unsupported clockids */
++ [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
++
+ [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
+ [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
+ [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
+@@ -102,7 +105,9 @@ static const int hrtimer_clock_to_base_t
+
+ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
+ {
+- return hrtimer_clock_to_base_table[clock_id];
++ int base = hrtimer_clock_to_base_table[clock_id];
++ BUG_ON(base == HRTIMER_MAX_CLOCK_BASES);
++ return base;
+ }
+
+ /*
--- /dev/null
+From da35be3a7eef7cadd4f810b555919d8055dc37c5 Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@intel.com>
+Date: Thu, 28 Jan 2016 16:04:12 +0200
+Subject: [PATCH 076/135] drm/i915/bxt: update list of PCIIDs
+
+[ Upstream commit 985dd4360fdf2533fe48a33a4a2094f2e4718dc0 ]
+
+Add PCIIDs for new versions of the SOC, based on BSpec. Also add the
+name of the versions as code comment where this is available. The new
+versions don't have any changes visible to the kernel driver.
+
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/1453989852-13569-1-git-send-email-imre.deak@intel.com
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/drm/i915_pciids.h | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/include/drm/i915_pciids.h
++++ b/include/drm/i915_pciids.h
+@@ -289,6 +289,8 @@
+ #define INTEL_BXT_IDS(info) \
+ INTEL_VGA_DEVICE(0x0A84, info), \
+ INTEL_VGA_DEVICE(0x1A84, info), \
+- INTEL_VGA_DEVICE(0x5A84, info)
++ INTEL_VGA_DEVICE(0x1A85, info), \
++ INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
++ INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */
+
+ #endif /* _I915_PCIIDS_H */
--- /dev/null
+From 9be8642218a3a01940f88a8819bea3fb4b351f1f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
+Date: Fri, 5 Feb 2016 13:21:42 +0100
+Subject: [PATCH 077/135] drm/i915/skl: Add missing SKL ids
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit 7157bb27e79875db5603aa1e30f56e873a8300f9 ]
+
+Used by production devices:
+ Intel(R) Iris Graphics 540 (Skylake GT3e)
+ Intel(R) Iris Graphics 550 (Skylake GT3e)
+
+v2: More ids
+v3: Less ids (GT1 got duplicated)
+
+Cc: Mika Kuoppala <mika.kuoppala@intel.com>
+Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
+Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/1454674902-26207-1-git-send-email-michal.winiarski@intel.com
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/drm/i915_pciids.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/drm/i915_pciids.h
++++ b/include/drm/i915_pciids.h
+@@ -277,7 +277,9 @@
+ INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
+
+ #define INTEL_SKL_GT3_IDS(info) \
++ INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
++ INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
+ INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \
+
--- /dev/null
+From 328b8062af081141284186a4fb9bc37204f1dde2 Mon Sep 17 00:00:00 2001
+From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Date: Thu, 7 Jan 2016 10:59:21 +0100
+Subject: [PATCH 078/135] drm/atomic: Do not unset crtc when an encoder is
+ stolen
+
+[ Upstream commit 97a8df90875f72ba3b4c3320759fd93cea743261 ]
+
+While we steal the encoder away from the connector the connector may
+be updated to use a different encoder.
+
+Without this change if 2 connectors swap encoders one of them will
+end up without a crtc.
+
+Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: http://patchwork.freedesktop.org/patch/msgid/1452160762-30487-5-git-send-email-maarten.lankhorst@linux.intel.com
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_atomic_helper.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -108,7 +108,6 @@ steal_encoder(struct drm_atomic_state *s
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+- int ret;
+
+ /*
+ * We can only steal an encoder coming from a connector, which means we
+@@ -139,9 +138,6 @@ steal_encoder(struct drm_atomic_state *s
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+- ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
+- if (ret)
+- return ret;
+ connector_state->best_encoder = NULL;
+ }
+
--- /dev/null
+From 3ac087c846c8eb41c918b720ca12255f78398cfd Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Thu, 26 Nov 2015 14:00:49 +0200
+Subject: [PATCH 079/135] mmc: sdhci: 64-bit DMA actually has 4-byte alignment
+
+[ Upstream commit 04a5ae6fdd018af29675eb8b6c2550c87f471570 ]
+
+The version 3.00 SDHCI spec. was a bit unclear about the
+required data alignment for 64-bit DMA, whereas the version
+4.10 spec. uses different language and indicates that only
+4-byte alignment is required rather than the 8-byte alignment
+currently implemented. That makes no difference to SD and EMMC
+which invariably transfer data in sector-aligned blocks.
+However with SDIO, it results in using more DMA descriptors
+than necessary. Theoretically that slows DMA slightly although
+DMA is not the limiting factor for throughput, so there is no
+discernable impact on performance. Nevertheless, the driver
+should follow the spec unless there is good reason not to, so
+this patch corrects the alignment criterion.
+
+There is a more complicated criterion for the DMA descriptor
+table itself. However the table is allocated by dma_alloc_coherent()
+which allocates pages (i.e. aligned to a page boundary).
+For simplicity just check it is 8-byte aligned, but add a comment
+that some Intel controllers actually require 8-byte alignment
+even when using 32-bit DMA.
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci.c | 31 ++++++++++++-------------------
+ drivers/mmc/host/sdhci.h | 21 ++++++++++++---------
+ 2 files changed, 24 insertions(+), 28 deletions(-)
+
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -492,7 +492,7 @@ static int sdhci_adma_table_pre(struct s
+ host->align_buffer, host->align_buffer_sz, direction);
+ if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
+ goto fail;
+- BUG_ON(host->align_addr & host->align_mask);
++ BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
+
+ host->sg_count = sdhci_pre_dma_transfer(host, data);
+ if (host->sg_count < 0)
+@@ -514,8 +514,8 @@ static int sdhci_adma_table_pre(struct s
+ * the (up to three) bytes that screw up the
+ * alignment.
+ */
+- offset = (host->align_sz - (addr & host->align_mask)) &
+- host->align_mask;
++ offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
++ SDHCI_ADMA2_MASK;
+ if (offset) {
+ if (data->flags & MMC_DATA_WRITE) {
+ buffer = sdhci_kmap_atomic(sg, &flags);
+@@ -529,8 +529,8 @@ static int sdhci_adma_table_pre(struct s
+
+ BUG_ON(offset > 65536);
+
+- align += host->align_sz;
+- align_addr += host->align_sz;
++ align += SDHCI_ADMA2_ALIGN;
++ align_addr += SDHCI_ADMA2_ALIGN;
+
+ desc += host->desc_sz;
+
+@@ -611,7 +611,7 @@ static void sdhci_adma_table_post(struct
+ /* Do a quick scan of the SG list for any unaligned mappings */
+ has_unaligned = false;
+ for_each_sg(data->sg, sg, host->sg_count, i)
+- if (sg_dma_address(sg) & host->align_mask) {
++ if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
+ has_unaligned = true;
+ break;
+ }
+@@ -623,15 +623,15 @@ static void sdhci_adma_table_post(struct
+ align = host->align_buffer;
+
+ for_each_sg(data->sg, sg, host->sg_count, i) {
+- if (sg_dma_address(sg) & host->align_mask) {
+- size = host->align_sz -
+- (sg_dma_address(sg) & host->align_mask);
++ if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
++ size = SDHCI_ADMA2_ALIGN -
++ (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
+
+ buffer = sdhci_kmap_atomic(sg, &flags);
+ memcpy(buffer, align, size);
+ sdhci_kunmap_atomic(buffer, &flags);
+
+- align += host->align_sz;
++ align += SDHCI_ADMA2_ALIGN;
+ }
+ }
+ }
+@@ -2983,24 +2983,17 @@ int sdhci_add_host(struct sdhci_host *ho
+ if (host->flags & SDHCI_USE_64_BIT_DMA) {
+ host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
+ SDHCI_ADMA2_64_DESC_SZ;
+- host->align_buffer_sz = SDHCI_MAX_SEGS *
+- SDHCI_ADMA2_64_ALIGN;
+ host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
+- host->align_sz = SDHCI_ADMA2_64_ALIGN;
+- host->align_mask = SDHCI_ADMA2_64_ALIGN - 1;
+ } else {
+ host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
+ SDHCI_ADMA2_32_DESC_SZ;
+- host->align_buffer_sz = SDHCI_MAX_SEGS *
+- SDHCI_ADMA2_32_ALIGN;
+ host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
+- host->align_sz = SDHCI_ADMA2_32_ALIGN;
+- host->align_mask = SDHCI_ADMA2_32_ALIGN - 1;
+ }
+ host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
+ host->adma_table_sz,
+ &host->adma_addr,
+ GFP_KERNEL);
++ host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
+ host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
+ if (!host->adma_table || !host->align_buffer) {
+ if (host->adma_table)
+@@ -3014,7 +3007,7 @@ int sdhci_add_host(struct sdhci_host *ho
+ host->flags &= ~SDHCI_USE_ADMA;
+ host->adma_table = NULL;
+ host->align_buffer = NULL;
+- } else if (host->adma_addr & host->align_mask) {
++ } else if (host->adma_addr & (SDHCI_ADMA2_DESC_ALIGN - 1)) {
+ pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
+ mmc_hostname(mmc));
+ host->flags &= ~SDHCI_USE_ADMA;
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -272,22 +272,27 @@
+ /* ADMA2 32-bit DMA descriptor size */
+ #define SDHCI_ADMA2_32_DESC_SZ 8
+
+-/* ADMA2 32-bit DMA alignment */
+-#define SDHCI_ADMA2_32_ALIGN 4
+-
+ /* ADMA2 32-bit descriptor */
+ struct sdhci_adma2_32_desc {
+ __le16 cmd;
+ __le16 len;
+ __le32 addr;
+-} __packed __aligned(SDHCI_ADMA2_32_ALIGN);
++} __packed __aligned(4);
++
++/* ADMA2 data alignment */
++#define SDHCI_ADMA2_ALIGN 4
++#define SDHCI_ADMA2_MASK (SDHCI_ADMA2_ALIGN - 1)
++
++/*
++ * ADMA2 descriptor alignment. Some controllers (e.g. Intel) require 8 byte
++ * alignment for the descriptor table even in 32-bit DMA mode. Memory
++ * allocation is at least 8 byte aligned anyway, so just stipulate 8 always.
++ */
++#define SDHCI_ADMA2_DESC_ALIGN 8
+
+ /* ADMA2 64-bit DMA descriptor size */
+ #define SDHCI_ADMA2_64_DESC_SZ 12
+
+-/* ADMA2 64-bit DMA alignment */
+-#define SDHCI_ADMA2_64_ALIGN 8
+-
+ /*
+ * ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte
+ * aligned.
+@@ -483,8 +488,6 @@ struct sdhci_host {
+ dma_addr_t align_addr; /* Mapped bounce buffer */
+
+ unsigned int desc_sz; /* ADMA descriptor size */
+- unsigned int align_sz; /* ADMA alignment */
+- unsigned int align_mask; /* ADMA alignment mask */
+
+ struct tasklet_struct finish_tasklet; /* Tasklet structures */
+
--- /dev/null
+From e74817c3b62c82583c95dbd3b7de59472a166a5f Mon Sep 17 00:00:00 2001
+From: Swapnil Nagle <swapnil.nagle@purestorage.com>
+Date: Thu, 4 Feb 2016 11:45:17 -0500
+Subject: [PATCH 080/135] qla2xxx: Use ATIO type to send correct tmr response
+
+[ Upstream commit d7236ac368212bd6fc8b45f050136ee53e6a6f2d ]
+
+The function value inside se_cmd can change if the TMR is cancelled.
+Use original ATIO Type to correctly determine CTIO response.
+
+Signed-off-by: Swapnil Nagle <swapnil.nagle@purestroage.com>
+Signed-off-by: Himanshu Madhani <himanshu.madhani@qlogic.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/qla2xxx/qla_target.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1578,7 +1578,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt
+ qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
+ 0, 0, 0, 0, 0, 0);
+ else {
+- if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
++ if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
+ qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
+ mcmd->fc_tm_rsp, false);
+ else
--- /dev/null
+From a931b4f3726dbb852ce6ec455fcf793ce79b951f Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 17 Dec 2015 09:57:49 -0500
+Subject: [PATCH 081/135] drm/amdgpu: fix dp link rate selection (v2)
+
+[ Upstream commit 41869c1c7fe583dec932eb3d87de2e010b30a737 ]
+
+Need to properly handle the max link rate in the dpcd.
+This prevents some cases where 5.4 Ghz is selected when
+it shouldn't be.
+
+v2: simplify logic, add array bounds check
+
+Reviewed-by: Tom St Denis <tom.stdenis@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 96 +++++++++++--------------------
+ 1 file changed, 36 insertions(+), 60 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+@@ -243,7 +243,7 @@ static void amdgpu_atombios_dp_get_adjus
+
+ /* convert bits per color to bits per pixel */
+ /* get bpc from the EDID */
+-static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
++static unsigned amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
+ {
+ if (bpc == 0)
+ return 24;
+@@ -251,64 +251,32 @@ static int amdgpu_atombios_dp_convert_bp
+ return bpc * 3;
+ }
+
+-/* get the max pix clock supported by the link rate and lane num */
+-static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate,
+- int lane_num,
+- int bpp)
+-{
+- return (link_rate * lane_num * 8) / bpp;
+-}
+-
+ /***** amdgpu specific DP functions *****/
+
+-/* First get the min lane# when low rate is used according to pixel clock
+- * (prefer low rate), second check max lane# supported by DP panel,
+- * if the max lane# < low rate lane# then use max lane# instead.
+- */
+-static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector,
++static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector,
+ const u8 dpcd[DP_DPCD_SIZE],
+- int pix_clock)
++ unsigned pix_clock,
++ unsigned *dp_lanes, unsigned *dp_rate)
+ {
+- int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
+- int max_link_rate = drm_dp_max_link_rate(dpcd);
+- int max_lane_num = drm_dp_max_lane_count(dpcd);
+- int lane_num;
+- int max_dp_pix_clock;
+-
+- for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
+- max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
+- if (pix_clock <= max_dp_pix_clock)
+- break;
+- }
+-
+- return lane_num;
+-}
+-
+-static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector,
+- const u8 dpcd[DP_DPCD_SIZE],
+- int pix_clock)
+-{
+- int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
+- int lane_num, max_pix_clock;
+-
+- if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+- ENCODER_OBJECT_ID_NUTMEG)
+- return 270000;
+-
+- lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock);
+- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp);
+- if (pix_clock <= max_pix_clock)
+- return 162000;
+- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp);
+- if (pix_clock <= max_pix_clock)
+- return 270000;
+- if (amdgpu_connector_is_dp12_capable(connector)) {
+- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp);
+- if (pix_clock <= max_pix_clock)
+- return 540000;
++ unsigned bpp =
++ amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
++ static const unsigned link_rates[3] = { 162000, 270000, 540000 };
++ unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
++ unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
++ unsigned lane_num, i, max_pix_clock;
++
++ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
++ for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++ max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
++ if (max_pix_clock >= pix_clock) {
++ *dp_lanes = lane_num;
++ *dp_rate = link_rates[i];
++ return 0;
++ }
++ }
+ }
+
+- return drm_dp_max_link_rate(dpcd);
++ return -EINVAL;
+ }
+
+ static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
+@@ -422,6 +390,7 @@ void amdgpu_atombios_dp_set_link_config(
+ {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ struct amdgpu_connector_atom_dig *dig_connector;
++ int ret;
+
+ if (!amdgpu_connector->con_priv)
+ return;
+@@ -429,10 +398,14 @@ void amdgpu_atombios_dp_set_link_config(
+
+ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+- dig_connector->dp_clock =
+- amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+- dig_connector->dp_lane_count =
+- amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
++ ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
++ mode->clock,
++ &dig_connector->dp_lane_count,
++ &dig_connector->dp_clock);
++ if (ret) {
++ dig_connector->dp_clock = 0;
++ dig_connector->dp_lane_count = 0;
++ }
+ }
+ }
+
+@@ -441,14 +414,17 @@ int amdgpu_atombios_dp_mode_valid_helper
+ {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ struct amdgpu_connector_atom_dig *dig_connector;
+- int dp_clock;
++ unsigned dp_lanes, dp_clock;
++ int ret;
+
+ if (!amdgpu_connector->con_priv)
+ return MODE_CLOCK_HIGH;
+ dig_connector = amdgpu_connector->con_priv;
+
+- dp_clock =
+- amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
++ ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
++ mode->clock, &dp_lanes, &dp_clock);
++ if (ret)
++ return MODE_CLOCK_HIGH;
+
+ if ((dp_clock == 540000) &&
+ (!amdgpu_connector_is_dp12_capable(connector)))
--- /dev/null
+From 4c1aab8bf63b9388398f0115a60c40bbef01181c Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 17 Dec 2015 10:23:34 -0500
+Subject: [PATCH 082/135] drm/radeon: fix dp link rate selection (v2)
+
+[ Upstream commit 092c96a8ab9d1bd60ada2ed385cc364ce084180e ]
+
+Need to properly handle the max link rate in the dpcd.
+This prevents some cases where 5.4 Ghz is selected when
+it shouldn't be.
+
+v2: simplify logic, add array bounds check
+
+Reviewed-by: Tom St Denis <tom.stdenis@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/radeon/atombios_dp.c | 108 +++++++++++----------------------
+ drivers/gpu/drm/radeon/radeon_dp_mst.c | 12 ++-
+ drivers/gpu/drm/radeon/radeon_mode.h | 6 +
+ 3 files changed, 49 insertions(+), 77 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -302,77 +302,31 @@ static int convert_bpc_to_bpp(int bpc)
+ return bpc * 3;
+ }
+
+-/* get the max pix clock supported by the link rate and lane num */
+-static int dp_get_max_dp_pix_clock(int link_rate,
+- int lane_num,
+- int bpp)
+-{
+- return (link_rate * lane_num * 8) / bpp;
+-}
+-
+ /***** radeon specific DP functions *****/
+
+-int radeon_dp_get_max_link_rate(struct drm_connector *connector,
+- const u8 dpcd[DP_DPCD_SIZE])
+-{
+- int max_link_rate;
+-
+- if (radeon_connector_is_dp12_capable(connector))
+- max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
+- else
+- max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
+-
+- return max_link_rate;
+-}
+-
+-/* First get the min lane# when low rate is used according to pixel clock
+- * (prefer low rate), second check max lane# supported by DP panel,
+- * if the max lane# < low rate lane# then use max lane# instead.
+- */
+-static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
+- const u8 dpcd[DP_DPCD_SIZE],
+- int pix_clock)
++int radeon_dp_get_dp_link_config(struct drm_connector *connector,
++ const u8 dpcd[DP_DPCD_SIZE],
++ unsigned pix_clock,
++ unsigned *dp_lanes, unsigned *dp_rate)
+ {
+ int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
+- int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
+- int max_lane_num = drm_dp_max_lane_count(dpcd);
+- int lane_num;
+- int max_dp_pix_clock;
+-
+- for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
+- max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
+- if (pix_clock <= max_dp_pix_clock)
+- break;
+- }
+-
+- return lane_num;
+-}
+-
+-static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
+- const u8 dpcd[DP_DPCD_SIZE],
+- int pix_clock)
+-{
+- int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
+- int lane_num, max_pix_clock;
+-
+- if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+- ENCODER_OBJECT_ID_NUTMEG)
+- return 270000;
+-
+- lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
+- max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
+- if (pix_clock <= max_pix_clock)
+- return 162000;
+- max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
+- if (pix_clock <= max_pix_clock)
+- return 270000;
+- if (radeon_connector_is_dp12_capable(connector)) {
+- max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
+- if (pix_clock <= max_pix_clock)
+- return 540000;
++ static const unsigned link_rates[3] = { 162000, 270000, 540000 };
++ unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
++ unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
++ unsigned lane_num, i, max_pix_clock;
++
++ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
++ for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++ max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
++ if (max_pix_clock >= pix_clock) {
++ *dp_lanes = lane_num;
++ *dp_rate = link_rates[i];
++ return 0;
++ }
++ }
+ }
+
+- return radeon_dp_get_max_link_rate(connector, dpcd);
++ return -EINVAL;
+ }
+
+ static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
+@@ -491,6 +445,7 @@ void radeon_dp_set_link_config(struct dr
+ {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector;
++ int ret;
+
+ if (!radeon_connector->con_priv)
+ return;
+@@ -498,10 +453,14 @@ void radeon_dp_set_link_config(struct dr
+
+ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+- dig_connector->dp_clock =
+- radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+- dig_connector->dp_lane_count =
+- radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
++ ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
++ mode->clock,
++ &dig_connector->dp_lane_count,
++ &dig_connector->dp_clock);
++ if (ret) {
++ dig_connector->dp_clock = 0;
++ dig_connector->dp_lane_count = 0;
++ }
+ }
+ }
+
+@@ -510,7 +469,8 @@ int radeon_dp_mode_valid_helper(struct d
+ {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector;
+- int dp_clock;
++ unsigned dp_clock, dp_lanes;
++ int ret;
+
+ if ((mode->clock > 340000) &&
+ (!radeon_connector_is_dp12_capable(connector)))
+@@ -520,8 +480,12 @@ int radeon_dp_mode_valid_helper(struct d
+ return MODE_CLOCK_HIGH;
+ dig_connector = radeon_connector->con_priv;
+
+- dp_clock =
+- radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
++ ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
++ mode->clock,
++ &dp_lanes,
++ &dp_clock);
++ if (ret)
++ return MODE_CLOCK_HIGH;
+
+ if ((dp_clock == 540000) &&
+ (!radeon_connector_is_dp12_capable(connector)))
+--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+@@ -525,11 +525,17 @@ static bool radeon_mst_mode_fixup(struct
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ {
+ struct radeon_connector_atom_dig *dig_connector;
++ int ret;
+
+ dig_connector = mst_enc->connector->con_priv;
+- dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
+- dig_connector->dp_clock = radeon_dp_get_max_link_rate(&mst_enc->connector->base,
+- dig_connector->dpcd);
++ ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
++ dig_connector->dpcd, adjusted_mode->clock,
++ &dig_connector->dp_lane_count,
++ &dig_connector->dp_clock);
++ if (ret) {
++ dig_connector->dp_lane_count = 0;
++ dig_connector->dp_clock = 0;
++ }
+ DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
+ dig_connector->dp_lane_count, dig_connector->dp_clock);
+ }
+--- a/drivers/gpu/drm/radeon/radeon_mode.h
++++ b/drivers/gpu/drm/radeon/radeon_mode.h
+@@ -757,8 +757,10 @@ extern u8 radeon_dp_getsinktype(struct r
+ extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+ extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+-int radeon_dp_get_max_link_rate(struct drm_connector *connector,
+- const u8 *dpcd);
++extern int radeon_dp_get_dp_link_config(struct drm_connector *connector,
++ const u8 *dpcd,
++ unsigned pix_clock,
++ unsigned *dp_lanes, unsigned *dp_rate);
+ extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
+ u8 power_state);
+ extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
--- /dev/null
+From 482047d87b4642d3949c532bb7df4926119edc25 Mon Sep 17 00:00:00 2001
+From: Sunil Goutham <sgoutham@cavium.com>
+Date: Wed, 24 Feb 2016 16:40:50 +0530
+Subject: [PATCH 083/135] net: thunderx: Fix for Qset error due to CQ full
+
+[ Upstream commit 4c0b6eaf373a5323f03a3a20c42fc435715b073d ]
+
+On Thunderx pass 1.x and pass2 due to a HW errata default CQ
+DROP_LEVEL of 0x80 is not sufficient to avoid CQ_WR_FULL Qset
+error when packets are being received at >20Mpps resulting in
+complete stall of packet reception.
+
+This patch will configure it to 0x100, which is what is expected
+by HW on ThunderX. On future passes of ThunderX and other chips the
+HW default/reset value will be 0x100 or higher, and hence is not overwritten.
+
+Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cavium/thunder/nic.h | 9 +++++++++
+ drivers/net/ethernet/cavium/thunder/nic_main.c | 6 ++++++
+ drivers/net/ethernet/cavium/thunder/nic_reg.h | 2 +-
+ 3 files changed, 16 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/cavium/thunder/nic.h
++++ b/drivers/net/ethernet/cavium/thunder/nic.h
+@@ -116,6 +116,15 @@
+ #define NIC_PF_INTR_ID_MBOX0 8
+ #define NIC_PF_INTR_ID_MBOX1 9
+
++/* Minimum FIFO level before all packets for the CQ are dropped
++ *
++ * This value ensures that once a packet has been "accepted"
++ * for reception it will not get dropped due to non-availability
++ * of CQ descriptor. An errata in HW mandates this value to be
++ * atleast 0x100.
++ */
++#define NICPF_CQM_MIN_DROP_LEVEL 0x100
++
+ /* Global timer for CQ timer thresh interrupts
+ * Calculated for SCLK of 700Mhz
+ * value written should be a 1/16th of what is expected
+--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
+@@ -309,6 +309,7 @@ static void nic_set_lmac_vf_mapping(stru
+ static void nic_init_hw(struct nicpf *nic)
+ {
+ int i;
++ u64 cqm_cfg;
+
+ /* Enable NIC HW block */
+ nic_reg_write(nic, NIC_PF_CFG, 0x3);
+@@ -345,6 +346,11 @@ static void nic_init_hw(struct nicpf *ni
+ /* Enable VLAN ethertype matching and stripping */
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
+ (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
++
++ /* Check if HW expected value is higher (could be in future chips) */
++ cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
++ if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
++ nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
+ }
+
+ /* Channel parse index configuration */
+--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
++++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
+@@ -21,7 +21,7 @@
+ #define NIC_PF_TCP_TIMER (0x0060)
+ #define NIC_PF_BP_CFG (0x0080)
+ #define NIC_PF_RRM_CFG (0x0088)
+-#define NIC_PF_CQM_CF (0x00A0)
++#define NIC_PF_CQM_CFG (0x00A0)
+ #define NIC_PF_CNM_CF (0x00A8)
+ #define NIC_PF_CNM_STATUS (0x00B0)
+ #define NIC_PF_CQ_AVG_CFG (0x00C0)
--- /dev/null
+From d72f3eed75067a35153ea20b13a471cee5e67c00 Mon Sep 17 00:00:00 2001
+From: Tirumalesh Chalamarla <tchalamarla@caviumnetworks.com>
+Date: Tue, 16 Feb 2016 12:08:49 -0800
+Subject: [PATCH 084/135] ahci: Workaround for ThunderX Errata#22536
+
+[ Upstream commit d243bed32f5042582896237f88fa1798aee55ff9 ]
+
+Due to Errata in ThunderX, HOST_IRQ_STAT should be
+cleared before leaving the interrupt handler.
+The patch attempts to satisfy the need.
+
+Changes from V2:
+ - removed newfile
+ - code is now under CONFIG_ARM64
+
+Changes from V1:
+ - Rebased on top of libata/for-4.6
+ - Moved ThunderX intr handler to new file
+
+tj: Minor adjustments to comments.
+
+Signed-off-by: Tirumalesh Chalamarla <tchalamarla@caviumnetworks.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ata/ahci.c | 43 +++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 43 insertions(+)
+
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1331,6 +1331,44 @@ static inline void ahci_gtf_filter_worka
+ {}
+ #endif
+
++#ifdef CONFIG_ARM64
++/*
++ * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
++ * Workaround is to make sure all pending IRQs are served before leaving
++ * handler.
++ */
++static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
++{
++ struct ata_host *host = dev_instance;
++ struct ahci_host_priv *hpriv;
++ unsigned int rc = 0;
++ void __iomem *mmio;
++ u32 irq_stat, irq_masked;
++ unsigned int handled = 1;
++
++ VPRINTK("ENTER\n");
++ hpriv = host->private_data;
++ mmio = hpriv->mmio;
++ irq_stat = readl(mmio + HOST_IRQ_STAT);
++ if (!irq_stat)
++ return IRQ_NONE;
++
++ do {
++ irq_masked = irq_stat & hpriv->port_map;
++ spin_lock(&host->lock);
++ rc = ahci_handle_port_intr(host, irq_masked);
++ if (!rc)
++ handled = 0;
++ writel(irq_stat, mmio + HOST_IRQ_STAT);
++ irq_stat = readl(mmio + HOST_IRQ_STAT);
++ spin_unlock(&host->lock);
++ } while (irq_stat);
++ VPRINTK("EXIT\n");
++
++ return IRQ_RETVAL(handled);
++}
++#endif
++
+ /*
+ * ahci_init_msix() only implements single MSI-X support, not multiple
+ * MSI-X per-port interrupts. This is needed for host controllers that only
+@@ -1546,6 +1584,11 @@ static int ahci_init_one(struct pci_dev
+ if (ahci_broken_devslp(pdev))
+ hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
+
++#ifdef CONFIG_ARM64
++ if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
++ hpriv->irq_handler = ahci_thunderx_irq_handler;
++#endif
++
+ /* save initial config */
+ ahci_pci_save_initial_config(pdev, hpriv);
+
--- /dev/null
+From 94dcb20e9ee73a8752525d6acaf38e0dbba97d55 Mon Sep 17 00:00:00 2001
+From: Andrew Pinski <apinski@cavium.com>
+Date: Wed, 24 Feb 2016 17:44:57 -0800
+Subject: [PATCH 085/135] arm64: Add workaround for Cavium erratum 27456
+
+[ Upstream commit 104a0c02e8b1936c049e18a6d4e4ab040fb61213 ]
+
+On ThunderX T88 pass 1.x through 2.1 parts, broadcast TLBI
+instructions may cause the icache to become corrupted if it contains
+data for a non-current ASID.
+
+This patch implements the workaround (which invalidates the local
+icache when switching the mm) by using code patching.
+
+Signed-off-by: Andrew Pinski <apinski@cavium.com>
+Signed-off-by: David Daney <david.daney@cavium.com>
+Reviewed-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig | 11 +++++++++++
+ arch/arm64/include/asm/cpufeature.h | 3 ++-
+ arch/arm64/kernel/cpu_errata.c | 9 +++++++++
+ arch/arm64/mm/proc.S | 12 ++++++++++++
+ 4 files changed, 34 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -401,6 +401,17 @@ config CAVIUM_ERRATUM_23154
+
+ If unsure, say Y.
+
++config CAVIUM_ERRATUM_27456
++ bool "Cavium erratum 27456: Broadcast TLBI instructions may cause icache corruption"
++ default y
++ help
++ On ThunderX T88 pass 1.x through 2.1 parts, broadcast TLBI
++ instructions may cause the icache to become corrupted if it
++ contains data for a non-current ASID. The fix is to
++ invalidate the icache when changing the mm context.
++
++ If unsure, say Y.
++
+ endmenu
+
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -30,8 +30,9 @@
+ #define ARM64_HAS_LSE_ATOMICS 5
+ #define ARM64_WORKAROUND_CAVIUM_23154 6
+ #define ARM64_WORKAROUND_834220 7
++#define ARM64_WORKAROUND_CAVIUM_27456 8
+
+-#define ARM64_NCAPS 8
++#define ARM64_NCAPS 9
+
+ #ifndef __ASSEMBLY__
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -100,6 +100,15 @@ const struct arm64_cpu_capabilities arm6
+ MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
+ },
+ #endif
++#ifdef CONFIG_CAVIUM_ERRATUM_27456
++ {
++ /* Cavium ThunderX, T88 pass 1.x - 2.1 */
++ .desc = "Cavium erratum 27456",
++ .capability = ARM64_WORKAROUND_CAVIUM_27456,
++ MIDR_RANGE(MIDR_THUNDERX, 0x00,
++ (1 << MIDR_VARIANT_SHIFT) | 1),
++ },
++#endif
+ {
+ }
+ };
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -25,6 +25,8 @@
+ #include <asm/hwcap.h>
+ #include <asm/pgtable-hwdef.h>
+ #include <asm/pgtable.h>
++#include <asm/cpufeature.h>
++#include <asm/alternative.h>
+
+ #include "proc-macros.S"
+
+@@ -137,7 +139,17 @@ ENTRY(cpu_do_switch_mm)
+ bfi x0, x1, #48, #16 // set the ASID
+ msr ttbr0_el1, x0 // set TTBR0
+ isb
++alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
+ ret
++ nop
++ nop
++ nop
++alternative_else
++ ic iallu
++ dsb nsh
++ isb
++ ret
++alternative_endif
+ ENDPROC(cpu_do_switch_mm)
+
+ .section ".text.init", #alloc, #execinstr
--- /dev/null
+From 581919f8939f278d7553e3ec38470f13a2b34235 Mon Sep 17 00:00:00 2001
+From: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
+Date: Thu, 3 Mar 2016 17:54:54 +0100
+Subject: [PATCH 086/135] tipc: fix nullptr crash during subscription cancel
+
+[ Upstream commit 4de13d7ed6ffdcbb34317acaa9236f121176f5f8 ]
+
+commit 4d5cfcba2f6e ('tipc: fix connection abort during subscription
+cancel'), removes the check for a valid subscription before calling
+tipc_nametbl_subscribe().
+
+This will lead to a nullptr exception when we process a
+subscription cancel request. For a cancel request, a null
+subscription is passed to tipc_nametbl_subscribe(), resulting
+in an exception.
+
+In this commit, we call tipc_nametbl_subscribe() only for
+a valid subscription.
+
+Fixes: 4d5cfcba2f6e ('tipc: fix connection abort during subscription cancel')
+Reported-by: Anders Widell <anders.widell@ericsson.com>
+Signed-off-by: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
+Acked-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/subscr.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/tipc/subscr.c
++++ b/net/tipc/subscr.c
+@@ -296,7 +296,8 @@ static void tipc_subscrb_rcv_cb(struct n
+ if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub))
+ return tipc_conn_terminate(tn->topsrv, subscrb->conid);
+
+- tipc_nametbl_subscribe(sub);
++ if (sub)
++ tipc_nametbl_subscribe(sub);
+ }
+
+ /* Handle one request to establish a new subscriber */
--- /dev/null
+From 92ecfb507752c4a454239057abda7a0d2737a265 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Wed, 10 Feb 2016 19:59:05 +0200
+Subject: [PATCH 087/135] drm/i915: Fix hpd live status bits for g4x
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit 0780cd36c7af70c55981ee624084f0f48cae9b95 ]
+
+Looks like g4x hpd live status bits actually agree with the spec. At
+least they do on the machine I have, and apparently on Nick Bowler's
+g4x as well.
+
+So gm45 may be the only platform where they don't agree. At least
+that seems to be the case based on the (somewhat incomplete)
+logs/dumps in [1], and Daniel has also tested this on his gm45
+sometime in the past.
+
+So let's change the bits to match the spec on g4x. That actually makes
+the g4x bits identical to vlv/chv so we can just share the code
+between those platforms, leaving gm45 as the special case.
+
+[1] https://bugzilla.kernel.org/show_bug.cgi?id=52361
+
+Cc: Shashank Sharma <shashank.sharma@intel.com>
+Cc: Sonika Jindal <sonika.jindal@intel.com>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Jani Nikula <jani.nikula@linux.intel.com>
+Cc: Nick Bowler <nbowler@draconx.ca>
+References: https://lists.freedesktop.org/archives/dri-devel/2016-February/100382.html
+Reported-by: Nick Bowler <nbowler@draconx.ca>
+Cc: stable@vger.kernel.org
+Fixes: 237ed86c693d ("drm/i915: Check live status before reading edid")
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/1455127145-20087-1-git-send-email-ville.syrjala@linux.intel.com
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_reg.h | 15 ++++++++-------
+ drivers/gpu/drm/i915/intel_dp.c | 14 +++++++-------
+ 2 files changed, 15 insertions(+), 14 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -3240,19 +3240,20 @@ enum skl_disp_power_wells {
+
+ #define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114)
+ /*
+- * HDMI/DP bits are gen4+
++ * HDMI/DP bits are g4x+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for for experimental
+ * evidence.
+ */
+-#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
++/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
++#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
++#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
++#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
++/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
++#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
+ #define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
+-#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
+-/* VLV DP/HDMI bits again match Bspec */
+-#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
+-#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
+-#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
++#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+ #define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+ #define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
+ #define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -4592,20 +4592,20 @@ static bool g4x_digital_port_connected(s
+ return I915_READ(PORT_HOTPLUG_STAT) & bit;
+ }
+
+-static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
+- struct intel_digital_port *port)
++static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
++ struct intel_digital_port *port)
+ {
+ u32 bit;
+
+ switch (port->port) {
+ case PORT_B:
+- bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
++ bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
+ break;
+ case PORT_C:
+- bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
++ bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
+ break;
+ case PORT_D:
+- bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
++ bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
+ break;
+ default:
+ MISSING_CASE(port->port);
+@@ -4657,8 +4657,8 @@ bool intel_digital_port_connected(struct
+ return cpt_digital_port_connected(dev_priv, port);
+ else if (IS_BROXTON(dev_priv))
+ return bxt_digital_port_connected(dev_priv, port);
+- else if (IS_VALLEYVIEW(dev_priv))
+- return vlv_digital_port_connected(dev_priv, port);
++ else if (IS_GM45(dev_priv))
++ return gm45_digital_port_connected(dev_priv, port);
+ else
+ return g4x_digital_port_connected(dev_priv, port);
+ }
--- /dev/null
+From 41c634f80bb2cc44a409381122f0ae3966a71929 Mon Sep 17 00:00:00 2001
+From: Libin Yang <libin.yang@linux.intel.com>
+Date: Thu, 14 Jan 2016 14:09:00 +0800
+Subject: [PATCH 088/135] ALSA: hda - add codec support for Kabylake display
+ audio codec
+
+[ Upstream commit 91815d8aa7e2f45d30e51caa297061ad893628d9 ]
+
+This patch adds codec ID (0x8086280b) for Kabylake display codec
+and apply the hsw fix-ups to Kabylake.
+
+Signed-off-by: Libin Yang <libin.yang@linux.intel.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_hdmi.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -51,8 +51,10 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't
+ #define is_broadwell(codec) ((codec)->core.vendor_id == 0x80862808)
+ #define is_skylake(codec) ((codec)->core.vendor_id == 0x80862809)
+ #define is_broxton(codec) ((codec)->core.vendor_id == 0x8086280a)
++#define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b)
+ #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
+- || is_skylake(codec) || is_broxton(codec))
++ || is_skylake(codec) || is_broxton(codec) \
++ || is_kabylake(codec))
+
+ #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
+ #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
+@@ -3584,6 +3586,7 @@ HDA_CODEC_ENTRY(0x80862807, "Haswell HDM
+ HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI", patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI", patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_generic_hdmi),
++HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_generic_hdmi),
--- /dev/null
+From e4f58be65b185dcfc9ba8961bad0bd7d50e0bd3e Mon Sep 17 00:00:00 2001
+From: Gavin Guo <gavin.guo@canonical.com>
+Date: Wed, 20 Jan 2016 12:36:58 +0800
+Subject: [PATCH 089/135] sched/numa: Fix use-after-free bug in the
+ task_numa_compare
+
+[ Upstream commit 1dff76b92f69051e579bdc131e01500da9fa2a91 ]
+
+The following message can be observed on the Ubuntu v3.13.0-65 with KASan
+backported:
+
+ ==================================================================
+ BUG: KASan: use after free in task_numa_find_cpu+0x64c/0x890 at addr ffff880dd393ecd8
+ Read of size 8 by task qemu-system-x86/3998900
+ =============================================================================
+ BUG kmalloc-128 (Tainted: G B ): kasan: bad access detected
+ -----------------------------------------------------------------------------
+
+ INFO: Allocated in task_numa_fault+0xc1b/0xed0 age=41980 cpu=18 pid=3998890
+ __slab_alloc+0x4f8/0x560
+ __kmalloc+0x1eb/0x280
+ task_numa_fault+0xc1b/0xed0
+ do_numa_page+0x192/0x200
+ handle_mm_fault+0x808/0x1160
+ __do_page_fault+0x218/0x750
+ do_page_fault+0x1a/0x70
+ page_fault+0x28/0x30
+ SyS_poll+0x66/0x1a0
+ system_call_fastpath+0x1a/0x1f
+ INFO: Freed in task_numa_free+0x1d2/0x200 age=62 cpu=18 pid=0
+ __slab_free+0x2ab/0x3f0
+ kfree+0x161/0x170
+ task_numa_free+0x1d2/0x200
+ finish_task_switch+0x1d2/0x210
+ __schedule+0x5d4/0xc60
+ schedule_preempt_disabled+0x40/0xc0
+ cpu_startup_entry+0x2da/0x340
+ start_secondary+0x28f/0x360
+ Call Trace:
+ [<ffffffff81a6ce35>] dump_stack+0x45/0x56
+ [<ffffffff81244aed>] print_trailer+0xfd/0x170
+ [<ffffffff8124ac36>] object_err+0x36/0x40
+ [<ffffffff8124cbf9>] kasan_report_error+0x1e9/0x3a0
+ [<ffffffff8124d260>] kasan_report+0x40/0x50
+ [<ffffffff810dda7c>] ? task_numa_find_cpu+0x64c/0x890
+ [<ffffffff8124bee9>] __asan_load8+0x69/0xa0
+ [<ffffffff814f5c38>] ? find_next_bit+0xd8/0x120
+ [<ffffffff810dda7c>] task_numa_find_cpu+0x64c/0x890
+ [<ffffffff810de16c>] task_numa_migrate+0x4ac/0x7b0
+ [<ffffffff810de523>] numa_migrate_preferred+0xb3/0xc0
+ [<ffffffff810e0b88>] task_numa_fault+0xb88/0xed0
+ [<ffffffff8120ef02>] do_numa_page+0x192/0x200
+ [<ffffffff81211038>] handle_mm_fault+0x808/0x1160
+ [<ffffffff810d7dbd>] ? sched_clock_cpu+0x10d/0x160
+ [<ffffffff81068c52>] ? native_load_tls+0x82/0xa0
+ [<ffffffff81a7bd68>] __do_page_fault+0x218/0x750
+ [<ffffffff810c2186>] ? hrtimer_try_to_cancel+0x76/0x160
+ [<ffffffff81a6f5e7>] ? schedule_hrtimeout_range_clock.part.24+0xf7/0x1c0
+ [<ffffffff81a7c2ba>] do_page_fault+0x1a/0x70
+ [<ffffffff81a772e8>] page_fault+0x28/0x30
+ [<ffffffff8128cbd4>] ? do_sys_poll+0x1c4/0x6d0
+ [<ffffffff810e64f6>] ? enqueue_task_fair+0x4b6/0xaa0
+ [<ffffffff810233c9>] ? sched_clock+0x9/0x10
+ [<ffffffff810cf70a>] ? resched_task+0x7a/0xc0
+ [<ffffffff810d0663>] ? check_preempt_curr+0xb3/0x130
+ [<ffffffff8128b5c0>] ? poll_select_copy_remaining+0x170/0x170
+ [<ffffffff810d3bc0>] ? wake_up_state+0x10/0x20
+ [<ffffffff8112a28f>] ? drop_futex_key_refs.isra.14+0x1f/0x90
+ [<ffffffff8112d40e>] ? futex_requeue+0x3de/0xba0
+ [<ffffffff8112e49e>] ? do_futex+0xbe/0x8f0
+ [<ffffffff81022c89>] ? read_tsc+0x9/0x20
+ [<ffffffff8111bd9d>] ? ktime_get_ts+0x12d/0x170
+ [<ffffffff8108f699>] ? timespec_add_safe+0x59/0xe0
+ [<ffffffff8128d1f6>] SyS_poll+0x66/0x1a0
+ [<ffffffff81a830dd>] system_call_fastpath+0x1a/0x1f
+
+As commit 1effd9f19324 ("sched/numa: Fix unsafe get_task_struct() in
+task_numa_assign()") points out, rcu_read_lock() cannot protect the
+task_struct from being freed in finish_task_switch(). The bug happens
+during the calculation of imp, which requires access to p->numa_faults,
+which may already have been freed via the following path:
+
+do_exit()
+ current->flags |= PF_EXITING;
+ release_task()
+ ~~delayed_put_task_struct()~~
+ schedule()
+ ...
+ ...
+rq->curr = next;
+ context_switch()
+ finish_task_switch()
+ put_task_struct()
+ __put_task_struct()
+ task_numa_free()
+
+The fix here is to call get_task_struct() early, before the end of the
+dst_rq->lock critical section, to protect the calculation process, and
+to call put_task_struct() at the corresponding point if the dst_rq->curr
+ultimately cannot be assigned.
+
+
+Additional credit to Liang Chen who helped fix the error logic and add the
+put_task_struct() to the place it missed.
+
+Signed-off-by: Gavin Guo <gavin.guo@canonical.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: jay.vosburgh@canonical.com
+Cc: liang.chen@canonical.com
+Link: http://lkml.kernel.org/r/1453264618-17645-1-git-send-email-gavin.guo@canonical.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/fair.c | 30 +++++++++++++++++++++++-------
+ 1 file changed, 23 insertions(+), 7 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1191,8 +1191,6 @@ static void task_numa_assign(struct task
+ {
+ if (env->best_task)
+ put_task_struct(env->best_task);
+- if (p)
+- get_task_struct(p);
+
+ env->best_task = p;
+ env->best_imp = imp;
+@@ -1260,20 +1258,30 @@ static void task_numa_compare(struct tas
+ long imp = env->p->numa_group ? groupimp : taskimp;
+ long moveimp = imp;
+ int dist = env->dist;
++ bool assigned = false;
+
+ rcu_read_lock();
+
+ raw_spin_lock_irq(&dst_rq->lock);
+ cur = dst_rq->curr;
+ /*
+- * No need to move the exiting task, and this ensures that ->curr
+- * wasn't reaped and thus get_task_struct() in task_numa_assign()
+- * is safe under RCU read lock.
+- * Note that rcu_read_lock() itself can't protect from the final
+- * put_task_struct() after the last schedule().
++ * No need to move the exiting task or idle task.
+ */
+ if ((cur->flags & PF_EXITING) || is_idle_task(cur))
+ cur = NULL;
++ else {
++ /*
++ * The task_struct must be protected here to protect the
++ * p->numa_faults access in the task_weight since the
++ * numa_faults could already be freed in the following path:
++ * finish_task_switch()
++ * --> put_task_struct()
++ * --> __put_task_struct()
++ * --> task_numa_free()
++ */
++ get_task_struct(cur);
++ }
++
+ raw_spin_unlock_irq(&dst_rq->lock);
+
+ /*
+@@ -1357,6 +1365,7 @@ balance:
+ */
+ if (!load_too_imbalanced(src_load, dst_load, env)) {
+ imp = moveimp - 1;
++ put_task_struct(cur);
+ cur = NULL;
+ goto assign;
+ }
+@@ -1382,9 +1391,16 @@ balance:
+ env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
+
+ assign:
++ assigned = true;
+ task_numa_assign(env, cur, imp);
+ unlock:
+ rcu_read_unlock();
++ /*
++ * The dst_rq->curr isn't assigned. The protection for task_struct is
++ * finished.
++ */
++ if (cur && !assigned)
++ put_task_struct(cur);
+ }
+
+ static void task_numa_find_cpu(struct task_numa_env *env,
--- /dev/null
+From 7b456bd7a3592a4651022e91ba71447e8e91ce35 Mon Sep 17 00:00:00 2001
+From: Aviv Greenberg <avivgr@gmail.com>
+Date: Fri, 16 Oct 2015 08:48:51 -0300
+Subject: [PATCH 090/135] UVC: Add support for R200 depth camera
+
+[ Upstream commit 5d8d8db851ef81337e7026b32a9d5a9cfb2271d5 ]
+
+Add support for Intel R200 depth camera in uvc driver.
+This includes adding new uvc GUIDs for the new pixel formats,
+adding new V4L pixel format definition to user api headers,
+and updating the uvc driver GUID-to-4cc tables with the new formats.
+
+Tested-by: Greenberg, Aviv D <aviv.d.greenberg@intel.com>
+Signed-off-by: Aviv Greenberg <aviv.d.greenberg@intel.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/usb/uvc/uvc_driver.c | 20 ++++++++++++++++++++
+ drivers/media/usb/uvc/uvcvideo.h | 12 ++++++++++++
+ include/uapi/linux/videodev2.h | 3 +++
+ 3 files changed, 35 insertions(+)
+
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -148,6 +148,26 @@ static struct uvc_format_desc uvc_fmts[]
+ .guid = UVC_GUID_FORMAT_H264,
+ .fcc = V4L2_PIX_FMT_H264,
+ },
++ {
++ .name = "Greyscale 8 L/R (Y8I)",
++ .guid = UVC_GUID_FORMAT_Y8I,
++ .fcc = V4L2_PIX_FMT_Y8I,
++ },
++ {
++ .name = "Greyscale 12 L/R (Y12I)",
++ .guid = UVC_GUID_FORMAT_Y12I,
++ .fcc = V4L2_PIX_FMT_Y12I,
++ },
++ {
++ .name = "Depth data 16-bit (Z16)",
++ .guid = UVC_GUID_FORMAT_Z16,
++ .fcc = V4L2_PIX_FMT_Z16,
++ },
++ {
++ .name = "Bayer 10-bit (SRGGB10P)",
++ .guid = UVC_GUID_FORMAT_RW10,
++ .fcc = V4L2_PIX_FMT_SRGGB10P,
++ },
+ };
+
+ /* ------------------------------------------------------------------------
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -119,6 +119,18 @@
+ #define UVC_GUID_FORMAT_H264 \
+ { 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
++#define UVC_GUID_FORMAT_Y8I \
++ { 'Y', '8', 'I', ' ', 0x00, 0x00, 0x10, 0x00, \
++ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
++#define UVC_GUID_FORMAT_Y12I \
++ { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \
++ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
++#define UVC_GUID_FORMAT_Z16 \
++ { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
++ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
++#define UVC_GUID_FORMAT_RW10 \
++ { 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \
++ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
+ /* ------------------------------------------------------------------------
+ * Driver specific constants.
+--- a/include/uapi/linux/videodev2.h
++++ b/include/uapi/linux/videodev2.h
+@@ -621,6 +621,9 @@ struct v4l2_pix_format {
+ #define V4L2_PIX_FMT_JPGL v4l2_fourcc('J', 'P', 'G', 'L') /* JPEG-Lite */
+ #define V4L2_PIX_FMT_SE401 v4l2_fourcc('S', '4', '0', '1') /* se401 janggu compressed rgb */
+ #define V4L2_PIX_FMT_S5C_UYVY_JPG v4l2_fourcc('S', '5', 'C', 'I') /* S5C73M3 interleaved UYVY/JPEG */
++#define V4L2_PIX_FMT_Y8I v4l2_fourcc('Y', '8', 'I', ' ') /* Greyscale 8-bit L/R interleaved */
++#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
++#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
+
+ /* SDR formats - used only for Software Defined Radio devices */
+ #define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
--- /dev/null
+From d2ca38968a7d17f9687d1be0d03b13dc7be4b0cc Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Thu, 26 Nov 2015 14:00:46 +0200
+Subject: [PATCH 091/135] mmc: sdhci: Do not BUG on invalid vdd
+
+[ Upstream commit 9d5de93f6d543b356e39e225988ef443a7bce34c ]
+
+The driver may not be able to set the power correctly but that
+is not a reason to BUG().
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Reviewed-by: Venu Byravarasu <vbyravarasu@nvidia.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1315,7 +1315,9 @@ static void sdhci_set_power(struct sdhci
+ pwr = SDHCI_POWER_330;
+ break;
+ default:
+- BUG();
++ WARN(1, "%s: Invalid vdd %#x\n",
++ mmc_hostname(host->mmc), vdd);
++ break;
+ }
+ }
+
--- /dev/null
+From c684d74c4d307ebb9f54e8ec9308c44dd6cd4525 Mon Sep 17 00:00:00 2001
+From: Gal Pressman <galp@mellanox.com>
+Date: Wed, 2 Mar 2016 00:13:37 +0200
+Subject: [PATCH 092/135] net/mlx5e: Don't try to modify CQ moderation if it is
+ not supported
+
+[ Upstream commit 7524a5d88b94afef8397a79f1e664af5b7052c22 ]
+
+If CQ moderation is not supported by the device, print a warning on
+netdevice load, and return error when trying to modify/query cq
+moderation via ethtool.
+
+Fixes: f62b8bb8f2d3 ('net/mlx5: Extend mlx5_core to support ConnectX-4
+Ethernet functionality')
+Signed-off-by: Gal Pressman <galp@mellanox.com>
+
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 6 ++++++
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12 ++++++------
+ 2 files changed, 12 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -399,6 +399,9 @@ static int mlx5e_get_coalesce(struct net
+ {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
++ if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
++ return -ENOTSUPP;
++
+ coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
+ coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
+ coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
+@@ -416,6 +419,9 @@ static int mlx5e_set_coalesce(struct net
+ int tc;
+ int i;
+
++ if (!MLX5_CAP_GEN(mdev, cq_moderation))
++ return -ENOTSUPP;
++
+ priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
+ priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
+ priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -863,12 +863,10 @@ static int mlx5e_open_cq(struct mlx5e_ch
+ if (err)
+ goto err_destroy_cq;
+
+- err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+- moderation_usecs,
+- moderation_frames);
+- if (err)
+- goto err_destroy_cq;
+-
++ if (MLX5_CAP_GEN(mdev, cq_moderation))
++ mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
++ moderation_usecs,
++ moderation_frames);
+ return 0;
+
+ err_destroy_cq:
+@@ -1963,6 +1961,8 @@ static int mlx5e_check_required_hca_cap(
+ }
+ if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
+ mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
++ if (!MLX5_CAP_GEN(mdev, cq_moderation))
++ mlx5_core_warn(mdev, "CQ modiration is not supported\n");
+
+ return 0;
+ }
--- /dev/null
+From f27c400e1e9704f855009d6f23cf4827bdb09055 Mon Sep 17 00:00:00 2001
+From: Gal Pressman <galp@mellanox.com>
+Date: Wed, 2 Mar 2016 00:13:38 +0200
+Subject: [PATCH 093/135] net/mlx5e: Don't modify CQ before it was created
+
+[ Upstream commit 2fcb92fbd04eef26dfe7e67839da6262d83d6b65 ]
+
+Calling mlx5e_set_coalesce while the interface is down will result in
+modifying CQs that don't exist.
+
+Fixes: f62b8bb8f2d3 ('net/mlx5: Extend mlx5_core to support ConnectX-4
+Ethernet functionality')
+Signed-off-by: Gal Pressman <galp@mellanox.com>
+
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -422,11 +422,15 @@ static int mlx5e_set_coalesce(struct net
+ if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ return -ENOTSUPP;
+
++ mutex_lock(&priv->state_lock);
+ priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
+ priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
+ priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
+ priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
+
++ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
++ goto out;
++
+ for (i = 0; i < priv->params.num_channels; ++i) {
+ c = priv->channel[i];
+
+@@ -442,6 +446,8 @@ static int mlx5e_set_coalesce(struct net
+ coal->rx_max_coalesced_frames);
+ }
+
++out:
++ mutex_unlock(&priv->state_lock);
+ return 0;
+ }
+
--- /dev/null
+From 354b1472c4922e12f28e4f9bbd290266132efef3 Mon Sep 17 00:00:00 2001
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Date: Mon, 16 Nov 2015 14:35:48 +0100
+Subject: [PATCH 094/135] s390/pci_dma: fix DMA table corruption with > 4 TB
+ main memory
+
+[ Upstream commit 69eea95c48857c9dfcac120d6acea43027627b28 ]
+
+DMA addresses returned from map_page() are calculated by using an iommu
+bitmap plus a start_dma offset. The size of this bitmap is based on the main
+memory size. If we have more than (4 TB - start_dma) main memory, the DMA
+address calculation will also produce addresses > 4 TB. Such addresses
+cannot be inserted in the 3-level DMA page table, instead the entries
+modulo 4 TB will be overwritten.
+
+Fix this by restricting the iommu bitmap size to (4 TB - start_dma).
+Also set zdev->end_dma to the actual end address of the usable
+range, instead of the theoretical maximum as reported by the hardware,
+which fixes a sanity check in dma_map() and also the IOMMU API domain
+geometry aperture calculation.
+
+Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Reviewed-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/include/asm/pci_dma.h | 2 ++
+ arch/s390/pci/pci.c | 3 +--
+ arch/s390/pci/pci_dma.c | 19 ++++++++++++++-----
+ 3 files changed, 17 insertions(+), 7 deletions(-)
+
+--- a/arch/s390/include/asm/pci_dma.h
++++ b/arch/s390/include/asm/pci_dma.h
+@@ -23,6 +23,8 @@ enum zpci_ioat_dtype {
+ #define ZPCI_IOTA_FS_2G 2
+ #define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
+
++#define ZPCI_TABLE_SIZE_RT (1UL << 42)
++
+ #define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
+ #define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
+ #define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -701,8 +701,7 @@ static int zpci_restore(struct device *d
+ goto out;
+
+ zpci_map_resources(pdev);
+- zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
+- zdev->start_dma + zdev->iommu_size - 1,
++ zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+ (u64) zdev->dma_table);
+
+ out:
+--- a/arch/s390/pci/pci_dma.c
++++ b/arch/s390/pci/pci_dma.c
+@@ -458,7 +458,19 @@ int zpci_dma_init_device(struct zpci_dev
+ goto out_clean;
+ }
+
+- zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
++ /*
++ * Restrict the iommu bitmap size to the minimum of the following:
++ * - main memory size
++ * - 3-level pagetable address limit minus start_dma offset
++ * - DMA address range allowed by the hardware (clp query pci fn)
++ *
++ * Also set zdev->end_dma to the actual end address of the usable
++ * range, instead of the theoretical maximum as reported by hardware.
++ */
++ zdev->iommu_size = min3((u64) high_memory,
++ ZPCI_TABLE_SIZE_RT - zdev->start_dma,
++ zdev->end_dma - zdev->start_dma + 1);
++ zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
+ zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
+ zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
+ if (!zdev->iommu_bitmap) {
+@@ -466,10 +478,7 @@ int zpci_dma_init_device(struct zpci_dev
+ goto out_reg;
+ }
+
+- rc = zpci_register_ioat(zdev,
+- 0,
+- zdev->start_dma + PAGE_OFFSET,
+- zdev->start_dma + zdev->iommu_size - 1,
++ rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+ (u64) zdev->dma_table);
+ if (rc)
+ goto out_reg;
--- /dev/null
+From 5fc6dd89f67b8f94a5a7bbe7e884813ee0d2a4c3 Mon Sep 17 00:00:00 2001
+From: Ching Huang <ching2048@areca.com.tw>
+Date: Wed, 25 Nov 2015 19:36:02 +0800
+Subject: [PATCH 095/135] arcmsr: fixed getting wrong configuration data
+
+[ Upstream commit 251e2d25bfb72b69edd414abfa42a41191d9657a ]
+
+Fixed getting wrong configuration data of adapter type B and type D.
+
+Signed-off-by: Ching Huang <ching2048@areca.com.tw>
+Reviewed-by: Hannes Reinicke <hare@suse.de>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/arcmsr/arcmsr_hba.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -2694,15 +2694,15 @@ static bool arcmsr_hbaB_get_config(struc
+ acb->firm_model,
+ acb->firm_version);
+
+- acb->signature = readl(®->message_rwbuffer[1]);
++ acb->signature = readl(®->message_rwbuffer[0]);
+ /*firm_signature,1,00-03*/
+- acb->firm_request_len = readl(®->message_rwbuffer[2]);
++ acb->firm_request_len = readl(®->message_rwbuffer[1]);
+ /*firm_request_len,1,04-07*/
+- acb->firm_numbers_queue = readl(®->message_rwbuffer[3]);
++ acb->firm_numbers_queue = readl(®->message_rwbuffer[2]);
+ /*firm_numbers_queue,2,08-11*/
+- acb->firm_sdram_size = readl(®->message_rwbuffer[4]);
++ acb->firm_sdram_size = readl(®->message_rwbuffer[3]);
+ /*firm_sdram_size,3,12-15*/
+- acb->firm_hd_channels = readl(®->message_rwbuffer[5]);
++ acb->firm_hd_channels = readl(®->message_rwbuffer[4]);
+ /*firm_ide_channels,4,16-19*/
+ acb->firm_cfg_version = readl(®->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
+ /*firm_ide_channels,4,16-19*/
+@@ -2880,15 +2880,15 @@ static bool arcmsr_hbaD_get_config(struc
+ iop_device_map++;
+ count--;
+ }
+- acb->signature = readl(®->msgcode_rwbuffer[1]);
++ acb->signature = readl(®->msgcode_rwbuffer[0]);
+ /*firm_signature,1,00-03*/
+- acb->firm_request_len = readl(®->msgcode_rwbuffer[2]);
++ acb->firm_request_len = readl(®->msgcode_rwbuffer[1]);
+ /*firm_request_len,1,04-07*/
+- acb->firm_numbers_queue = readl(®->msgcode_rwbuffer[3]);
++ acb->firm_numbers_queue = readl(®->msgcode_rwbuffer[2]);
+ /*firm_numbers_queue,2,08-11*/
+- acb->firm_sdram_size = readl(®->msgcode_rwbuffer[4]);
++ acb->firm_sdram_size = readl(®->msgcode_rwbuffer[3]);
+ /*firm_sdram_size,3,12-15*/
+- acb->firm_hd_channels = readl(®->msgcode_rwbuffer[5]);
++ acb->firm_hd_channels = readl(®->msgcode_rwbuffer[4]);
+ /*firm_hd_channels,4,16-19*/
+ acb->firm_cfg_version = readl(®->msgcode_rwbuffer[25]);
+ pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
--- /dev/null
+From 298b897bf44f9321243dc4614d0568433e820da8 Mon Sep 17 00:00:00 2001
+From: Ching Huang <ching2048@areca.com.tw>
+Date: Wed, 25 Nov 2015 19:41:23 +0800
+Subject: [PATCH 096/135] arcmsr: fixes not release allocated resource
+
+[ Upstream commit 98f90debc2b64a40a416dd9794ac2d8de6b43af2 ]
+
+Releasing allocated resource if get configuration data failed.
+
+Signed-off-by: Ching Huang <ching2048@areca.com.tw>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Reviewed-by: Hannes Reinicke <hare@suse.de>
+Reviewed-by: Tomas Henzl <thenzl@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/arcmsr/arcmsr_hba.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -2664,7 +2664,7 @@ static bool arcmsr_hbaB_get_config(struc
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
+ miscellaneous data' timeout \n", acb->host->host_no);
+- return false;
++ goto err_free_dma;
+ }
+ count = 8;
+ while (count){
+@@ -2707,6 +2707,10 @@ static bool arcmsr_hbaB_get_config(struc
+ acb->firm_cfg_version = readl(®->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
+ /*firm_ide_channels,4,16-19*/
+ return true;
++err_free_dma:
++ dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
++ acb->dma_coherent2, acb->dma_coherent_handle2);
++ return false;
+ }
+
+ static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
--- /dev/null
+From 864900991b27198b2006f7e8abc50599ea923968 Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Wed, 27 Jan 2016 22:29:34 -0800
+Subject: [PATCH 097/135] Drivers: hv: vmbus: avoid infinite loop in
+ init_vp_index()
+
+[ Upstream commit 79fd8e706637a5c7c41f9498fe0fbfb437abfdc8 ]
+
+When we pick a CPU to use for a new subchannel we try to find a non-used
+one on the appropriate NUMA node; we keep track of them with the
+primary->alloced_cpus_in_node mask. Under normal circumstances we don't
+run out of available CPUs, but it is possible when we don't initialize
+some CPUs in Linux, e.g. when we boot with the 'nr_cpus=' limitation.
+
+Avoid the infinite loop in init_vp_index() by checking that we still have
+non-used CPUs in the alloced_cpus_in_node mask and resetting it in case
+we don't.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hv/channel_mgmt.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -459,6 +459,17 @@ static void init_vp_index(struct vmbus_c
+ cpumask_of_node(primary->numa_node));
+
+ cur_cpu = -1;
++
++ /*
++ * Normally Hyper-V host doesn't create more subchannels than there
++ * are VCPUs on the node but it is possible when not all present VCPUs
++ * on the node are initialized by guest. Clear the alloced_cpus_in_node
++ * to start over.
++ */
++ if (cpumask_equal(&primary->alloced_cpus_in_node,
++ cpumask_of_node(primary->numa_node)))
++ cpumask_clear(&primary->alloced_cpus_in_node);
++
+ while (true) {
+ cur_cpu = cpumask_next(cur_cpu, &available_mask);
+ if (cur_cpu >= nr_cpu_ids) {
--- /dev/null
+From 04023b914cfb9c54dc70f1407086536218145729 Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Wed, 27 Jan 2016 22:29:35 -0800
+Subject: [PATCH 098/135] Drivers: hv: vmbus: avoid scheduling in interrupt
+ context in vmbus_initiate_unload()
+
+[ Upstream commit 415719160de3fae3bb9cbc617664649919cd00d0 ]
+
+We have to call vmbus_initiate_unload() on crash to make kdump work, but
+the crash can also happen in interrupt context (e.g. SysRq + c results
+in one), where we can't schedule or the following will happen:
+
+[ 314.905786] bad: scheduling from the idle thread!
+
+Just skipping the wait (and even adding some random wait here) won't help:
+to make host-side magic working we're supposed to receive CHANNELMSG_UNLOAD
+(and actually confirm the fact that we received it) but we can't use
+interrupt-base path (vmbus_isr()-> vmbus_on_msg_dpc()). Implement a simple
+busy wait ignoring all the other messages and use it if we're in an
+interrupt context.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hv/channel_mgmt.c | 44 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 43 insertions(+), 1 deletion(-)
+
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -28,6 +28,7 @@
+ #include <linux/list.h>
+ #include <linux/module.h>
+ #include <linux/completion.h>
++#include <linux/delay.h>
+ #include <linux/hyperv.h>
+
+ #include "hyperv_vmbus.h"
+@@ -499,6 +500,40 @@ static void init_vp_index(struct vmbus_c
+ channel->target_vp = hv_context.vp_index[cur_cpu];
+ }
+
++static void vmbus_wait_for_unload(void)
++{
++ int cpu = smp_processor_id();
++ void *page_addr = hv_context.synic_message_page[cpu];
++ struct hv_message *msg = (struct hv_message *)page_addr +
++ VMBUS_MESSAGE_SINT;
++ struct vmbus_channel_message_header *hdr;
++ bool unloaded = false;
++
++ while (1) {
++ if (msg->header.message_type == HVMSG_NONE) {
++ mdelay(10);
++ continue;
++ }
++
++ hdr = (struct vmbus_channel_message_header *)msg->u.payload;
++ if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
++ unloaded = true;
++
++ msg->header.message_type = HVMSG_NONE;
++ /*
++ * header.message_type needs to be written before we do
++ * wrmsrl() below.
++ */
++ mb();
++
++ if (msg->header.message_flags.msg_pending)
++ wrmsrl(HV_X64_MSR_EOM, 0);
++
++ if (unloaded)
++ break;
++ }
++}
++
+ /*
+ * vmbus_unload_response - Handler for the unload response.
+ */
+@@ -524,7 +559,14 @@ void vmbus_initiate_unload(void)
+ hdr.msgtype = CHANNELMSG_UNLOAD;
+ vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
+
+- wait_for_completion(&vmbus_connection.unload_event);
++ /*
++ * vmbus_initiate_unload() is also called on crash and the crash can be
++ * happening in an interrupt context, where scheduling is impossible.
++ */
++ if (!in_interrupt())
++ wait_for_completion(&vmbus_connection.unload_event);
++ else
++ vmbus_wait_for_unload();
+ }
+
+ /*
--- /dev/null
+From 6039340ca3dd6603715e81cbb9823fbc28a72574 Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Wed, 27 Jan 2016 22:29:36 -0800
+Subject: [PATCH 099/135] Drivers: hv: vmbus: don't manipulate with
+ clocksources on crash
+
+[ Upstream commit 3ccb4fd8f492f99aece21acc1bd6142275f26236 ]
+
+clocksource_change_rating() involves mutex usage and can't be called
+in interrupt context. It also makes sense to avoid doing redundant work
+on crash.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hv/hv.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/hv/hv.c
++++ b/drivers/hv/hv.c
+@@ -293,8 +293,14 @@ void hv_cleanup(void)
+ * Cleanup the TSC page based CS.
+ */
+ if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
+- clocksource_change_rating(&hyperv_cs_tsc, 10);
+- clocksource_unregister(&hyperv_cs_tsc);
++ /*
++ * Crash can happen in an interrupt context and unregistering
++ * a clocksource is impossible and redundant in this case.
++ */
++ if (!oops_in_progress) {
++ clocksource_change_rating(&hyperv_cs_tsc, 10);
++ clocksource_unregister(&hyperv_cs_tsc);
++ }
+
+ hypercall_msr.as_uint64 = 0;
+ wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
--- /dev/null
+From 635ddd20e481eec6a5fe6b33253abb0d1f174660 Mon Sep 17 00:00:00 2001
+From: "Manoj N. Kumar" <manoj@linux.vnet.ibm.com>
+Date: Fri, 4 Mar 2016 15:55:19 -0600
+Subject: [PATCH 100/135] cxlflash: Fix to avoid unnecessary scan with internal
+ LUNs
+
+[ Upstream commit 603ecce95f4817074a724a889cd88c3c8210f933 ]
+
+When switching to the internal LUN defined on the
+IBM CXL flash adapter, there is an unnecessary
+scan occurring on the second port. This scan leads
+to the following extra lines in the log:
+
+Dec 17 10:09:00 tul83p1 kernel: [ 3708.561134] cxlflash 0008:00:00.0: cxlflash_queuecommand: (scp=c0000000fc1f0f00) 11/1/0/0 cdb=(A0000000-00000000-10000000-00000000)
+Dec 17 10:09:00 tul83p1 kernel: [ 3708.561147] process_cmd_err: cmd failed afu_rc=32 scsi_rc=0 fc_rc=0 afu_extra=0xE, scsi_extra=0x0, fc_extra=0x0
+
+By definition, both of the internal LUNs are on the first port/channel.
+
+When the lun_mode is switched to internal LUN the
+same value for host->max_channel is retained. This
+causes an unnecessary scan over the second port/channel.
+
+This fix alters the host->max_channel to 0 (1 port), if internal
+LUNs are configured and switches it back to 1 (2 ports) while
+going back to external LUNs.
+
+Signed-off-by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>
+Acked-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
+Reviewed-by: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/cxlflash/main.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/scsi/cxlflash/main.c
++++ b/drivers/scsi/cxlflash/main.c
+@@ -2149,6 +2149,16 @@ static ssize_t lun_mode_store(struct dev
+ rc = kstrtouint(buf, 10, &lun_mode);
+ if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
+ afu->internal_lun = lun_mode;
++
++ /*
++ * When configured for internal LUN, there is only one channel,
++ * channel number 0, else there will be 2 (default).
++ */
++ if (afu->internal_lun)
++ shost->max_channel = 0;
++ else
++ shost->max_channel = NUM_FC_PORTS - 1;
++
+ afu_reset(cfg);
+ scsi_scan_host(cfg->host);
+ }
--- /dev/null
+From bbef1b4a396a050636e87b1f26968982ab73bea1 Mon Sep 17 00:00:00 2001
+From: Dasaratharaman Chandramouli <dasaratharaman.chandramouli@intel.com>
+Date: Thu, 4 Sep 2014 17:22:54 -0700
+Subject: [PATCH 101/135] intel_idle: Support for Intel Xeon Phi Processor x200
+ Product Family
+
+[ Upstream commit 281baf7a702693deaa45c98ef0c5161006b48257 ]
+
+Enables "Intel(R) Xeon Phi(TM) Processor x200 Product Family" support,
+formerly code-named KNL. It is based on modified Intel Atom Silvermont
+microarchitecture.
+
+Signed-off-by: Dasaratharaman Chandramouli <dasaratharaman.chandramouli@intel.com>
+[micah.barany@intel.com: adjusted values of residency and latency]
+Signed-off-by: Micah Barany <micah.barany@intel.com>
+[hubert.chrzaniuk@intel.com: removed deprecated CPUIDLE_FLAG_TIME_VALID flag]
+Signed-off-by: Hubert Chrzaniuk <hubert.chrzaniuk@intel.com>
+Signed-off-by: Pawel Karczewski <pawel.karczewski@intel.com>
+Signed-off-by: Len Brown <len.brown@intel.com>
+
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/idle/intel_idle.c | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -716,6 +716,26 @@ static struct cpuidle_state avn_cstates[
+ {
+ .enter = NULL }
+ };
++static struct cpuidle_state knl_cstates[] = {
++ {
++ .name = "C1-KNL",
++ .desc = "MWAIT 0x00",
++ .flags = MWAIT2flg(0x00),
++ .exit_latency = 1,
++ .target_residency = 2,
++ .enter = &intel_idle,
++ .enter_freeze = intel_idle_freeze },
++ {
++ .name = "C6-KNL",
++ .desc = "MWAIT 0x10",
++ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
++ .exit_latency = 120,
++ .target_residency = 500,
++ .enter = &intel_idle,
++ .enter_freeze = intel_idle_freeze },
++ {
++ .enter = NULL }
++};
+
+ /**
+ * intel_idle
+@@ -890,6 +910,10 @@ static const struct idle_cpu idle_cpu_av
+ .disable_promotion_to_c1e = true,
+ };
+
++static const struct idle_cpu idle_cpu_knl = {
++ .state_table = knl_cstates,
++};
++
+ #define ICPU(model, cpu) \
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
+
+@@ -921,6 +945,7 @@ static const struct x86_cpu_id intel_idl
+ ICPU(0x56, idle_cpu_bdw),
+ ICPU(0x4e, idle_cpu_skl),
+ ICPU(0x5e, idle_cpu_skl),
++ ICPU(0x57, idle_cpu_knl),
+ {}
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
--- /dev/null
+From 1de9ad3feaa4de9224ad09b5fdf664036b27596d Mon Sep 17 00:00:00 2001
+From: Johannes Weiner <hannes@cmpxchg.org>
+Date: Tue, 2 Feb 2016 16:57:29 -0800
+Subject: [PATCH 102/135] proc: revert /proc/<pid>/maps [stack:TID] annotation
+
+[ Upstream commit 65376df582174ffcec9e6471bf5b0dd79ba05e4a ]
+
+Commit b76437579d13 ("procfs: mark thread stack correctly in
+proc/<pid>/maps") added [stack:TID] annotation to /proc/<pid>/maps.
+
+Finding the task of a stack VMA requires walking the entire thread list,
+turning this into quadratic behavior: a thousand threads means a
+thousand stacks, so the rendering of /proc/<pid>/maps needs to look at a
+million combinations.
+
+The cost is not in proportion to the usefulness as described in the
+patch.
+
+Drop the [stack:TID] annotation to make /proc/<pid>/maps (and
+/proc/<pid>/numa_maps) usable again for higher thread counts.
+
+The [stack] annotation inside /proc/<pid>/task/<tid>/maps is retained, as
+identifying the stack VMA there is an O(1) operation.
+
+Siddesh said:
+ "The end users needed a way to identify thread stacks programmatically and
+ there wasn't a way to do that. I'm afraid I no longer remember (or have
+ access to the resources that would aid my memory since I changed
+ employers) the details of their requirement. However, I did do this on my
+ own time because I thought it was an interesting project for me and nobody
+ really gave any feedback then as to its utility, so as far as I am
+ concerned you could roll back the main thread maps information since the
+ information is available in the thread-specific files"
+
+Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
+Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
+Cc: Siddhesh Poyarekar <siddhesh.poyarekar@gmail.com>
+Cc: Shaohua Li <shli@fb.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/filesystems/proc.txt | 9 +----
+ fs/proc/task_mmu.c | 66 ++++++++++++-------------------------
+ fs/proc/task_nommu.c | 49 +++++++++++----------------
+ include/linux/mm.h | 3 -
+ mm/util.c | 27 ---------------
+ 5 files changed, 48 insertions(+), 106 deletions(-)
+
+--- a/Documentation/filesystems/proc.txt
++++ b/Documentation/filesystems/proc.txt
+@@ -346,7 +346,7 @@ address perms offset dev in
+ a7cb1000-a7cb2000 ---p 00000000 00:00 0
+ a7cb2000-a7eb2000 rw-p 00000000 00:00 0
+ a7eb2000-a7eb3000 ---p 00000000 00:00 0
+-a7eb3000-a7ed5000 rw-p 00000000 00:00 0 [stack:1001]
++a7eb3000-a7ed5000 rw-p 00000000 00:00 0
+ a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
+ a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6
+ a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6
+@@ -378,7 +378,6 @@ is not associated with a file:
+
+ [heap] = the heap of the program
+ [stack] = the stack of the main process
+- [stack:1001] = the stack of the thread with tid 1001
+ [vdso] = the "virtual dynamic shared object",
+ the kernel system call handler
+
+@@ -386,10 +385,8 @@ is not associated with a file:
+
+ The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint
+ of the individual tasks of a process. In this file you will see a mapping marked
+-as [stack] if that task sees it as a stack. This is a key difference from the
+-content of /proc/PID/maps, where you will see all mappings that are being used
+-as stack by all of those tasks. Hence, for the example above, the task-level
+-map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
++as [stack] if that task sees it as a stack. Hence, for the example above, the
++task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
+
+ 08048000-08049000 r-xp 00000000 03:00 8312 /opt/test
+ 08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -248,23 +248,29 @@ static int do_maps_open(struct inode *in
+ sizeof(struct proc_maps_private));
+ }
+
+-static pid_t pid_of_stack(struct proc_maps_private *priv,
+- struct vm_area_struct *vma, bool is_pid)
++/*
++ * Indicate if the VMA is a stack for the given task; for
++ * /proc/PID/maps that is the stack of the main task.
++ */
++static int is_stack(struct proc_maps_private *priv,
++ struct vm_area_struct *vma, int is_pid)
+ {
+- struct inode *inode = priv->inode;
+- struct task_struct *task;
+- pid_t ret = 0;
++ int stack = 0;
++
++ if (is_pid) {
++ stack = vma->vm_start <= vma->vm_mm->start_stack &&
++ vma->vm_end >= vma->vm_mm->start_stack;
++ } else {
++ struct inode *inode = priv->inode;
++ struct task_struct *task;
+
+- rcu_read_lock();
+- task = pid_task(proc_pid(inode), PIDTYPE_PID);
+- if (task) {
+- task = task_of_stack(task, vma, is_pid);
++ rcu_read_lock();
++ task = pid_task(proc_pid(inode), PIDTYPE_PID);
+ if (task)
+- ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
++ stack = vma_is_stack_for_task(vma, task);
++ rcu_read_unlock();
+ }
+- rcu_read_unlock();
+-
+- return ret;
++ return stack;
+ }
+
+ static void
+@@ -324,8 +330,6 @@ show_map_vma(struct seq_file *m, struct
+
+ name = arch_vma_name(vma);
+ if (!name) {
+- pid_t tid;
+-
+ if (!mm) {
+ name = "[vdso]";
+ goto done;
+@@ -337,21 +341,8 @@ show_map_vma(struct seq_file *m, struct
+ goto done;
+ }
+
+- tid = pid_of_stack(priv, vma, is_pid);
+- if (tid != 0) {
+- /*
+- * Thread stack in /proc/PID/task/TID/maps or
+- * the main process stack.
+- */
+- if (!is_pid || (vma->vm_start <= mm->start_stack &&
+- vma->vm_end >= mm->start_stack)) {
+- name = "[stack]";
+- } else {
+- /* Thread stack in /proc/PID/maps */
+- seq_pad(m, ' ');
+- seq_printf(m, "[stack:%d]", tid);
+- }
+- }
++ if (is_stack(priv, vma, is_pid))
++ name = "[stack]";
+ }
+
+ done:
+@@ -1566,19 +1557,8 @@ static int show_numa_map(struct seq_file
+ seq_file_path(m, file, "\n\t= ");
+ } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
+ seq_puts(m, " heap");
+- } else {
+- pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
+- if (tid != 0) {
+- /*
+- * Thread stack in /proc/PID/task/TID/maps or
+- * the main process stack.
+- */
+- if (!is_pid || (vma->vm_start <= mm->start_stack &&
+- vma->vm_end >= mm->start_stack))
+- seq_puts(m, " stack");
+- else
+- seq_printf(m, " stack:%d", tid);
+- }
++ } else if (is_stack(proc_priv, vma, is_pid)) {
++ seq_puts(m, " stack");
+ }
+
+ if (is_vm_hugetlb_page(vma))
+--- a/fs/proc/task_nommu.c
++++ b/fs/proc/task_nommu.c
+@@ -123,23 +123,26 @@ unsigned long task_statm(struct mm_struc
+ return size;
+ }
+
+-static pid_t pid_of_stack(struct proc_maps_private *priv,
+- struct vm_area_struct *vma, bool is_pid)
++static int is_stack(struct proc_maps_private *priv,
++ struct vm_area_struct *vma, int is_pid)
+ {
+- struct inode *inode = priv->inode;
+- struct task_struct *task;
+- pid_t ret = 0;
+-
+- rcu_read_lock();
+- task = pid_task(proc_pid(inode), PIDTYPE_PID);
+- if (task) {
+- task = task_of_stack(task, vma, is_pid);
++ struct mm_struct *mm = vma->vm_mm;
++ int stack = 0;
++
++ if (is_pid) {
++ stack = vma->vm_start <= mm->start_stack &&
++ vma->vm_end >= mm->start_stack;
++ } else {
++ struct inode *inode = priv->inode;
++ struct task_struct *task;
++
++ rcu_read_lock();
++ task = pid_task(proc_pid(inode), PIDTYPE_PID);
+ if (task)
+- ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
++ stack = vma_is_stack_for_task(vma, task);
++ rcu_read_unlock();
+ }
+- rcu_read_unlock();
+-
+- return ret;
++ return stack;
+ }
+
+ /*
+@@ -181,21 +184,9 @@ static int nommu_vma_show(struct seq_fil
+ if (file) {
+ seq_pad(m, ' ');
+ seq_file_path(m, file, "");
+- } else if (mm) {
+- pid_t tid = pid_of_stack(priv, vma, is_pid);
+-
+- if (tid != 0) {
+- seq_pad(m, ' ');
+- /*
+- * Thread stack in /proc/PID/task/TID/maps or
+- * the main process stack.
+- */
+- if (!is_pid || (vma->vm_start <= mm->start_stack &&
+- vma->vm_end >= mm->start_stack))
+- seq_printf(m, "[stack]");
+- else
+- seq_printf(m, "[stack:%d]", tid);
+- }
++ } else if (mm && is_stack(priv, vma, is_pid)) {
++ seq_pad(m, ' ');
++ seq_printf(m, "[stack]");
+ }
+
+ seq_putc(m, '\n');
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1311,8 +1311,7 @@ static inline int stack_guard_page_end(s
+ !vma_growsup(vma->vm_next, addr);
+ }
+
+-extern struct task_struct *task_of_stack(struct task_struct *task,
+- struct vm_area_struct *vma, bool in_group);
++int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
+
+ extern unsigned long move_page_tables(struct vm_area_struct *vma,
+ unsigned long old_addr, struct vm_area_struct *new_vma,
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -199,36 +199,11 @@ void __vma_link_list(struct mm_struct *m
+ }
+
+ /* Check if the vma is being used as a stack by this task */
+-static int vm_is_stack_for_task(struct task_struct *t,
+- struct vm_area_struct *vma)
++int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
+ {
+ return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
+ }
+
+-/*
+- * Check if the vma is being used as a stack.
+- * If is_group is non-zero, check in the entire thread group or else
+- * just check in the current task. Returns the task_struct of the task
+- * that the vma is stack for. Must be called under rcu_read_lock().
+- */
+-struct task_struct *task_of_stack(struct task_struct *task,
+- struct vm_area_struct *vma, bool in_group)
+-{
+- if (vm_is_stack_for_task(task, vma))
+- return task;
+-
+- if (in_group) {
+- struct task_struct *t;
+-
+- for_each_thread(task, t) {
+- if (vm_is_stack_for_task(t, vma))
+- return t;
+- }
+- }
+-
+- return NULL;
+-}
+-
+ #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
--- /dev/null
+From db553bf6a4ed0c90c33ff622f3c963b940711242 Mon Sep 17 00:00:00 2001
+From: Harald Freudenberger <freude@linux.vnet.ibm.com>
+Date: Thu, 17 Mar 2016 14:52:17 +0100
+Subject: [PATCH 103/135] s390/crypto: provide correct file mode at device
+ register.
+
+[ Upstream commit 74b2375e6767935e6d9220bdbc6ed0db57f71a59 ]
+
+When the prng device driver calls misc_register() there is the possibility
+to also provide the recommended file permissions. This fix now gives
+useful values (0644) where previously just the default was used (resulting
+in 0600 for the device file).
+
+Signed-off-by: Harald Freudenberger <freude@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/crypto/prng.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/s390/crypto/prng.c
++++ b/arch/s390/crypto/prng.c
+@@ -669,11 +669,13 @@ static const struct file_operations prng
+ static struct miscdevice prng_sha512_dev = {
+ .name = "prandom",
+ .minor = MISC_DYNAMIC_MINOR,
++ .mode = 0644,
+ .fops = &prng_sha512_fops,
+ };
+ static struct miscdevice prng_tdes_dev = {
+ .name = "prandom",
+ .minor = MISC_DYNAMIC_MINOR,
++ .mode = 0644,
+ .fops = &prng_tdes_fops,
+ };
+
--- /dev/null
+From 14f7272e474c54172b243755f2e5edb9e1b614cc Mon Sep 17 00:00:00 2001
+From: Vikas Shivappa <vikas.shivappa@linux.intel.com>
+Date: Thu, 10 Mar 2016 15:32:07 -0800
+Subject: [PATCH 104/135] perf/x86/cqm: Fix CQM handling of grouping events
+ into a cache_group
+
+[ Upstream commit a223c1c7ab4cc64537dc4b911f760d851683768a ]
+
+Currently CQM (cache quality of service monitoring) is grouping all
+events belonging to same PID to use one RMID. However its not counting
+all of these different events. Hence we end up with a count of zero
+for all events other than the group leader.
+
+The patch tries to address the issue by keeping a flag in the
+perf_event.hw which has other CQM related fields. The field is updated
+at event creation and during grouping.
+
+Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
+[peterz: Changed hw_perf_event::is_group_event to an int]
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Tony Luck <tony.luck@intel.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: David Ahern <dsahern@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Cc: fenghua.yu@intel.com
+Cc: h.peter.anvin@intel.com
+Cc: ravi.v.shankar@intel.com
+Cc: vikas.shivappa@intel.com
+Link: http://lkml.kernel.org/r/1457652732-4499-2-git-send-email-vikas.shivappa@linux.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/perf_event_intel_cqm.c | 13 ++++++++++---
+ include/linux/perf_event.h | 1 +
+ 2 files changed, 11 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+@@ -281,9 +281,13 @@ static bool __match_event(struct perf_ev
+
+ /*
+ * Events that target same task are placed into the same cache group.
++ * Mark it as a multi event group, so that we update ->count
++ * for every event rather than just the group leader later.
+ */
+- if (a->hw.target == b->hw.target)
++ if (a->hw.target == b->hw.target) {
++ b->hw.is_group_event = true;
+ return true;
++ }
+
+ /*
+ * Are we an inherited event?
+@@ -849,6 +853,7 @@ static void intel_cqm_setup_event(struct
+ bool conflict = false;
+ u32 rmid;
+
++ event->hw.is_group_event = false;
+ list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
+ rmid = iter->hw.cqm_rmid;
+
+@@ -940,7 +945,9 @@ static u64 intel_cqm_event_count(struct
+ return __perf_event_count(event);
+
+ /*
+- * Only the group leader gets to report values. This stops us
++ * Only the group leader gets to report values except in case of
++ * multiple events in the same group, we still need to read the
++ * other events.This stops us
+ * reporting duplicate values to userspace, and gives us a clear
+ * rule for which task gets to report the values.
+ *
+@@ -948,7 +955,7 @@ static u64 intel_cqm_event_count(struct
+ * specific packages - we forfeit that ability when we create
+ * task events.
+ */
+- if (!cqm_group_leader(event))
++ if (!cqm_group_leader(event) && !event->hw.is_group_event)
+ return 0;
+
+ /*
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -121,6 +121,7 @@ struct hw_perf_event {
+ struct { /* intel_cqm */
+ int cqm_state;
+ u32 cqm_rmid;
++ int is_group_event;
+ struct list_head cqm_events_entry;
+ struct list_head cqm_groups_entry;
+ struct list_head cqm_group_entry;
--- /dev/null
+From a134c91bceb0f44a23207876f9a1b580c3f894fb Mon Sep 17 00:00:00 2001
+From: Vikas Shivappa <vikas.shivappa@linux.intel.com>
+Date: Thu, 10 Mar 2016 15:32:08 -0800
+Subject: [PATCH 105/135] perf/x86/cqm: Fix CQM memory leak and notifier leak
+
+[ Upstream commit ada2f634cd50d050269b67b4e2966582387e7c27 ]
+
+Fixes the hotcpu notifier leak and other global variable memory leaks
+during CQM (cache quality of service monitoring) initialization.
+
+Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Tony Luck <tony.luck@intel.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: David Ahern <dsahern@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Cc: fenghua.yu@intel.com
+Cc: h.peter.anvin@intel.com
+Cc: ravi.v.shankar@intel.com
+Cc: vikas.shivappa@intel.com
+Link: http://lkml.kernel.org/r/1457652732-4499-3-git-send-email-vikas.shivappa@linux.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/perf_event_intel_cqm.c | 43 +++++++++++++++++++++--------
+ 1 file changed, 32 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+@@ -211,6 +211,20 @@ static void __put_rmid(u32 rmid)
+ list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
+ }
+
++static void cqm_cleanup(void)
++{
++ int i;
++
++ if (!cqm_rmid_ptrs)
++ return;
++
++ for (i = 0; i < cqm_max_rmid; i++)
++ kfree(cqm_rmid_ptrs[i]);
++
++ kfree(cqm_rmid_ptrs);
++ cqm_rmid_ptrs = NULL;
++}
++
+ static int intel_cqm_setup_rmid_cache(void)
+ {
+ struct cqm_rmid_entry *entry;
+@@ -218,7 +232,7 @@ static int intel_cqm_setup_rmid_cache(vo
+ int r = 0;
+
+ nr_rmids = cqm_max_rmid + 1;
+- cqm_rmid_ptrs = kmalloc(sizeof(struct cqm_rmid_entry *) *
++ cqm_rmid_ptrs = kzalloc(sizeof(struct cqm_rmid_entry *) *
+ nr_rmids, GFP_KERNEL);
+ if (!cqm_rmid_ptrs)
+ return -ENOMEM;
+@@ -249,11 +263,9 @@ static int intel_cqm_setup_rmid_cache(vo
+ mutex_unlock(&cache_mutex);
+
+ return 0;
+-fail:
+- while (r--)
+- kfree(cqm_rmid_ptrs[r]);
+
+- kfree(cqm_rmid_ptrs);
++fail:
++ cqm_cleanup();
+ return -ENOMEM;
+ }
+
+@@ -1322,7 +1334,7 @@ static const struct x86_cpu_id intel_cqm
+
+ static int __init intel_cqm_init(void)
+ {
+- char *str, scale[20];
++ char *str = NULL, scale[20];
+ int i, cpu, ret;
+
+ if (!x86_match_cpu(intel_cqm_match))
+@@ -1382,16 +1394,25 @@ static int __init intel_cqm_init(void)
+ cqm_pick_event_reader(i);
+ }
+
+- __perf_cpu_notifier(intel_cqm_cpu_notifier);
+-
+ ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
+- if (ret)
++ if (ret) {
+ pr_err("Intel CQM perf registration failed: %d\n", ret);
+- else
+- pr_info("Intel CQM monitoring enabled\n");
++ goto out;
++ }
++
++ pr_info("Intel CQM monitoring enabled\n");
+
++ /*
++ * Register the hot cpu notifier once we are sure cqm
++ * is enabled to avoid notifier leak.
++ */
++ __perf_cpu_notifier(intel_cqm_cpu_notifier);
+ out:
+ cpu_notifier_register_done();
++ if (ret) {
++ kfree(str);
++ cqm_cleanup();
++ }
+
+ return ret;
+ }
--- /dev/null
+From a36c8443c10fd9fcc6cf8f7462ae516895a4d81a Mon Sep 17 00:00:00 2001
+From: Sunil Goutham <sgoutham@cavium.com>
+Date: Tue, 16 Feb 2016 16:29:49 +0530
+Subject: [PATCH 106/135] net: thunderx: Fix for multiqset not configured upon
+ interface toggle
+
+[ Upstream commit 6a9bab79bb79bd9b2eda16f0aba1b4c43f677be9 ]
+
+When an interface is assigned more than 8 queues and the logical interface
+is toggled, i.e. brought down & up, additional queues or qsets are not initialized
+as secondary qset count is being set to zero while tearing down.
+
+Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cavium/thunder/nicvf_main.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+@@ -1117,7 +1117,6 @@ int nicvf_stop(struct net_device *netdev
+
+ /* Clear multiqset info */
+ nic->pnicvf = nic;
+- nic->sqs_count = 0;
+
+ return 0;
+ }
--- /dev/null
+From 7f60de28c3b41cbe22e3044755ad79f6117bbd0c Mon Sep 17 00:00:00 2001
+From: Sunil Goutham <sgoutham@cavium.com>
+Date: Tue, 16 Feb 2016 16:29:51 +0530
+Subject: [PATCH 107/135] net: thunderx: Fix receive packet stats
+
+[ Upstream commit ad2ecebd67d8a80fe5412d11df375a5ed2db7cd1 ]
+
+Counting rx packets for every CQE_RX in CQ irq handler is incorrect.
+Synchronization is missing when multiple queues are receiving packets
+simultaneously. Like transmit packet stats use HW stats here.
+
+Also removed unused 'cqe_type' parameter in nicvf_rcv_pkt_handler().
+
+Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cavium/thunder/nicvf_main.c | 11 ++++++-----
+ drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 8 ++------
+ drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 3 +--
+ 3 files changed, 9 insertions(+), 13 deletions(-)
+
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+@@ -566,8 +566,7 @@ static inline void nicvf_set_rxhash(stru
+
+ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
+ struct napi_struct *napi,
+- struct cmp_queue *cq,
+- struct cqe_rx_t *cqe_rx, int cqe_type)
++ struct cqe_rx_t *cqe_rx)
+ {
+ struct sk_buff *skb;
+ struct nicvf *nic = netdev_priv(netdev);
+@@ -583,7 +582,7 @@ static void nicvf_rcv_pkt_handler(struct
+ }
+
+ /* Check for errors */
+- err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
++ err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
+ if (err && !cqe_rx->rb_cnt)
+ return;
+
+@@ -674,8 +673,7 @@ loop:
+ cq_idx, cq_desc->cqe_type);
+ switch (cq_desc->cqe_type) {
+ case CQE_TYPE_RX:
+- nicvf_rcv_pkt_handler(netdev, napi, cq,
+- cq_desc, CQE_TYPE_RX);
++ nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
+ work_done++;
+ break;
+ case CQE_TYPE_SEND:
+@@ -1345,6 +1343,9 @@ void nicvf_update_stats(struct nicvf *ni
+ drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
+ stats->tx_bcast_frames_ok +
+ stats->tx_mcast_frames_ok;
++ drv_stats->rx_frames_ok = stats->rx_ucast_frames +
++ stats->rx_bcast_frames +
++ stats->rx_mcast_frames;
+ drv_stats->rx_drops = stats->rx_drop_red +
+ stats->rx_drop_overrun;
+ drv_stats->tx_drops = stats->tx_drops;
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+@@ -1414,16 +1414,12 @@ void nicvf_update_sq_stats(struct nicvf
+ }
+
+ /* Check for errors in the receive cmp.queue entry */
+-int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+- struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
++int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
+ {
+ struct nicvf_hw_stats *stats = &nic->hw_stats;
+- struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+
+- if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
+- drv_stats->rx_frames_ok++;
++ if (!cqe_rx->err_level && !cqe_rx->err_opcode)
+ return 0;
+- }
+
+ if (netif_msg_rx_err(nic))
+ netdev_err(nic->netdev,
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+@@ -344,8 +344,7 @@ u64 nicvf_queue_reg_read(struct nicvf *
+ /* Stats */
+ void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
+ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
+-int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+- struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
++int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
+ int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+ #endif /* NICVF_QUEUES_H */
--- /dev/null
+From cd9e16196af9667ab688db7be4efb3c51a08780c Mon Sep 17 00:00:00 2001
+From: Pavel Rojtberg <rojtberg@gmail.com>
+Date: Wed, 9 Dec 2015 11:57:01 -0800
+Subject: [PATCH 108/135] Input: xpad - correctly handle concurrent LED and FF
+ requests
+
+[ Upstream commit 7fc595f4c02636eadaeeecfe7bbc45b57c173004 ]
+
+Track the status of the irq_out URB to prevent submission of new requests
+while the current one is active. Failure to do so results in the "URB submitted
+while active" warning/stack trace.
+
+Store pending brightness and FF effect in the driver structure and replace
+it with the latest requests until the device is ready to process next
+request. Alternate serving LED vs FF requests to make sure one does not
+starve another. See [1] for discussion. Inspired by patch of Sarah Bessmer
+[2].
+
+[1]: http://www.spinics.net/lists/linux-input/msg40708.html
+[2]: http://www.spinics.net/lists/linux-input/msg31450.html
+
+Signed-off-by: Pavel Rojtberg <rojtberg@gmail.com>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/input/joystick/xpad.c | 322 +++++++++++++++++++++++++++++-------------
+ 1 file changed, 223 insertions(+), 99 deletions(-)
+
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -317,6 +317,19 @@ static struct usb_device_id xpad_table[]
+
+ MODULE_DEVICE_TABLE(usb, xpad_table);
+
++struct xpad_output_packet {
++ u8 data[XPAD_PKT_LEN];
++ u8 len;
++ bool pending;
++};
++
++#define XPAD_OUT_CMD_IDX 0
++#define XPAD_OUT_FF_IDX 1
++#define XPAD_OUT_LED_IDX (1 + IS_ENABLED(CONFIG_JOYSTICK_XPAD_FF))
++#define XPAD_NUM_OUT_PACKETS (1 + \
++ IS_ENABLED(CONFIG_JOYSTICK_XPAD_FF) + \
++ IS_ENABLED(CONFIG_JOYSTICK_XPAD_LEDS))
++
+ struct usb_xpad {
+ struct input_dev *dev; /* input device interface */
+ struct usb_device *udev; /* usb device */
+@@ -329,9 +342,13 @@ struct usb_xpad {
+ dma_addr_t idata_dma;
+
+ struct urb *irq_out; /* urb for interrupt out report */
++ bool irq_out_active; /* we must not use an active URB */
+ unsigned char *odata; /* output data */
+ dma_addr_t odata_dma;
+- struct mutex odata_mutex;
++ spinlock_t odata_lock;
++
++ struct xpad_output_packet out_packets[XPAD_NUM_OUT_PACKETS];
++ int last_out_packet;
+
+ #if defined(CONFIG_JOYSTICK_XPAD_LEDS)
+ struct xpad_led *led;
+@@ -678,18 +695,71 @@ exit:
+ __func__, retval);
+ }
+
++/* Callers must hold xpad->odata_lock spinlock */
++static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad)
++{
++ struct xpad_output_packet *pkt, *packet = NULL;
++ int i;
++
++ for (i = 0; i < XPAD_NUM_OUT_PACKETS; i++) {
++ if (++xpad->last_out_packet >= XPAD_NUM_OUT_PACKETS)
++ xpad->last_out_packet = 0;
++
++ pkt = &xpad->out_packets[xpad->last_out_packet];
++ if (pkt->pending) {
++ dev_dbg(&xpad->intf->dev,
++ "%s - found pending output packet %d\n",
++ __func__, xpad->last_out_packet);
++ packet = pkt;
++ break;
++ }
++ }
++
++ if (packet) {
++ memcpy(xpad->odata, packet->data, packet->len);
++ xpad->irq_out->transfer_buffer_length = packet->len;
++ return true;
++ }
++
++ return false;
++}
++
++/* Callers must hold xpad->odata_lock spinlock */
++static int xpad_try_sending_next_out_packet(struct usb_xpad *xpad)
++{
++ int error;
++
++ if (!xpad->irq_out_active && xpad_prepare_next_out_packet(xpad)) {
++ error = usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
++ if (error) {
++ dev_err(&xpad->intf->dev,
++ "%s - usb_submit_urb failed with result %d\n",
++ __func__, error);
++ return -EIO;
++ }
++
++ xpad->irq_out_active = true;
++ }
++
++ return 0;
++}
++
+ static void xpad_irq_out(struct urb *urb)
+ {
+ struct usb_xpad *xpad = urb->context;
+ struct device *dev = &xpad->intf->dev;
+- int retval, status;
++ int status = urb->status;
++ int error;
++ unsigned long flags;
+
+- status = urb->status;
++ spin_lock_irqsave(&xpad->odata_lock, flags);
+
+ switch (status) {
+ case 0:
+ /* success */
+- return;
++ xpad->out_packets[xpad->last_out_packet].pending = false;
++ xpad->irq_out_active = xpad_prepare_next_out_packet(xpad);
++ break;
+
+ case -ECONNRESET:
+ case -ENOENT:
+@@ -697,19 +767,26 @@ static void xpad_irq_out(struct urb *urb
+ /* this urb is terminated, clean up */
+ dev_dbg(dev, "%s - urb shutting down with status: %d\n",
+ __func__, status);
+- return;
++ xpad->irq_out_active = false;
++ break;
+
+ default:
+ dev_dbg(dev, "%s - nonzero urb status received: %d\n",
+ __func__, status);
+- goto exit;
++ break;
+ }
+
+-exit:
+- retval = usb_submit_urb(urb, GFP_ATOMIC);
+- if (retval)
+- dev_err(dev, "%s - usb_submit_urb failed with result %d\n",
+- __func__, retval);
++ if (xpad->irq_out_active) {
++ error = usb_submit_urb(urb, GFP_ATOMIC);
++ if (error) {
++ dev_err(dev,
++ "%s - usb_submit_urb failed with result %d\n",
++ __func__, error);
++ xpad->irq_out_active = false;
++ }
++ }
++
++ spin_unlock_irqrestore(&xpad->odata_lock, flags);
+ }
+
+ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
+@@ -728,7 +805,7 @@ static int xpad_init_output(struct usb_i
+ goto fail1;
+ }
+
+- mutex_init(&xpad->odata_mutex);
++ spin_lock_init(&xpad->odata_lock);
+
+ xpad->irq_out = usb_alloc_urb(0, GFP_KERNEL);
+ if (!xpad->irq_out) {
+@@ -770,27 +847,57 @@ static void xpad_deinit_output(struct us
+
+ static int xpad_inquiry_pad_presence(struct usb_xpad *xpad)
+ {
++ struct xpad_output_packet *packet =
++ &xpad->out_packets[XPAD_OUT_CMD_IDX];
++ unsigned long flags;
+ int retval;
+
+- mutex_lock(&xpad->odata_mutex);
++ spin_lock_irqsave(&xpad->odata_lock, flags);
+
+- xpad->odata[0] = 0x08;
+- xpad->odata[1] = 0x00;
+- xpad->odata[2] = 0x0F;
+- xpad->odata[3] = 0xC0;
+- xpad->odata[4] = 0x00;
+- xpad->odata[5] = 0x00;
+- xpad->odata[6] = 0x00;
+- xpad->odata[7] = 0x00;
+- xpad->odata[8] = 0x00;
+- xpad->odata[9] = 0x00;
+- xpad->odata[10] = 0x00;
+- xpad->odata[11] = 0x00;
+- xpad->irq_out->transfer_buffer_length = 12;
++ packet->data[0] = 0x08;
++ packet->data[1] = 0x00;
++ packet->data[2] = 0x0F;
++ packet->data[3] = 0xC0;
++ packet->data[4] = 0x00;
++ packet->data[5] = 0x00;
++ packet->data[6] = 0x00;
++ packet->data[7] = 0x00;
++ packet->data[8] = 0x00;
++ packet->data[9] = 0x00;
++ packet->data[10] = 0x00;
++ packet->data[11] = 0x00;
++ packet->len = 12;
++ packet->pending = true;
++
++ /* Reset the sequence so we send out presence first */
++ xpad->last_out_packet = -1;
++ retval = xpad_try_sending_next_out_packet(xpad);
+
+- retval = usb_submit_urb(xpad->irq_out, GFP_KERNEL);
++ spin_unlock_irqrestore(&xpad->odata_lock, flags);
+
+- mutex_unlock(&xpad->odata_mutex);
++ return retval;
++}
++
++static int xpad_start_xbox_one(struct usb_xpad *xpad)
++{
++ struct xpad_output_packet *packet =
++ &xpad->out_packets[XPAD_OUT_CMD_IDX];
++ unsigned long flags;
++ int retval;
++
++ spin_lock_irqsave(&xpad->odata_lock, flags);
++
++ /* Xbox one controller needs to be initialized. */
++ packet->data[0] = 0x05;
++ packet->data[1] = 0x20;
++ packet->len = 2;
++ packet->pending = true;
++
++ /* Reset the sequence so we send out start packet first */
++ xpad->last_out_packet = -1;
++ retval = xpad_try_sending_next_out_packet(xpad);
++
++ spin_unlock_irqrestore(&xpad->odata_lock, flags);
+
+ return retval;
+ }
+@@ -799,8 +906,11 @@ static int xpad_inquiry_pad_presence(str
+ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect)
+ {
+ struct usb_xpad *xpad = input_get_drvdata(dev);
++ struct xpad_output_packet *packet = &xpad->out_packets[XPAD_OUT_FF_IDX];
+ __u16 strong;
+ __u16 weak;
++ int retval;
++ unsigned long flags;
+
+ if (effect->type != FF_RUMBLE)
+ return 0;
+@@ -808,69 +918,80 @@ static int xpad_play_effect(struct input
+ strong = effect->u.rumble.strong_magnitude;
+ weak = effect->u.rumble.weak_magnitude;
+
++ spin_lock_irqsave(&xpad->odata_lock, flags);
++
+ switch (xpad->xtype) {
+ case XTYPE_XBOX:
+- xpad->odata[0] = 0x00;
+- xpad->odata[1] = 0x06;
+- xpad->odata[2] = 0x00;
+- xpad->odata[3] = strong / 256; /* left actuator */
+- xpad->odata[4] = 0x00;
+- xpad->odata[5] = weak / 256; /* right actuator */
+- xpad->irq_out->transfer_buffer_length = 6;
++ packet->data[0] = 0x00;
++ packet->data[1] = 0x06;
++ packet->data[2] = 0x00;
++ packet->data[3] = strong / 256; /* left actuator */
++ packet->data[4] = 0x00;
++ packet->data[5] = weak / 256; /* right actuator */
++ packet->len = 6;
++ packet->pending = true;
+ break;
+
+ case XTYPE_XBOX360:
+- xpad->odata[0] = 0x00;
+- xpad->odata[1] = 0x08;
+- xpad->odata[2] = 0x00;
+- xpad->odata[3] = strong / 256; /* left actuator? */
+- xpad->odata[4] = weak / 256; /* right actuator? */
+- xpad->odata[5] = 0x00;
+- xpad->odata[6] = 0x00;
+- xpad->odata[7] = 0x00;
+- xpad->irq_out->transfer_buffer_length = 8;
++ packet->data[0] = 0x00;
++ packet->data[1] = 0x08;
++ packet->data[2] = 0x00;
++ packet->data[3] = strong / 256; /* left actuator? */
++ packet->data[4] = weak / 256; /* right actuator? */
++ packet->data[5] = 0x00;
++ packet->data[6] = 0x00;
++ packet->data[7] = 0x00;
++ packet->len = 8;
++ packet->pending = true;
+ break;
+
+ case XTYPE_XBOX360W:
+- xpad->odata[0] = 0x00;
+- xpad->odata[1] = 0x01;
+- xpad->odata[2] = 0x0F;
+- xpad->odata[3] = 0xC0;
+- xpad->odata[4] = 0x00;
+- xpad->odata[5] = strong / 256;
+- xpad->odata[6] = weak / 256;
+- xpad->odata[7] = 0x00;
+- xpad->odata[8] = 0x00;
+- xpad->odata[9] = 0x00;
+- xpad->odata[10] = 0x00;
+- xpad->odata[11] = 0x00;
+- xpad->irq_out->transfer_buffer_length = 12;
++ packet->data[0] = 0x00;
++ packet->data[1] = 0x01;
++ packet->data[2] = 0x0F;
++ packet->data[3] = 0xC0;
++ packet->data[4] = 0x00;
++ packet->data[5] = strong / 256;
++ packet->data[6] = weak / 256;
++ packet->data[7] = 0x00;
++ packet->data[8] = 0x00;
++ packet->data[9] = 0x00;
++ packet->data[10] = 0x00;
++ packet->data[11] = 0x00;
++ packet->len = 12;
++ packet->pending = true;
+ break;
+
+ case XTYPE_XBOXONE:
+- xpad->odata[0] = 0x09; /* activate rumble */
+- xpad->odata[1] = 0x08;
+- xpad->odata[2] = 0x00;
+- xpad->odata[3] = 0x08; /* continuous effect */
+- xpad->odata[4] = 0x00; /* simple rumble mode */
+- xpad->odata[5] = 0x03; /* L and R actuator only */
+- xpad->odata[6] = 0x00; /* TODO: LT actuator */
+- xpad->odata[7] = 0x00; /* TODO: RT actuator */
+- xpad->odata[8] = strong / 256; /* left actuator */
+- xpad->odata[9] = weak / 256; /* right actuator */
+- xpad->odata[10] = 0x80; /* length of pulse */
+- xpad->odata[11] = 0x00; /* stop period of pulse */
+- xpad->irq_out->transfer_buffer_length = 12;
++ packet->data[0] = 0x09; /* activate rumble */
++ packet->data[1] = 0x08;
++ packet->data[2] = 0x00;
++ packet->data[3] = 0x08; /* continuous effect */
++ packet->data[4] = 0x00; /* simple rumble mode */
++ packet->data[5] = 0x03; /* L and R actuator only */
++ packet->data[6] = 0x00; /* TODO: LT actuator */
++ packet->data[7] = 0x00; /* TODO: RT actuator */
++ packet->data[8] = strong / 256; /* left actuator */
++ packet->data[9] = weak / 256; /* right actuator */
++ packet->data[10] = 0x80; /* length of pulse */
++ packet->data[11] = 0x00; /* stop period of pulse */
++ packet->len = 12;
++ packet->pending = true;
+ break;
+
+ default:
+ dev_dbg(&xpad->dev->dev,
+ "%s - rumble command sent to unsupported xpad type: %d\n",
+ __func__, xpad->xtype);
+- return -EINVAL;
++ retval = -EINVAL;
++ goto out;
+ }
+
+- return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
++ retval = xpad_try_sending_next_out_packet(xpad);
++
++out:
++ spin_unlock_irqrestore(&xpad->odata_lock, flags);
++ return retval;
+ }
+
+ static int xpad_init_ff(struct usb_xpad *xpad)
+@@ -921,36 +1042,44 @@ struct xpad_led {
+ */
+ static void xpad_send_led_command(struct usb_xpad *xpad, int command)
+ {
++ struct xpad_output_packet *packet =
++ &xpad->out_packets[XPAD_OUT_LED_IDX];
++ unsigned long flags;
++
+ command %= 16;
+
+- mutex_lock(&xpad->odata_mutex);
++ spin_lock_irqsave(&xpad->odata_lock, flags);
+
+ switch (xpad->xtype) {
+ case XTYPE_XBOX360:
+- xpad->odata[0] = 0x01;
+- xpad->odata[1] = 0x03;
+- xpad->odata[2] = command;
+- xpad->irq_out->transfer_buffer_length = 3;
++ packet->data[0] = 0x01;
++ packet->data[1] = 0x03;
++ packet->data[2] = command;
++ packet->len = 3;
++ packet->pending = true;
+ break;
++
+ case XTYPE_XBOX360W:
+- xpad->odata[0] = 0x00;
+- xpad->odata[1] = 0x00;
+- xpad->odata[2] = 0x08;
+- xpad->odata[3] = 0x40 + command;
+- xpad->odata[4] = 0x00;
+- xpad->odata[5] = 0x00;
+- xpad->odata[6] = 0x00;
+- xpad->odata[7] = 0x00;
+- xpad->odata[8] = 0x00;
+- xpad->odata[9] = 0x00;
+- xpad->odata[10] = 0x00;
+- xpad->odata[11] = 0x00;
+- xpad->irq_out->transfer_buffer_length = 12;
++ packet->data[0] = 0x00;
++ packet->data[1] = 0x00;
++ packet->data[2] = 0x08;
++ packet->data[3] = 0x40 + command;
++ packet->data[4] = 0x00;
++ packet->data[5] = 0x00;
++ packet->data[6] = 0x00;
++ packet->data[7] = 0x00;
++ packet->data[8] = 0x00;
++ packet->data[9] = 0x00;
++ packet->data[10] = 0x00;
++ packet->data[11] = 0x00;
++ packet->len = 12;
++ packet->pending = true;
+ break;
+ }
+
+- usb_submit_urb(xpad->irq_out, GFP_KERNEL);
+- mutex_unlock(&xpad->odata_mutex);
++ xpad_try_sending_next_out_packet(xpad);
++
++ spin_unlock_irqrestore(&xpad->odata_lock, flags);
+ }
+
+ /*
+@@ -1048,13 +1177,8 @@ static int xpad_open(struct input_dev *d
+ if (usb_submit_urb(xpad->irq_in, GFP_KERNEL))
+ return -EIO;
+
+- if (xpad->xtype == XTYPE_XBOXONE) {
+- /* Xbox one controller needs to be initialized. */
+- xpad->odata[0] = 0x05;
+- xpad->odata[1] = 0x20;
+- xpad->irq_out->transfer_buffer_length = 2;
+- return usb_submit_urb(xpad->irq_out, GFP_KERNEL);
+- }
++ if (xpad->xtype == XTYPE_XBOXONE)
++ return xpad_start_xbox_one(xpad);
+
+ return 0;
+ }
--- /dev/null
+From 2b189fe86b2820b583a80419ad3063b5f49fc007 Mon Sep 17 00:00:00 2001
+From: John Stultz <john.stultz@linaro.org>
+Date: Thu, 3 Dec 2015 22:09:31 -0500
+Subject: [PATCH 109/135] time: Verify time values in adjtimex ADJ_SETOFFSET to
+ avoid overflow
+
+[ Upstream commit 37cf4dc3370fbca0344e23bb96446eb2c3548ba7 ]
+
+For adjtimex()'s ADJ_SETOFFSET, make sure the tv_usec value is
+sane. We might multiply them later which can cause an overflow
+and undefined behavior.
+
+This patch introduces new helper functions to simplify the
+checking code and adds comments to clarify
+
+Originally this patch was by Sasha Levin, but I've basically
+rewritten it, so he should get credit for finding the issue
+and I should get the blame for any mistakes made since.
+
+Also, credit to Richard Cochran for the phrasing used in the
+comment for what is considered valid here.
+
+Cc: Sasha Levin <sasha.levin@oracle.com>
+Cc: Richard Cochran <richardcochran@gmail.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/time.h | 26 ++++++++++++++++++++++++++
+ kernel/time/ntp.c | 10 ++++++++--
+ kernel/time/timekeeping.c | 2 +-
+ 3 files changed, 35 insertions(+), 3 deletions(-)
+
+--- a/include/linux/time.h
++++ b/include/linux/time.h
+@@ -125,6 +125,32 @@ static inline bool timeval_valid(const s
+
+ extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
+
++/*
++ * Validates if a timespec/timeval used to inject a time offset is valid.
++ * Offsets can be positive or negative. The value of the timeval/timespec
++ * is the sum of its fields, but *NOTE*: the field tv_usec/tv_nsec must
++ * always be non-negative.
++ */
++static inline bool timeval_inject_offset_valid(const struct timeval *tv)
++{
++ /* We don't check the tv_sec as it can be positive or negative */
++
++ /* Can't have more microseconds than a second */
++ if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
++ return false;
++ return true;
++}
++
++static inline bool timespec_inject_offset_valid(const struct timespec *ts)
++{
++ /* We don't check the tv_sec as it can be positive or negative */
++
++ /* Can't have more nanoseconds than a second */
++ if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
++ return false;
++ return true;
++}
++
+ #define CURRENT_TIME (current_kernel_time())
+ #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
+
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -674,8 +674,14 @@ int ntp_validate_timex(struct timex *txc
+ return -EINVAL;
+ }
+
+- if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
+- return -EPERM;
++ if (txc->modes & ADJ_SETOFFSET) {
++ /* In order to inject time, you gotta be super-user! */
++ if (!capable(CAP_SYS_TIME))
++ return -EPERM;
++
++ if (!timeval_inject_offset_valid(&txc->time))
++ return -EINVAL;
++ }
+
+ /*
+ * Check for potential multiplication overflows that can
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -958,7 +958,7 @@ int timekeeping_inject_offset(struct tim
+ struct timespec64 ts64, tmp;
+ int ret = 0;
+
+- if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
++ if (!timespec_inject_offset_valid(ts))
+ return -EINVAL;
+
+ ts64 = timespec_to_timespec64(*ts);
--- /dev/null
+From 0774d1b938548ca879d15f4ec549714b38a62644 Mon Sep 17 00:00:00 2001
+From: John Stultz <john.stultz@linaro.org>
+Date: Thu, 14 Apr 2016 10:25:14 -0600
+Subject: [PATCH 110/135] ntp: Fix ADJ_SETOFFSET being used w/ ADJ_NANO
+
+[ Upstream commit dd4e17ab704269bce71402285f5e8b9ac24b1eff ]
+
+Recently, in commit 37cf4dc3370f I forgot to check if the timeval being passed
+was actually a timespec (as is signaled with ADJ_NANO).
+
+This resulted in that patch breaking ADJ_SETOFFSET users who set
+ADJ_NANO, by rejecting valid timespecs that were compared with
+valid timeval ranges.
+
+This patch addresses this by checking for the ADJ_NANO flag and
+using the timespec check instead in that case.
+
+Reported-by: Harald Hoyer <harald@redhat.com>
+Reported-by: Kay Sievers <kay@vrfy.org>
+Fixes: 37cf4dc3370f "time: Verify time values in adjtimex ADJ_SETOFFSET to avoid overflow"
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Cc: Sasha Levin <sasha.levin@oracle.com>
+Cc: Richard Cochran <richardcochran@gmail.com>
+Cc: Prarit Bhargava <prarit@redhat.com>
+Cc: David Herrmann <dh.herrmann@gmail.com>
+Link: http://lkml.kernel.org/r/1453417415-19110-2-git-send-email-john.stultz@linaro.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/time/ntp.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -679,8 +679,18 @@ int ntp_validate_timex(struct timex *txc
+ if (!capable(CAP_SYS_TIME))
+ return -EPERM;
+
+- if (!timeval_inject_offset_valid(&txc->time))
+- return -EINVAL;
++ if (txc->modes & ADJ_NANO) {
++ struct timespec ts;
++
++ ts.tv_sec = txc->time.tv_sec;
++ ts.tv_nsec = txc->time.tv_usec;
++ if (!timespec_inject_offset_valid(&ts))
++ return -EINVAL;
++
++ } else {
++ if (!timeval_inject_offset_valid(&txc->time))
++ return -EINVAL;
++ }
+ }
+
+ /*
--- /dev/null
+From 8d8f32644cf6424d7a16f4a9ee83e7d71fd83c75 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Mon, 9 May 2016 12:01:27 -0600
+Subject: [PATCH 111/135] drm: Balance error path for GEM handle allocation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit 6984128d01cf935820a0563f3a00c6623ba58109 ]
+
+The current error path for failure when establishing a handle for a GEM
+object is unbalanced, e.g. we call object_close() without first calling
+object_open(). Use the typical onion structure to only undo what has
+been set up prior to the error.
+
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_gem.c | 29 +++++++++++++++++------------
+ 1 file changed, 17 insertions(+), 12 deletions(-)
+
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -338,27 +338,32 @@ drm_gem_handle_create_tail(struct drm_fi
+ spin_unlock(&file_priv->table_lock);
+ idr_preload_end();
+ mutex_unlock(&dev->object_name_lock);
+- if (ret < 0) {
+- drm_gem_object_handle_unreference_unlocked(obj);
+- return ret;
+- }
++ if (ret < 0)
++ goto err_unref;
++
+ *handlep = ret;
+
+ ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+- if (ret) {
+- drm_gem_handle_delete(file_priv, *handlep);
+- return ret;
+- }
++ if (ret)
++ goto err_remove;
+
+ if (dev->driver->gem_open_object) {
+ ret = dev->driver->gem_open_object(obj, file_priv);
+- if (ret) {
+- drm_gem_handle_delete(file_priv, *handlep);
+- return ret;
+- }
++ if (ret)
++ goto err_revoke;
+ }
+
+ return 0;
++
++err_revoke:
++ drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
++err_remove:
++ spin_lock(&file_priv->table_lock);
++ idr_remove(&file_priv->object_idr, *handlep);
++ spin_unlock(&file_priv->table_lock);
++err_unref:
++ drm_gem_object_handle_unreference_unlocked(obj);
++ return ret;
+ }
+
+ /**
--- /dev/null
+From 33fef695fb119ae3c7cb253ff12ed0bd800d5d42 Mon Sep 17 00:00:00 2001
+From: Maruthi Srinivas Bayyavarapu <Maruthi.Bayyavarapu@amd.com>
+Date: Wed, 11 May 2016 08:16:36 -0400
+Subject: [PATCH 112/135] ALSA: hda: add AMD Polaris-10/11 AZ PCI IDs with
+ proper driver caps
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit 8eb22214b7cb0c0a28be6caf3b81201629d8ea7c ]
+
+This commit fixes garbled audio on Polaris-10/11 variants
+
+Signed-off-by: Maruthi Bayyavarapu <maruthi.bayyavarapu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/hda_intel.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2366,6 +2366,10 @@ static const struct pci_device_id azx_id
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ { PCI_DEVICE(0x1002, 0xaae8),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
++ { PCI_DEVICE(0x1002, 0xaae0),
++ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
++ { PCI_DEVICE(0x1002, 0xaaf0),
++ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ /* VIA VT8251/VT8237A */
+ { PCI_DEVICE(0x1106, 0x3288),
+ .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
--- /dev/null
+From c8e2078306d9e7d3784fffaf1189b0a623a6afa5 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Wed, 8 Jun 2016 19:20:33 +0100
+Subject: [PATCH 113/135] ecryptfs: fix handling of directory opening
+
+[ Upstream commit 6a480a7842545ec520a91730209ec0bae41694c1 ]
+
+First of all, trying to open them r/w is idiocy; it's guaranteed to fail.
+Moreover, assigning ->f_pos and assuming that everything will work is
+blatantly broken - try that with e.g. tmpfs as underlying layer and watch
+the fireworks. There may be a non-trivial amount of state associated with
+current IO position, well beyond the numeric offset. Using the single
+struct file associated with underlying inode is really not a good idea;
+we ought to open one for each ecryptfs directory struct file.
+
+Additionally, file_operations both for directories and non-directories are
+full of pointless methods; non-directories should *not* have ->iterate(),
+directories should not have ->flush(), ->fasync() and ->splice_read().
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ecryptfs/file.c | 71 +++++++++++++++++++++++++++++++++++++++++------------
+ 1 file changed, 55 insertions(+), 16 deletions(-)
+
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -112,7 +112,6 @@ static int ecryptfs_readdir(struct file
+ .sb = inode->i_sb,
+ };
+ lower_file = ecryptfs_file_to_lower(file);
+- lower_file->f_pos = ctx->pos;
+ rc = iterate_dir(lower_file, &buf.ctx);
+ ctx->pos = buf.ctx.pos;
+ if (rc < 0)
+@@ -236,14 +235,6 @@ static int ecryptfs_open(struct inode *i
+ }
+ ecryptfs_set_file_lower(
+ file, ecryptfs_inode_to_private(inode)->lower_file);
+- if (d_is_dir(ecryptfs_dentry)) {
+- ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
+- mutex_lock(&crypt_stat->cs_mutex);
+- crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
+- mutex_unlock(&crypt_stat->cs_mutex);
+- rc = 0;
+- goto out;
+- }
+ rc = read_or_initialize_metadata(ecryptfs_dentry);
+ if (rc)
+ goto out_put;
+@@ -260,6 +251,45 @@ out:
+ return rc;
+ }
+
++/**
++ * ecryptfs_dir_open
++ * @inode: inode specifying file to open
++ * @file: Structure to return filled in
++ *
++ * Opens the file specified by inode.
++ *
++ * Returns zero on success; non-zero otherwise
++ */
++static int ecryptfs_dir_open(struct inode *inode, struct file *file)
++{
++ struct dentry *ecryptfs_dentry = file->f_path.dentry;
++ /* Private value of ecryptfs_dentry allocated in
++ * ecryptfs_lookup() */
++ struct ecryptfs_file_info *file_info;
++ struct file *lower_file;
++
++ /* Released in ecryptfs_release or end of function if failure */
++ file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
++ ecryptfs_set_file_private(file, file_info);
++ if (unlikely(!file_info)) {
++ ecryptfs_printk(KERN_ERR,
++ "Error attempting to allocate memory\n");
++ return -ENOMEM;
++ }
++ lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry),
++ file->f_flags, current_cred());
++ if (IS_ERR(lower_file)) {
++ printk(KERN_ERR "%s: Error attempting to initialize "
++ "the lower file for the dentry with name "
++ "[%pd]; rc = [%ld]\n", __func__,
++ ecryptfs_dentry, PTR_ERR(lower_file));
++ kmem_cache_free(ecryptfs_file_info_cache, file_info);
++ return PTR_ERR(lower_file);
++ }
++ ecryptfs_set_file_lower(file, lower_file);
++ return 0;
++}
++
+ static int ecryptfs_flush(struct file *file, fl_owner_t td)
+ {
+ struct file *lower_file = ecryptfs_file_to_lower(file);
+@@ -280,6 +310,19 @@ static int ecryptfs_release(struct inode
+ return 0;
+ }
+
++static int ecryptfs_dir_release(struct inode *inode, struct file *file)
++{
++ fput(ecryptfs_file_to_lower(file));
++ kmem_cache_free(ecryptfs_file_info_cache,
++ ecryptfs_file_to_private(file));
++ return 0;
++}
++
++static loff_t ecryptfs_dir_llseek(struct file *file, loff_t offset, int whence)
++{
++ return vfs_llseek(ecryptfs_file_to_lower(file), offset, whence);
++}
++
+ static int
+ ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+ {
+@@ -359,20 +402,16 @@ const struct file_operations ecryptfs_di
+ #ifdef CONFIG_COMPAT
+ .compat_ioctl = ecryptfs_compat_ioctl,
+ #endif
+- .open = ecryptfs_open,
+- .flush = ecryptfs_flush,
+- .release = ecryptfs_release,
++ .open = ecryptfs_dir_open,
++ .release = ecryptfs_dir_release,
+ .fsync = ecryptfs_fsync,
+- .fasync = ecryptfs_fasync,
+- .splice_read = generic_file_splice_read,
+- .llseek = default_llseek,
++ .llseek = ecryptfs_dir_llseek,
+ };
+
+ const struct file_operations ecryptfs_main_fops = {
+ .llseek = generic_file_llseek,
+ .read_iter = ecryptfs_read_update_atime,
+ .write_iter = generic_file_write_iter,
+- .iterate = ecryptfs_readdir,
+ .unlocked_ioctl = ecryptfs_unlocked_ioctl,
+ #ifdef CONFIG_COMPAT
+ .compat_ioctl = ecryptfs_compat_ioctl,
--- /dev/null
+From eacade1a4dc44974d2f9c734ddfd988d3c772fbd Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Tue, 22 Mar 2016 09:38:18 +1000
+Subject: [PATCH 114/135] drm/radeon/mst: fix regression in lane/link handling.
+
+[ Upstream commit b36f7d26a7fdc0b07b1217368ee09bb8560269f8 ]
+
+The function this used changed in
+ 092c96a8ab9d1bd60ada2ed385cc364ce084180e
+ drm/radeon: fix dp link rate selection (v2)
+
+However for MST we should just always train to the
+max link/rate. Though we probably need to limit this
+for future hw, in theory radeon won't support it.
+
+This fixes my 30" monitor with MST enabled.
+
+Cc: stable@vger.kernel.org # v4.4
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/radeon/radeon_dp_mst.c | 12 ++----------
+ 1 file changed, 2 insertions(+), 10 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+@@ -525,17 +525,9 @@ static bool radeon_mst_mode_fixup(struct
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ {
+ struct radeon_connector_atom_dig *dig_connector;
+- int ret;
+-
+ dig_connector = mst_enc->connector->con_priv;
+- ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
+- dig_connector->dpcd, adjusted_mode->clock,
+- &dig_connector->dp_lane_count,
+- &dig_connector->dp_clock);
+- if (ret) {
+- dig_connector->dp_lane_count = 0;
+- dig_connector->dp_clock = 0;
+- }
++ dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
++ dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
+ DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
+ dig_connector->dp_lane_count, dig_connector->dp_clock);
+ }
--- /dev/null
+From 99c34101fdc0f25de0f6197d0d4b2056d64585a3 Mon Sep 17 00:00:00 2001
+From: "Manoj N. Kumar" <manoj@linux.vnet.ibm.com>
+Date: Mon, 23 May 2016 14:30:42 -0600
+Subject: [PATCH 115/135] cxlflash: Fix to resolve dead-lock during EEH
+ recovery
+
+[ Upstream commit 635f6b0893cff193a1774881ebb1e4a4b9a7fead ]
+
+When a cxlflash adapter goes into EEH recovery and multiple processes
+(each having established its own context) are active, the EEH recovery
+can hang if the processes attempt to recover in parallel. The symptom
+logged after a couple of minutes is:
+
+INFO: task eehd:48 blocked for more than 120 seconds.
+Not tainted 4.5.0-491-26f710d+ #1
+"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+eehd 0 48 2
+Call Trace:
+__switch_to+0x2f0/0x410
+__schedule+0x300/0x980
+schedule+0x48/0xc0
+rwsem_down_write_failed+0x294/0x410
+down_write+0x88/0xb0
+cxlflash_pci_error_detected+0x100/0x1c0 [cxlflash]
+cxl_vphb_error_detected+0x88/0x110 [cxl]
+cxl_pci_error_detected+0xb0/0x1d0 [cxl]
+eeh_report_error+0xbc/0x130
+eeh_pe_dev_traverse+0x94/0x160
+eeh_handle_normal_event+0x17c/0x450
+eeh_handle_event+0x184/0x370
+eeh_event_handler+0x1c8/0x1d0
+kthread+0x110/0x130
+ret_from_kernel_thread+0x5c/0xa4
+INFO: task blockio:33215 blocked for more than 120 seconds.
+
+Not tainted 4.5.0-491-26f710d+ #1
+"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+blockio 0 33215 33213
+Call Trace:
+0x1 (unreliable)
+__switch_to+0x2f0/0x410
+__schedule+0x300/0x980
+schedule+0x48/0xc0
+rwsem_down_read_failed+0x124/0x1d0
+down_read+0x68/0x80
+cxlflash_ioctl+0x70/0x6f0 [cxlflash]
+scsi_ioctl+0x3b0/0x4c0
+sg_ioctl+0x960/0x1010
+do_vfs_ioctl+0xd8/0x8c0
+SyS_ioctl+0xd4/0xf0
+system_call+0x38/0xb4
+INFO: task eehd:48 blocked for more than 120 seconds.
+
+The hang is because of a 3 way dead-lock:
+
+Process A holds the recovery mutex, and waits for eehd to complete.
+Process B holds the semaphore and waits for the recovery mutex.
+eehd waits for semaphore.
+
+The fix is to have Process B above release the semaphore before
+attempting to acquire the recovery mutex. This will allow
+eehd to proceed to completion.
+
+Signed-off-by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>
+Reviewed-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/cxlflash/superpipe.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/drivers/scsi/cxlflash/superpipe.c
++++ b/drivers/scsi/cxlflash/superpipe.c
+@@ -1590,6 +1590,13 @@ err1:
+ * place at the same time and the failure was due to CXL services being
+ * unable to keep up.
+ *
++ * As this routine is called on ioctl context, it holds the ioctl r/w
++ * semaphore that is used to drain ioctls in recovery scenarios. The
++ * implementation to achieve the pacing described above (a local mutex)
++ * requires that the ioctl r/w semaphore be dropped and reacquired to
++ * avoid a 3-way deadlock when multiple process recoveries operate in
++ * parallel.
++ *
+ * Because a user can detect an error condition before the kernel, it is
+ * quite possible for this routine to act as the kernel's EEH detection
+ * source (MMIO read of mbox_r). Because of this, there is a window of
+@@ -1617,9 +1624,17 @@ static int cxlflash_afu_recover(struct s
+ int rc = 0;
+
+ atomic_inc(&cfg->recovery_threads);
++ up_read(&cfg->ioctl_rwsem);
+ rc = mutex_lock_interruptible(mutex);
++ down_read(&cfg->ioctl_rwsem);
+ if (rc)
+ goto out;
++ rc = check_state(cfg);
++ if (rc) {
++ dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc);
++ rc = -ENODEV;
++ goto out;
++ }
+
+ dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
+ __func__, recover->reason, rctxid);
--- /dev/null
+From 2fe9173cdad3c8bbc3f2b51cedb4e6f6edbf340e Mon Sep 17 00:00:00 2001
+From: Keith Busch <keith.busch@intel.com>
+Date: Thu, 26 May 2016 10:25:51 -0600
+Subject: [PATCH 116/135] blk-mq: End unstarted requests on dying queue
+
+[ Upstream commit a59e0f5795fe52dad42a99c00287e3766153b312 ]
+
+Go directly to ending a request if it wasn't started. Previously, completing a
+request may invoke a driver callback for a request it didn't initialize.
+
+Signed-off-by: Keith Busch <keith.busch@intel.com>
+Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
+Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>
+Acked-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-mq.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -601,8 +601,10 @@ static void blk_mq_check_expired(struct
+ * If a request wasn't started before the queue was
+ * marked dying, kill it here or it'll go unnoticed.
+ */
+- if (unlikely(blk_queue_dying(rq->q)))
+- blk_mq_complete_request(rq, -EIO);
++ if (unlikely(blk_queue_dying(rq->q))) {
++ rq->errors = -EIO;
++ blk_mq_end_request(rq, rq->errors);
++ }
+ return;
+ }
+ if (rq->cmd_flags & REQ_NO_TIMEOUT)
--- /dev/null
+From 0994008d55c250e6b9106bae4b01cce793fbdfb2 Mon Sep 17 00:00:00 2001
+From: Zhao Lei <zhaolei@cn.fujitsu.com>
+Date: Fri, 27 May 2016 14:59:00 -0400
+Subject: [PATCH 117/135] btrfs: Continue write in case of can_not_nocow
+
+[ Upstream commit 4da2e26a2a32b174878744bd0f07db180c875f26 ]
+
+btrfs failed in xfstests btrfs/080 with -o nodatacow.
+
+Can be reproduced by following script:
+ DEV=/dev/vdg
+ MNT=/mnt/tmp
+
+ umount $DEV &>/dev/null
+ mkfs.btrfs -f $DEV
+ mount -o nodatacow $DEV $MNT
+
+ dd if=/dev/zero of=$MNT/test bs=1 count=2048 &
+ btrfs subvolume snapshot -r $MNT $MNT/test_snap &
+ wait
+ --
+ We can see dd failed on NO_SPACE.
+
+Reason:
+ __btrfs_buffered_write should run a cow write when nocow is impossible,
+ and the current code is designed with the above logic.
+ But check_can_nocow() has 2 types of return values (0 and <0) for
+ can-not-nocow, and the current code only continues the write in the
+ first case; the second case happened while snapshotting a subvolume.
+
+Fix:
+ Continue the write when check_can_nocow() returns 0 or <0.
+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Zhao Lei <zhaolei@cn.fujitsu.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/file.c | 37 +++++++++++++++++--------------------
+ 1 file changed, 17 insertions(+), 20 deletions(-)
+
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1526,27 +1526,24 @@ static noinline ssize_t __btrfs_buffered
+
+ reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+
+- if (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+- BTRFS_INODE_PREALLOC)) {
+- ret = check_can_nocow(inode, pos, &write_bytes);
+- if (ret < 0)
+- break;
+- if (ret > 0) {
+- /*
+- * For nodata cow case, no need to reserve
+- * data space.
+- */
+- only_release_metadata = true;
+- /*
+- * our prealloc extent may be smaller than
+- * write_bytes, so scale down.
+- */
+- num_pages = DIV_ROUND_UP(write_bytes + offset,
+- PAGE_CACHE_SIZE);
+- reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+- goto reserve_metadata;
+- }
++ if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
++ BTRFS_INODE_PREALLOC)) &&
++ check_can_nocow(inode, pos, &write_bytes) > 0) {
++ /*
++ * For nodata cow case, no need to reserve
++ * data space.
++ */
++ only_release_metadata = true;
++ /*
++ * our prealloc extent may be smaller than
++ * write_bytes, so scale down.
++ */
++ num_pages = DIV_ROUND_UP(write_bytes + offset,
++ PAGE_CACHE_SIZE);
++ reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
++ goto reserve_metadata;
+ }
++
+ ret = btrfs_check_data_free_space(inode, pos, write_bytes);
+ if (ret < 0)
+ break;
--- /dev/null
+From 1ebec6da3542d656442d066834cb48c13bae0426 Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Fri, 3 Jun 2016 20:38:35 -0600
+Subject: [PATCH 118/135] clocksource: Allow unregistering the watchdog
+
+[ Upstream commit bbf66d897adf2bb0c310db96c97e8db6369f39e1 ]
+
+Hyper-V vmbus module registers TSC page clocksource when loaded. This is
+the clocksource with the highest rating and thus it becomes the watchdog
+making unloading of the vmbus module impossible.
+Separate clocksource_select_watchdog() from clocksource_enqueue_watchdog()
+and use it on clocksource register/rating change/unregister.
+
+After all, lobotomized monkeys may need some love too.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Cc: John Stultz <john.stultz@linaro.org>
+Cc: Dexuan Cui <decui@microsoft.com>
+Cc: K. Y. Srinivasan <kys@microsoft.com>
+Link: http://lkml.kernel.org/r/1453483913-25672-1-git-send-email-vkuznets@redhat.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/time/clocksource.c | 52 +++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 42 insertions(+), 10 deletions(-)
+
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -323,13 +323,42 @@ static void clocksource_enqueue_watchdog
+ /* cs is a watchdog. */
+ if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
+ cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
++ }
++ spin_unlock_irqrestore(&watchdog_lock, flags);
++}
++
++static void clocksource_select_watchdog(bool fallback)
++{
++ struct clocksource *cs, *old_wd;
++ unsigned long flags;
++
++ spin_lock_irqsave(&watchdog_lock, flags);
++ /* save current watchdog */
++ old_wd = watchdog;
++ if (fallback)
++ watchdog = NULL;
++
++ list_for_each_entry(cs, &clocksource_list, list) {
++ /* cs is a clocksource to be watched. */
++ if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
++ continue;
++
++ /* Skip current if we were requested for a fallback. */
++ if (fallback && cs == old_wd)
++ continue;
++
+ /* Pick the best watchdog. */
+- if (!watchdog || cs->rating > watchdog->rating) {
++ if (!watchdog || cs->rating > watchdog->rating)
+ watchdog = cs;
+- /* Reset watchdog cycles */
+- clocksource_reset_watchdog();
+- }
+ }
++ /* If we failed to find a fallback restore the old one. */
++ if (!watchdog)
++ watchdog = old_wd;
++
++ /* If we changed the watchdog we need to reset cycles. */
++ if (watchdog != old_wd)
++ clocksource_reset_watchdog();
++
+ /* Check if the watchdog timer needs to be started. */
+ clocksource_start_watchdog();
+ spin_unlock_irqrestore(&watchdog_lock, flags);
+@@ -404,6 +433,7 @@ static void clocksource_enqueue_watchdog
+ cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
+ }
+
++static void clocksource_select_watchdog(bool fallback) { }
+ static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
+ static inline void clocksource_resume_watchdog(void) { }
+ static inline int __clocksource_watchdog_kthread(void) { return 0; }
+@@ -736,6 +766,7 @@ int __clocksource_register_scale(struct
+ clocksource_enqueue(cs);
+ clocksource_enqueue_watchdog(cs);
+ clocksource_select();
++ clocksource_select_watchdog(false);
+ mutex_unlock(&clocksource_mutex);
+ return 0;
+ }
+@@ -758,6 +789,7 @@ void clocksource_change_rating(struct cl
+ mutex_lock(&clocksource_mutex);
+ __clocksource_change_rating(cs, rating);
+ clocksource_select();
++ clocksource_select_watchdog(false);
+ mutex_unlock(&clocksource_mutex);
+ }
+ EXPORT_SYMBOL(clocksource_change_rating);
+@@ -767,12 +799,12 @@ EXPORT_SYMBOL(clocksource_change_rating)
+ */
+ static int clocksource_unbind(struct clocksource *cs)
+ {
+- /*
+- * I really can't convince myself to support this on hardware
+- * designed by lobotomized monkeys.
+- */
+- if (clocksource_is_watchdog(cs))
+- return -EBUSY;
++ if (clocksource_is_watchdog(cs)) {
++ /* Select and try to install a replacement watchdog. */
++ clocksource_select_watchdog(true);
++ if (clocksource_is_watchdog(cs))
++ return -EBUSY;
++ }
+
+ if (cs == curr_clocksource) {
+ /* Select and try to install a replacement clock source */
--- /dev/null
+From b14ba5d5101017dddc1b569f310d52eaa27f7acf Mon Sep 17 00:00:00 2001
+From: Ganapatrao Kulkarni <gkulkarni@caviumnetworks.com>
+Date: Wed, 25 May 2016 15:29:20 +0200
+Subject: [PATCH 119/135] irqchip/gicv3-its: numa: Enable workaround for Cavium
+ thunderx erratum 23144
+
+[ Upstream commit fbf8f40e1658cb2f17452dbd3c708e329c5d27e0 ]
+
+The erratum fixes the hang of ITS SYNC command by avoiding inter node
+io and collections/cpu mapping on thunderx dual-socket platform.
+
+This fix is only applicable for Cavium's ThunderX dual-socket platform.
+
+Reviewed-by: Robert Richter <rrichter@cavium.com>
+Signed-off-by: Ganapatrao Kulkarni <gkulkarni@caviumnetworks.com>
+Signed-off-by: Robert Richter <rrichter@cavium.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig | 9 +++++++
+ drivers/irqchip/irq-gic-v3-its.c | 49 +++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 56 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -391,6 +391,15 @@ config CAVIUM_ERRATUM_22375
+
+ If unsure, say Y.
+
++config CAVIUM_ERRATUM_23144
++ bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
++ depends on NUMA
++ default y
++ help
++ ITS SYNC command hang for cross node io and collections/cpu mapping.
++
++ If unsure, say Y.
++
+ config CAVIUM_ERRATUM_23154
+ bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
+ default y
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -41,6 +41,7 @@
+
+ #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
+ #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
++#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
+
+ #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
+
+@@ -71,6 +72,7 @@ struct its_node {
+ struct list_head its_device_list;
+ u64 flags;
+ u32 ite_size;
++ int numa_node;
+ };
+
+ #define ITS_ITT_ALIGN SZ_256
+@@ -600,11 +602,23 @@ static void its_unmask_irq(struct irq_da
+ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+ {
+- unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
++ unsigned int cpu;
++ const struct cpumask *cpu_mask = cpu_online_mask;
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_collection *target_col;
+ u32 id = its_get_event_id(d);
+
++ /* lpi cannot be routed to a redistributor that is on a foreign node */
++ if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
++ if (its_dev->its->numa_node >= 0) {
++ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
++ if (!cpumask_intersects(mask_val, cpu_mask))
++ return -EINVAL;
++ }
++ }
++
++ cpu = cpumask_any_and(mask_val, cpu_mask);
++
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+@@ -1081,6 +1095,16 @@ static void its_cpu_init_collection(void
+ list_for_each_entry(its, &its_nodes, entry) {
+ u64 target;
+
++ /* avoid cross node collections and its mapping */
++ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
++ struct device_node *cpu_node;
++
++ cpu_node = of_get_cpu_node(cpu, NULL);
++ if (its->numa_node != NUMA_NO_NODE &&
++ its->numa_node != of_node_to_nid(cpu_node))
++ continue;
++ }
++
+ /*
+ * We now have to bind each collection to its target
+ * redistributor.
+@@ -1308,9 +1332,14 @@ static void its_irq_domain_activate(stru
+ {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
++ const struct cpumask *cpu_mask = cpu_online_mask;
++
++ /* get the cpu_mask of local node */
++ if (its_dev->its->numa_node >= 0)
++ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+
+ /* Bind the LPI to the first possible CPU */
+- its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
++ its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
+
+ /* Map the GIC IRQ and event to the device */
+ its_send_mapvi(its_dev, d->hwirq, event);
+@@ -1400,6 +1429,13 @@ static void __maybe_unused its_enable_qu
+ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
+ }
+
++static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
++{
++ struct its_node *its = data;
++
++ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
++}
++
+ static const struct gic_quirk its_quirks[] = {
+ #ifdef CONFIG_CAVIUM_ERRATUM_22375
+ {
+@@ -1409,6 +1445,14 @@ static const struct gic_quirk its_quirks
+ .init = its_enable_quirk_cavium_22375,
+ },
+ #endif
++#ifdef CONFIG_CAVIUM_ERRATUM_23144
++ {
++ .desc = "ITS: Cavium erratum 23144",
++ .iidr = 0xa100034c, /* ThunderX pass 1.x */
++ .mask = 0xffff0fff,
++ .init = its_enable_quirk_cavium_23144,
++ },
++#endif
+ {
+ }
+ };
+@@ -1470,6 +1514,7 @@ static int its_probe(struct device_node
+ its->base = its_base;
+ its->phys_base = res.start;
+ its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
++ its->numa_node = of_node_to_nid(node);
+
+ its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
+ if (!its->cmd_base) {
--- /dev/null
+From 0bcfdc6ac7ac106c46b1a6c53310b754c58a8a5f Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Fri, 3 Jun 2016 07:42:17 -0600
+Subject: [PATCH 120/135] block: fix blk_rq_get_max_sectors for driver private
+ requests
+
+[ Upstream commit f21018427cb007a0894c36ad702990ab639cbbb4 ]
+
+Driver private request types should not get the artifical cap for the
+FS requests. This is important to use the full device capabilities
+for internal command or NVMe pass through commands.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reported-by: Jeff Lien <Jeff.Lien@hgst.com>
+Tested-by: Jeff Lien <Jeff.Lien@hgst.com>
+Reviewed-by: Keith Busch <keith.busch@intel.com>
+
+Updated by me to use an explicit check for the one command type that
+does support extended checking, instead of relying on the ordering
+of the enum command values - as suggested by Keith.
+
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/blkdev.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -890,7 +890,7 @@ static inline unsigned int blk_rq_get_ma
+ {
+ struct request_queue *q = rq->q;
+
+- if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
++ if (unlikely(rq->cmd_type != REQ_TYPE_FS))
+ return q->limits.max_hw_sectors;
+
+ if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
--- /dev/null
+From cae45652901c6b14f4307cc02147b44b6b061d2c Mon Sep 17 00:00:00 2001
+From: James Smart <james.smart@broadcom.com>
+Date: Fri, 3 Jun 2016 07:14:08 -0600
+Subject: [PATCH 121/135] lpfc: Fix DMA faults observed upon plugging loopback
+ connector
+
+[ Upstream commit ae09c765109293b600ba9169aa3d632e1ac1a843 ]
+
+Driver didn't program the REG_VFI mailbox correctly, giving the adapter
+bad addresses.
+
+Signed-off-by: Dick Kennedy <dick.kennedy@avagotech.com>
+Signed-off-by: James Smart <james.smart@avagotech.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/lpfc/lpfc_mbox.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/scsi/lpfc/lpfc_mbox.c
++++ b/drivers/scsi/lpfc/lpfc_mbox.c
+@@ -2145,10 +2145,12 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, str
+ reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
+ reg_vfi->e_d_tov = phba->fc_edtov;
+ reg_vfi->r_a_tov = phba->fc_ratov;
+- reg_vfi->bde.addrHigh = putPaddrHigh(phys);
+- reg_vfi->bde.addrLow = putPaddrLow(phys);
+- reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
+- reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
++ if (phys) {
++ reg_vfi->bde.addrHigh = putPaddrHigh(phys);
++ reg_vfi->bde.addrLow = putPaddrLow(phys);
++ reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
++ reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
++ }
+ bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+
+ /* Only FC supports upd bit */
--- /dev/null
+From 1c326060de598cf4c4821e318ad4d3157e8023ba Mon Sep 17 00:00:00 2001
+From: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Date: Tue, 14 Jun 2016 10:55:22 -0700
+Subject: [PATCH 122/135] HID: core: prevent out-of-bound readings
+
+[ Upstream commit 50220dead1650609206efe91f0cc116132d59b3f ]
+
+Plugging a Logitech DJ receiver with KASAN activated raises a bunch of
+out-of-bound readings.
+
+The fields are allocated up to MAX_USAGE, meaning that potentially, we do
+not have enough fields to fit the incoming values.
+Add checks and silence KASAN.
+
+Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hid/hid-core.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1251,6 +1251,7 @@ static void hid_input_field(struct hid_d
+ /* Ignore report if ErrorRollOver */
+ if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
+ value[n] >= min && value[n] <= max &&
++ value[n] - min < field->maxusage &&
+ field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
+ goto exit;
+ }
+@@ -1263,11 +1264,13 @@ static void hid_input_field(struct hid_d
+ }
+
+ if (field->value[n] >= min && field->value[n] <= max
++ && field->value[n] - min < field->maxusage
+ && field->usage[field->value[n] - min].hid
+ && search(value, field->value[n], count))
+ hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
+
+ if (value[n] >= min && value[n] <= max
++ && value[n] - min < field->maxusage
+ && field->usage[value[n] - min].hid
+ && search(field->value, value[n], count))
+ hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
--- /dev/null
+From 890c53b8667ad56511f5bf563db96003660d440f Mon Sep 17 00:00:00 2001
+From: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+Date: Thu, 5 May 2016 11:09:27 -0300
+Subject: [PATCH 123/135] crypto: vmx - comply with ABIs that specify vrsave as
+ reserved.
+
+[ Upstream commit 5ca55738201c7ae1b556ad87bbb22c139ecc01dd ]
+
+It gives significant improvements ( ~+15%) on some modes.
+
+These code has been adopted from OpenSSL project in collaboration
+with the original author (Andy Polyakov <appro@openssl.org>).
+
+Signed-off-by: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/vmx/ppc-xlate.pl | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/drivers/crypto/vmx/ppc-xlate.pl
++++ b/drivers/crypto/vmx/ppc-xlate.pl
+@@ -139,6 +139,26 @@ my $vmr = sub {
+ " vor $vx,$vy,$vy";
+ };
+
++# Some ABIs specify vrsave, special-purpose register #256, as reserved
++# for system use.
++my $no_vrsave = ($flavour =~ /aix|linux64le/);
++my $mtspr = sub {
++ my ($f,$idx,$ra) = @_;
++ if ($idx == 256 && $no_vrsave) {
++ " or $ra,$ra,$ra";
++ } else {
++ " mtspr $idx,$ra";
++ }
++};
++my $mfspr = sub {
++ my ($f,$rd,$idx) = @_;
++ if ($idx == 256 && $no_vrsave) {
++ " li $rd,-1";
++ } else {
++ " mfspr $rd,$idx";
++ }
++};
++
+ # PowerISA 2.06 stuff
+ sub vsxmem_op {
+ my ($f, $vrt, $ra, $rb, $op) = @_;
--- /dev/null
+From 081cd5f81ebee304c30421ca6440faefebe7dff6 Mon Sep 17 00:00:00 2001
+From: Anton Blanchard <anton@samba.org>
+Date: Fri, 10 Jun 2016 16:47:02 +1000
+Subject: [PATCH 124/135] crypto: vmx - Fix ABI detection
+
+[ Upstream commit 975f57fdff1d0eb9816806cabd27162a8a1a4038 ]
+
+When calling ppc-xlate.pl, we pass it either linux-ppc64 or
+linux-ppc64le. The script however was expecting linux64le, a result
+of its OpenSSL origins. This means we aren't obeying the ppc64le
+ABIv2 rules.
+
+Fix this by checking for linux-ppc64le.
+
+Fixes: 5ca55738201c ("crypto: vmx - comply with ABIs that specify vrsave as reserved.")
+Cc: stable@vger.kernel.org
+Signed-off-by: Anton Blanchard <anton@samba.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/vmx/ppc-xlate.pl | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/vmx/ppc-xlate.pl
++++ b/drivers/crypto/vmx/ppc-xlate.pl
+@@ -141,7 +141,7 @@ my $vmr = sub {
+
+ # Some ABIs specify vrsave, special-purpose register #256, as reserved
+ # for system use.
+-my $no_vrsave = ($flavour =~ /aix|linux64le/);
++my $no_vrsave = ($flavour =~ /linux-ppc64le/);
+ my $mtspr = sub {
+ my ($f,$idx,$ra) = @_;
+ if ($idx == 256 && $no_vrsave) {
--- /dev/null
+From 89e7ec89ee899bc872879585940304ad1ffcd8a3 Mon Sep 17 00:00:00 2001
+From: Matthias Schwarzott <zzam@gentoo.org>
+Date: Mon, 20 Jun 2016 06:22:12 -0600
+Subject: [PATCH 125/135] tda10071: Fix dependency to REGMAP_I2C
+
+[ Upstream commit b046d3ad38d90276379c862f15ddd99fa8739906 ]
+
+Without I get this error for by dvb-card:
+ tda10071: Unknown symbol devm_regmap_init_i2c (err 0)
+ cx23885_dvb_register() dvb_register failed err = -22
+ cx23885_dev_setup() Failed to register dvb adapters on VID_B
+
+Signed-off-by: Matthias Schwarzott <zzam@gentoo.org>
+Reviewed-by: Antti Palosaari <crope@iki.fi>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/dvb-frontends/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/media/dvb-frontends/Kconfig
++++ b/drivers/media/dvb-frontends/Kconfig
+@@ -264,7 +264,7 @@ config DVB_MB86A16
+ config DVB_TDA10071
+ tristate "NXP TDA10071"
+ depends on DVB_CORE && I2C
+- select REGMAP
++ select REGMAP_I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y when you want to support this frontend.
--- /dev/null
+From 48bf34a48924b9cec19c198dc12dfee3afeb4bca Mon Sep 17 00:00:00 2001
+From: Leonidas Da Silva Barbosa <leosilva@linux.vnet.ibm.com>
+Date: Mon, 27 Jun 2016 09:12:02 -0600
+Subject: [PATCH 126/135] crypto: vmx - IV size failing on skcipher API
+
+[ Upstream commit 0d3d054b43719ef33232677ba27ba6097afdafbc ]
+
+IV size was zero on CBC and CTR modes,
+causing a bug triggered by skcipher.
+
+Fixing this adding a correct size.
+
+Signed-off-by: Leonidas Da Silva Barbosa <leosilva@linux.vnet.ibm.com>
+Signed-off-by: Paulo Smorigo <pfsmorigo@linux.vnet.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/vmx/aes_cbc.c | 2 +-
+ drivers/crypto/vmx/aes_ctr.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -191,7 +191,7 @@ struct crypto_alg p8_aes_cbc_alg = {
+ .cra_init = p8_aes_cbc_init,
+ .cra_exit = p8_aes_cbc_exit,
+ .cra_blkcipher = {
+- .ivsize = 0,
++ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = p8_aes_cbc_setkey,
+--- a/drivers/crypto/vmx/aes_ctr.c
++++ b/drivers/crypto/vmx/aes_ctr.c
+@@ -175,7 +175,7 @@ struct crypto_alg p8_aes_ctr_alg = {
+ .cra_init = p8_aes_ctr_init,
+ .cra_exit = p8_aes_ctr_exit,
+ .cra_blkcipher = {
+- .ivsize = 0,
++ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = p8_aes_ctr_setkey,
--- /dev/null
+From 6fe5c916f1eafe54e2c9350419df6a843d011020 Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Fri, 15 Apr 2016 15:50:32 +0200
+Subject: [PATCH 127/135] x86/hyperv: Avoid reporting bogus NMI status for Gen2
+ instances
+
+[ Upstream commit 1e2ae9ec072f3b7887f456426bc2cf23b80f661a ]
+
+Generation2 instances don't support reporting the NMI status on port 0x61,
+read from there returns 'ff' and we end up reporting nonsensical PCI
+error (as there is no PCI bus in these instances) on all NMIs:
+
+ NMI: PCI system error (SERR) for reason ff on CPU 0.
+ Dazed and confused, but trying to continue
+
+Fix the issue by overriding x86_platform.get_nmi_reason. Use 'booted on
+EFI' flag to detect Gen2 instances.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Cathy Avery <cavery@redhat.com>
+Cc: Haiyang Zhang <haiyangz@microsoft.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: K. Y. Srinivasan <kys@microsoft.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: devel@linuxdriverproject.org
+Link: http://lkml.kernel.org/r/1460728232-31433-1-git-send-email-vkuznets@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/mshyperv.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -152,6 +152,11 @@ static struct clocksource hyperv_cs = {
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ };
+
++static unsigned char hv_get_nmi_reason(void)
++{
++ return 0;
++}
++
+ static void __init ms_hyperv_init_platform(void)
+ {
+ /*
+@@ -191,6 +196,13 @@ static void __init ms_hyperv_init_platfo
+ machine_ops.crash_shutdown = hv_machine_crash_shutdown;
+ #endif
+ mark_tsc_unstable("running on Hyper-V");
++
++ /*
++ * Generation 2 instances don't support reading the NMI status from
++ * 0x61 port.
++ */
++ if (efi_enabled(EFI_BOOT))
++ x86_platform.get_nmi_reason = hv_get_nmi_reason;
+ }
+
+ const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
--- /dev/null
+From f184dbed4e1254c42b73f62bc59ae89d513a6aec Mon Sep 17 00:00:00 2001
+From: Sunil Goutham <sgoutham@cavium.com>
+Date: Mon, 27 Jun 2016 15:30:02 +0530
+Subject: [PATCH 128/135] net: thunderx: Fix link status reporting
+
+[ Upstream commit 3f4c68cfde30caa1f6d8368fd19590671411ade2 ]
+
+Check for SMU RX local/remote faults along with SPU LINK
+status. Otherwise at times link is UP at our end but DOWN
+at link partner's side. Also due to an issue in BGX it's
+rarely seen that initialization doesn't happen properly
+and SMU RX reports faults with everything fine at SPU.
+This patch tries to reinitialize LMAC to fix it.
+
+Also fixed LMAC disable sequence to properly bring down link.
+
+Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
+Signed-off-by: Tao Wang <tao.wang@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 91 ++++++++++++++--------
+ drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 2
+ 2 files changed, 62 insertions(+), 31 deletions(-)
+
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+@@ -549,7 +549,9 @@ static int bgx_xaui_check_link(struct lm
+ }
+
+ /* Clear rcvflt bit (latching high) and read it back */
+- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
++ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
++ bgx_reg_modify(bgx, lmacid,
++ BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+ dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
+ if (bgx->use_training) {
+@@ -568,13 +570,6 @@ static int bgx_xaui_check_link(struct lm
+ return -1;
+ }
+
+- /* Wait for MAC RX to be ready */
+- if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
+- SMU_RX_CTL_STATUS, true)) {
+- dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
+- return -1;
+- }
+-
+ /* Wait for BGX RX to be idle */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
+@@ -587,29 +582,30 @@ static int bgx_xaui_check_link(struct lm
+ return -1;
+ }
+
+- if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+- dev_err(&bgx->pdev->dev, "Receive fault\n");
+- return -1;
+- }
+-
+- /* Receive link is latching low. Force it high and verify it */
+- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
+- if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
+- SPU_STATUS1_RCV_LNK, false)) {
+- dev_err(&bgx->pdev->dev, "SPU receive link down\n");
+- return -1;
+- }
+-
++ /* Clear receive packet disable */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
+ cfg &= ~SPU_MISC_CTL_RX_DIS;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
+- return 0;
++
++ /* Check for MAC RX faults */
++ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
++ /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
++ cfg &= SMU_RX_CTL_STATUS;
++ if (!cfg)
++ return 0;
++
++ /* Rx local/remote fault seen.
++ * Do lmac reinit to see if condition recovers
++ */
++ bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
++
++ return -1;
+ }
+
+ static void bgx_poll_for_link(struct work_struct *work)
+ {
+ struct lmac *lmac;
+- u64 link;
++ u64 spu_link, smu_link;
+
+ lmac = container_of(work, struct lmac, dwork.work);
+
+@@ -619,8 +615,11 @@ static void bgx_poll_for_link(struct wor
+ bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
+ SPU_STATUS1_RCV_LNK, false);
+
+- link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+- if (link & SPU_STATUS1_RCV_LNK) {
++ spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
++ smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
++
++ if ((spu_link & SPU_STATUS1_RCV_LNK) &&
++ !(smu_link & SMU_RX_CTL_STATUS)) {
+ lmac->link_up = 1;
+ if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
+ lmac->last_speed = 40000;
+@@ -634,9 +633,15 @@ static void bgx_poll_for_link(struct wor
+ }
+
+ if (lmac->last_link != lmac->link_up) {
++ if (lmac->link_up) {
++ if (bgx_xaui_check_link(lmac)) {
++ /* Errors, clear link_up state */
++ lmac->link_up = 0;
++ lmac->last_speed = SPEED_UNKNOWN;
++ lmac->last_duplex = DUPLEX_UNKNOWN;
++ }
++ }
+ lmac->last_link = lmac->link_up;
+- if (lmac->link_up)
+- bgx_xaui_check_link(lmac);
+ }
+
+ queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
+@@ -708,7 +713,7 @@ static int bgx_lmac_enable(struct bgx *b
+ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
+ {
+ struct lmac *lmac;
+- u64 cmrx_cfg;
++ u64 cfg;
+
+ lmac = &bgx->lmac[lmacid];
+ if (lmac->check_link) {
+@@ -717,9 +722,33 @@ static void bgx_lmac_disable(struct bgx
+ destroy_workqueue(lmac->check_link);
+ }
+
+- cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+- cmrx_cfg &= ~(1 << 15);
+- bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
++ /* Disable packet reception */
++ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
++ cfg &= ~CMR_PKT_RX_EN;
++ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
++
++ /* Give chance for Rx/Tx FIFO to get drained */
++ bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
++ bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
++
++ /* Disable packet transmission */
++ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
++ cfg &= ~CMR_PKT_TX_EN;
++ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
++
++ /* Disable serdes lanes */
++ if (!lmac->is_sgmii)
++ bgx_reg_modify(bgx, lmacid,
++ BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
++ else
++ bgx_reg_modify(bgx, lmacid,
++ BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
++
++ /* Disable LMAC */
++ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
++ cfg &= ~CMR_EN;
++ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
++
+ bgx_flush_dmac_addrs(bgx, lmacid);
+
+ if ((bgx->lmac_type != BGX_MODE_XFI) &&
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+@@ -41,6 +41,7 @@
+ #define BGX_CMRX_RX_STAT10 0xC0
+ #define BGX_CMRX_RX_BP_DROP 0xC8
+ #define BGX_CMRX_RX_DMAC_CTL 0x0E8
++#define BGX_CMRX_RX_FIFO_LEN 0x108
+ #define BGX_CMR_RX_DMACX_CAM 0x200
+ #define RX_DMACX_CAM_EN BIT_ULL(48)
+ #define RX_DMACX_CAM_LMACID(x) (x << 49)
+@@ -50,6 +51,7 @@
+ #define BGX_CMR_CHAN_MSK_AND 0x450
+ #define BGX_CMR_BIST_STATUS 0x460
+ #define BGX_CMR_RX_LMACS 0x468
++#define BGX_CMRX_TX_FIFO_LEN 0x518
+ #define BGX_CMRX_TX_STAT0 0x600
+ #define BGX_CMRX_TX_STAT1 0x608
+ #define BGX_CMRX_TX_STAT2 0x610
--- /dev/null
+From 3898d97cefa00264fffcbf8d5f50f1a9c2682526 Mon Sep 17 00:00:00 2001
+From: Pavel Rojtberg <rojtberg@gmail.com>
+Date: Fri, 1 Jul 2016 17:32:09 -0400
+Subject: [PATCH 129/135] Input: xpad - move pending clear to the correct
+ location
+
+[ Upstream commit 4efc6939a83c54fb3417541be48991afd0290ba3 ]
+
+otherwise we lose ff commands: https://github.com/paroj/xpad/issues/27
+
+Signed-off-by: Pavel Rojtberg <rojtberg@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/input/joystick/xpad.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -718,6 +718,7 @@ static bool xpad_prepare_next_out_packet
+ if (packet) {
+ memcpy(xpad->odata, packet->data, packet->len);
+ xpad->irq_out->transfer_buffer_length = packet->len;
++ packet->pending = false;
+ return true;
+ }
+
+@@ -757,7 +758,6 @@ static void xpad_irq_out(struct urb *urb
+ switch (status) {
+ case 0:
+ /* success */
+- xpad->out_packets[xpad->last_out_packet].pending = false;
+ xpad->irq_out_active = xpad_prepare_next_out_packet(xpad);
+ break;
+
--- /dev/null
+From 7b7f307e0d9a3b69955e1c6ef9c4ca297c35cf5f Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Thu, 7 Jul 2016 16:24:28 +0800
+Subject: [PATCH 130/135] drm/i915: Only ignore eDP ports that are connected
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit 457c52d87e5dac9a4cf1a6a287e60ea7645067d4 ]
+
+If the VBT says that a certain port should be eDP (and hence fused off
+from HDMI), but in reality it isn't, we need to try and acquire the HDMI
+connection instead. So only trust the VBT edp setting if we can connect
+to an eDP device on that port.
+
+Fixes: d2182a6608 (drm/i915: Don't register HDMI connectors for eDP ports on VLV/CHV)
+References: https://bugs.freedesktop.org/show_bug.cgi?id=96288
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Tested-by: Phidias Chiang <phidias.chiang@canonical.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: Jani Nikula <jani.nikula@intel.com>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/1464766070-31623-1-git-send-email-chris@chris-wilson.co.uk
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/intel_display.c | 20 ++++++++++----------
+ drivers/gpu/drm/i915/intel_dp.c | 12 ++++++------
+ drivers/gpu/drm/i915/intel_drv.h | 2 +-
+ 3 files changed, 17 insertions(+), 17 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -14160,6 +14160,8 @@ static void intel_setup_outputs(struct d
+ if (I915_READ(PCH_DP_D) & DP_DETECTED)
+ intel_dp_init(dev, PCH_DP_D, PORT_D);
+ } else if (IS_VALLEYVIEW(dev)) {
++ bool has_edp;
++
+ /*
+ * The DP_DETECTED bit is the latched state of the DDC
+ * SDA pin at boot. However since eDP doesn't require DDC
+@@ -14169,19 +14171,17 @@ static void intel_setup_outputs(struct d
+ * eDP ports. Consult the VBT as well as DP_DETECTED to
+ * detect eDP ports.
+ */
+- if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
+- !intel_dp_is_edp(dev, PORT_B))
++ has_edp = intel_dp_is_edp(dev, PORT_B);
++ if (I915_READ(VLV_DP_B) & DP_DETECTED || has_edp)
++ has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
++ if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && !has_edp)
+ intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
+- if (I915_READ(VLV_DP_B) & DP_DETECTED ||
+- intel_dp_is_edp(dev, PORT_B))
+- intel_dp_init(dev, VLV_DP_B, PORT_B);
+
+- if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
+- !intel_dp_is_edp(dev, PORT_C))
++ has_edp = intel_dp_is_edp(dev, PORT_C);
++ if (I915_READ(VLV_DP_C) & DP_DETECTED || has_edp)
++ has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
++ if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && !has_edp)
+ intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
+- if (I915_READ(VLV_DP_C) & DP_DETECTED ||
+- intel_dp_is_edp(dev, PORT_C))
+- intel_dp_init(dev, VLV_DP_C, PORT_C);
+
+ if (IS_CHERRYVIEW(dev)) {
+ /* eDP not supported on port D, so don't check VBT */
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -6113,8 +6113,9 @@ intel_dp_init_connector(struct intel_dig
+ return true;
+ }
+
+-void
+-intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
++bool intel_dp_init(struct drm_device *dev,
++ int output_reg,
++ enum port port)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port;
+@@ -6124,7 +6125,7 @@ intel_dp_init(struct drm_device *dev, in
+
+ intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
+ if (!intel_dig_port)
+- return;
++ return false;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector)
+@@ -6179,15 +6180,14 @@ intel_dp_init(struct drm_device *dev, in
+ if (!intel_dp_init_connector(intel_dig_port, intel_connector))
+ goto err_init_connector;
+
+- return;
++ return true;
+
+ err_init_connector:
+ drm_encoder_cleanup(encoder);
+ kfree(intel_connector);
+ err_connector_alloc:
+ kfree(intel_dig_port);
+-
+- return;
++ return false;
+ }
+
+ void intel_dp_mst_suspend(struct drm_device *dev)
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -1195,7 +1195,7 @@ void intel_csr_ucode_fini(struct drm_dev
+ void assert_csr_loaded(struct drm_i915_private *dev_priv);
+
+ /* intel_dp.c */
+-void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
++bool intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
+ bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+ void intel_dp_set_link_params(struct intel_dp *intel_dp,
--- /dev/null
+From d8cee6a0e346789b7f813fd84f6a8c2a084d15c4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Thu, 7 Jul 2016 16:24:29 +0800
+Subject: [PATCH 131/135] drm/i915: Check VBT for port presence in addition to
+ the strap on VLV/CHV
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit 22f35042593c2b369861f0b9740efb8065a42db0 ]
+
+Apparently some CHV boards failed to hook up the port presence straps
+for HDMI ports as well (earlier we assumed this problem only affected
+eDP ports). So let's check the VBT in addition to the strap, and if
+either one claims that the port is present go ahead and register the
+relevant connector.
+
+While at it, change port D to register DP before HDMI as we do for ports
+B and C since
+commit 457c52d87e5d ("drm/i915: Only ignore eDP ports that are connected")
+
+Also print a debug message when we register a HDMI connector to aid
+in diagnosing missing/incorrect ports. We already had such a print for
+DP/eDP.
+
+v2: Improve the comment in the code a bit, note the port D change in
+ the commit message
+
+Cc: Radoslav Duda <radosd@radosd.com>
+Tested-by: Radoslav Duda <radosd@radosd.com>
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=96321
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/1464945463-14364-1-git-send-email-ville.syrjala@linux.intel.com
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_drv.h | 3 ++
+ drivers/gpu/drm/i915/intel_bios.c | 39 +++++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/i915/intel_display.c | 30 ++++++++++++++++++--------
+ drivers/gpu/drm/i915/intel_hdmi.c | 3 ++
+ 4 files changed, 66 insertions(+), 9 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -3313,6 +3313,9 @@ static inline bool intel_gmbus_is_forced
+ }
+ extern void intel_i2c_reset(struct drm_device *dev);
+
++/* intel_bios.c */
++bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
++
+ /* intel_opregion.c */
+ #ifdef CONFIG_ACPI
+ extern int intel_opregion_setup(struct drm_device *dev);
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1351,3 +1351,42 @@ intel_parse_bios(struct drm_device *dev)
+
+ return 0;
+ }
++
++/**
++ * intel_bios_is_port_present - is the specified digital port present
++ * @dev_priv: i915 device instance
++ * @port: port to check
++ *
++ * Return true if the device in %port is present.
++ */
++bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
++{
++ static const struct {
++ u16 dp, hdmi;
++ } port_mapping[] = {
++ [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
++ [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
++ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
++ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
++ };
++ int i;
++
++ /* FIXME maybe deal with port A as well? */
++ if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
++ return false;
++
++ if (!dev_priv->vbt.child_dev_num)
++ return false;
++
++ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
++ const union child_device_config *p_child =
++ &dev_priv->vbt.child_dev[i];
++ if ((p_child->common.dvo_port == port_mapping[port].dp ||
++ p_child->common.dvo_port == port_mapping[port].hdmi) &&
++ (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
++ DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
++ return true;
++ }
++
++ return false;
++}
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -14160,7 +14160,7 @@ static void intel_setup_outputs(struct d
+ if (I915_READ(PCH_DP_D) & DP_DETECTED)
+ intel_dp_init(dev, PCH_DP_D, PORT_D);
+ } else if (IS_VALLEYVIEW(dev)) {
+- bool has_edp;
++ bool has_edp, has_port;
+
+ /*
+ * The DP_DETECTED bit is the latched state of the DDC
+@@ -14170,25 +14170,37 @@ static void intel_setup_outputs(struct d
+ * Thus we can't rely on the DP_DETECTED bit alone to detect
+ * eDP ports. Consult the VBT as well as DP_DETECTED to
+ * detect eDP ports.
++ *
++ * Sadly the straps seem to be missing sometimes even for HDMI
++ * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
++ * and VBT for the presence of the port. Additionally we can't
++ * trust the port type the VBT declares as we've seen at least
++ * HDMI ports that the VBT claim are DP or eDP.
+ */
+ has_edp = intel_dp_is_edp(dev, PORT_B);
+- if (I915_READ(VLV_DP_B) & DP_DETECTED || has_edp)
++ has_port = intel_bios_is_port_present(dev_priv, PORT_B);
++ if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+- if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && !has_edp)
++ if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
+ intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
+
+ has_edp = intel_dp_is_edp(dev, PORT_C);
+- if (I915_READ(VLV_DP_C) & DP_DETECTED || has_edp)
++ has_port = intel_bios_is_port_present(dev_priv, PORT_C);
++ if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+- if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && !has_edp)
++ if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
+ intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
+
+ if (IS_CHERRYVIEW(dev)) {
+- /* eDP not supported on port D, so don't check VBT */
+- if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
+- intel_hdmi_init(dev, CHV_HDMID, PORT_D);
+- if (I915_READ(CHV_DP_D) & DP_DETECTED)
++ /*
++ * eDP not supported on port D,
++ * so no need to worry about it
++ */
++ has_port = intel_bios_is_port_present(dev_priv, PORT_D);
++ if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
+ intel_dp_init(dev, CHV_DP_D, PORT_D);
++ if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
++ intel_hdmi_init(dev, CHV_HDMID, PORT_D);
+ }
+
+ intel_dsi_init(dev);
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -2030,6 +2030,9 @@ void intel_hdmi_init_connector(struct in
+ enum port port = intel_dig_port->port;
+ uint8_t alternate_ddc_pin;
+
++ DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
++ port_name(port));
++
+ drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
--- /dev/null
+From 765bc64e034d268aa130ea5253a7cc322cf87be0 Mon Sep 17 00:00:00 2001
+From: Tedd Ho-Jeong An <tedd.an@intel.com>
+Date: Wed, 13 Jul 2016 16:13:23 +0800
+Subject: [PATCH 132/135] Bluetooth: Add support for Intel Bluetooth device
+ 8265 [8087:0a2b]
+
+[ Upstream commit a0af53b511423cca93900066512379e21586d7dd ]
+
+This patch adds support for Intel Bluetooth device 8265 also known
+as Windstorm Peak (WsP).
+
+T: Bus=01 Lev=01 Prnt=01 Port=01 Cnt=02 Dev#= 6 Spd=12 MxCh= 0
+D: Ver= 2.00 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1
+P: Vendor=8087 ProdID=0a2b Rev= 0.10
+C:* #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=100mA
+I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=81(I) Atr=03(Int.) MxPS= 64 Ivl=1ms
+E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms
+E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms
+I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms
+E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms
+I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms
+E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms
+I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms
+E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms
+I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms
+E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms
+I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms
+E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms
+I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms
+E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms
+
+Signed-off-by: Tedd Ho-Jeong An <tedd.an@intel.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/bluetooth/btusb.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -2056,12 +2056,13 @@ static int btusb_setup_intel_new(struct
+ return -EINVAL;
+ }
+
+- /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
+- * supported by this firmware loading method. This check has been
+- * put in place to ensure correct forward compatibility options
+- * when newer hardware variants come along.
++ /* At the moment the iBT 3.0 hardware variants 0x0b (LnP/SfP)
++ * and 0x0c (WsP) are supported by this firmware loading method.
++ *
++ * This check has been put in place to ensure correct forward
++ * compatibility options when newer hardware variants come along.
+ */
+- if (ver->hw_variant != 0x0b) {
++ if (ver->hw_variant != 0x0b && ver->hw_variant != 0x0c) {
+ BT_ERR("%s: Unsupported Intel hardware variant (%u)",
+ hdev->name, ver->hw_variant);
+ kfree_skb(skb);
--- /dev/null
+From 857d46ff805805aeb539429216be3db548b03350 Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Wed, 13 Jul 2016 11:44:54 +0100
+Subject: [PATCH 133/135] netfilter: x_tables: check for size overflow
+
+[ Upstream commit d157bd761585605b7882935ffb86286919f62ea1 ]
+
+Ben Hawkes says:
+ integer overflow in xt_alloc_table_info, which on 32-bit systems can
+ lead to small structure allocation and a copy_from_user based heap
+ corruption.
+
+Reported-by: Ben Hawkes <hawkes@google.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/x_tables.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -897,6 +897,9 @@ struct xt_table_info *xt_alloc_table_inf
+ struct xt_table_info *info = NULL;
+ size_t sz = sizeof(*info) + size;
+
++ if (sz < sizeof(*info))
++ return NULL;
++
+ /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
+ if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
+ return NULL;
--- /dev/null
+From b3295e24834304c804eed1c95fd98ea93b6923ed Mon Sep 17 00:00:00 2001
+From: Kangjie Lu <kangjielu@gmail.com>
+Date: Thu, 14 Jul 2016 15:02:06 +0100
+Subject: [PATCH 134/135] tipc: fix an infoleak in tipc_nl_compat_link_dump
+
+[ Upstream commit 5d2be1422e02ccd697ccfcd45c85b4a26e6178e2 ]
+
+link_info.str is a char array of size 60. Memory after the NULL
+byte is not initialized. Sending the whole object out can cause
+a leak.
+
+Signed-off-by: Kangjie Lu <kjlu@gatech.edu>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/netlink_compat.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -574,7 +574,8 @@ static int tipc_nl_compat_link_dump(stru
+
+ link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
+ link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
+- strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
++ nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]),
++ TIPC_MAX_LINK_NAME);
+
+ return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
+ &link_info, sizeof(link_info));
--- /dev/null
+From 20521e346df27f4e9d07984e024bf8aad1c9878b Mon Sep 17 00:00:00 2001
+From: Richard Alpe <richard.alpe@ericsson.com>
+Date: Thu, 14 Jul 2016 15:02:07 +0100
+Subject: [PATCH 135/135] tipc: fix nl compat regression for link statistics
+
+[ Upstream commit 55e77a3e8297581c919b45adcc4d0815b69afa84 ]
+
+Fix incorrect use of nla_strlcpy() where the first NLA_HDRLEN bytes
+of the link name were left out.
+
+Making the output of tipc-config -ls look something like:
+Link statistics:
+dcast-link
+1:data0-1.1.2:data0
+1:data0-1.1.3:data0
+
+Also, for the record, the patch that introduce this regression
+claims "Sending the whole object out can cause a leak". Which isn't
+very likely as this is a compat layer, where the data we are parsing
+is generated by us and we know the string to be NULL terminated. But
+you can of course never be too secure.
+
+Fixes: 5d2be1422e02 (tipc: fix an infoleak in tipc_nl_compat_link_dump)
+Signed-off-by: Richard Alpe <richard.alpe@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/netlink_compat.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -574,7 +574,7 @@ static int tipc_nl_compat_link_dump(stru
+
+ link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
+ link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
+- nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]),
++ nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
+ TIPC_MAX_LINK_NAME);
+
+ return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
+0001-i40e-Workaround-fix-for-mss-256-issue.patch
+0002-i40evf-handle-many-MAC-filters-correctly.patch
+0003-i40e-i40evf-Fix-RS-bit-update-in-Tx-path-and-disable.patch
+0004-i40e-fix-do-not-sleep-in-netdev_ops.patch
+0005-i40e-Fix-memory-leaks-sideband-filter-programming.patch
+0006-i40e-properly-delete-VF-MAC-filters.patch
+0007-i40e-don-t-add-zero-MAC-filter.patch
+0008-i40evf-check-rings-before-freeing-resources.patch
+0009-i40e-clean-whole-mac-filter-list.patch
+0010-i40e-Fix-Rx-hash-reported-to-the-stack-by-our-driver.patch
+0011-igb-don-t-unmap-NULL-hw_addr.patch
+0012-igb-use-the-correct-i210-register-for-EEMNGCTL.patch
+0013-igb-fix-NULL-derefs-due-to-skipped-SR-IOV-enabling.patch
+0014-ixgbe-Fix-handling-of-NAPI-budget-when-multiple-queu.patch
+0015-e1000-fix-data-race-between-tx_ring-next_to_clean.patch
+0016-e1000e-fix-division-by-zero-on-jumbo-MTUs.patch
+0017-clk-xgene-Fix-divider-with-non-zero-shift-value.patch
+0018-fm10k-do-not-assume-VF-always-has-1-queue.patch
+0019-fm10k-Correct-MTU-for-jumbo-frames.patch
+0020-fm10k-Fix-handling-of-NAPI-budget-when-multiple-queu.patch
+0021-fm10k-reset-max_queues-on-init_hw_vf-failure.patch
+0022-fm10k-always-check-init_hw-for-errors.patch
+0023-fm10k-reinitialize-queuing-scheme-after-calling-init.patch
+0024-fm10k-Cleanup-MSI-X-interrupts-in-case-of-failure.patch
+0025-fm10k-Cleanup-exception-handling-for-mailbox-interru.patch
+0026-cxlflash-a-couple-off-by-one-bugs.patch
+0027-lightnvm-fix-bio-submission-issue.patch
+0028-lightnvm-fix-incorrect-nr_free_blocks-stat.patch
+0029-lightnvm-add-check-after-mempool-allocation.patch
+0030-lightnvm-unlock-rq-and-free-ppa_list-on-submission-f.patch
+0031-lightnvm-fix-locking-and-mempool-in-rrpc_lun_gc.patch
+0032-lightnvm-fix-missing-grown-bad-block-type.patch
+0033-NVMe-fix-build-with-CONFIG_NVM-enabled.patch
+0034-Drivers-hv-util-Increase-the-timeout-for-util-servic.patch
+0035-Drivers-hv-utils-run-polling-callback-always-in-inte.patch
+0036-tools-hv-report-ENOSPC-errors-in-hv_fcopy_daemon.patch
+0037-Drivers-hv-util-catch-allocation-errors.patch
+0038-drivers-hv-cleanup-synic-msrs-if-vmbus-connect-faile.patch
+0039-Drivers-hv-vss-run-only-on-supported-host-versions.patch
+0040-Drivers-hv-vmbus-serialize-process_chn_event-and-vmb.patch
+0041-Drivers-hv-vmbus-fix-rescind-offer-handling-for-devi.patch
+0042-cxl-Fix-possible-idr-warning-when-contexts-are-relea.patch
+0043-cxl-Fix-DSI-misses-when-the-context-owning-task-exit.patch
+0044-cxlflash-Fix-to-resolve-cmd-leak-after-host-reset.patch
+0045-cxlflash-Resolve-oops-in-wait_port_offline.patch
+0046-cxlflash-Enable-device-id-for-future-IBM-CXL-adapter.patch
+0047-cxl-fix-build-for-GCC-4.6.x.patch
+0048-cxl-Enable-PCI-device-ID-for-future-IBM-CXL-adapter.patch
+0049-lpfc-Fix-FCF-Infinite-loop-in-lpfc_sli4_fcf_rr_next_.patch
+0050-lpfc-Fix-the-FLOGI-discovery-logic-to-comply-with-T1.patch
+0051-lpfc-Fix-RegLogin-failed-error-seen-on-Lancer-FC-dur.patch
+0052-lpfc-Fix-driver-crash-when-module-parameter-lpfc_fcp.patch
+0053-lpfc-Fix-crash-in-fcp-command-completion-path.patch
+0054-lpfc-Fix-RDP-Speed-reporting.patch
+0055-lpfc-Fix-RDP-ACC-being-too-long.patch
+0056-lpfc-Fix-mbox-reuse-in-PLOGI-completion.patch
+0057-lpfc-Fix-external-loopback-failure.patch
+0058-qeth-initialize-net_device-with-carrier-off.patch
+0059-s390-cio-fix-measurement-characteristics-memleak.patch
+0060-s390-cio-ensure-consistent-measurement-state.patch
+0061-s390-cio-update-measurement-characteristics.patch
+0062-megaraid-Fix-possible-NULL-pointer-deference-in-mrai.patch
+0063-megaraid_sas-Do-not-allow-PCI-access-during-OCR.patch
+0064-megaraid_sas-Fix-SMAP-issue.patch
+0065-megaraid_sas-Add-an-i-o-barrier.patch
+0066-pwm-fsl-ftm-Fix-clock-enable-disable-when-using-PM.patch
+0067-pwm-lpc32xx-correct-number-of-PWM-channels-from-2-to.patch
+0068-pwm-lpc32xx-fix-and-simplify-duty-cycle-and-period-c.patch
+0069-irqchip-gic-v3-Make-sure-read-from-ICC_IAR1_EL1-is-v.patch
+0070-arm64-KVM-Configure-TCR_EL2.PS-at-runtime.patch
+0071-net-cavium-liquidio-fix-check-for-in-progress-flag.patch
+0072-mpt3sas-A-correction-in-unmap_resources.patch
+0073-mpt3sas-Fix-for-Asynchronous-completion-of-timedout-.patch
+0074-i40e-i40evf-Fix-RSS-rx-flow-hash-configuration-throu.patch
+0075-hrtimer-Catch-illegal-clockids.patch
+0076-drm-i915-bxt-update-list-of-PCIIDs.patch
+0077-drm-i915-skl-Add-missing-SKL-ids.patch
+0078-drm-atomic-Do-not-unset-crtc-when-an-encoder-is-stol.patch
+0079-mmc-sdhci-64-bit-DMA-actually-has-4-byte-alignment.patch
+0080-qla2xxx-Use-ATIO-type-to-send-correct-tmr-response.patch
+0081-drm-amdgpu-fix-dp-link-rate-selection-v2.patch
+0082-drm-radeon-fix-dp-link-rate-selection-v2.patch
+0083-net-thunderx-Fix-for-Qset-error-due-to-CQ-full.patch
+0084-ahci-Workaround-for-ThunderX-Errata-22536.patch
+0085-arm64-Add-workaround-for-Cavium-erratum-27456.patch
+0086-tipc-fix-nullptr-crash-during-subscription-cancel.patch
+0087-drm-i915-Fix-hpd-live-status-bits-for-g4x.patch
+0088-ALSA-hda-add-codec-support-for-Kabylake-display-audi.patch
+0089-sched-numa-Fix-use-after-free-bug-in-the-task_numa_c.patch
+0090-UVC-Add-support-for-R200-depth-camera.patch
+0091-mmc-sdhci-Do-not-BUG-on-invalid-vdd.patch
+0092-net-mlx5e-Don-t-try-to-modify-CQ-moderation-if-it-is.patch
+0093-net-mlx5e-Don-t-modify-CQ-before-it-was-created.patch
+0094-s390-pci_dma-fix-DMA-table-corruption-with-4-TB-main.patch
+0095-arcmsr-fixed-getting-wrong-configuration-data.patch
+0096-arcmsr-fixes-not-release-allocated-resource.patch
+0097-Drivers-hv-vmbus-avoid-infinite-loop-in-init_vp_inde.patch
+0098-Drivers-hv-vmbus-avoid-scheduling-in-interrupt-conte.patch
+0099-Drivers-hv-vmbus-don-t-manipulate-with-clocksources-.patch
+0100-cxlflash-Fix-to-avoid-unnecessary-scan-with-internal.patch
+0101-intel_idle-Support-for-Intel-Xeon-Phi-Processor-x200.patch
+0102-proc-revert-proc-pid-maps-stack-TID-annotation.patch
+0103-s390-crypto-provide-correct-file-mode-at-device-regi.patch
+0104-perf-x86-cqm-Fix-CQM-handling-of-grouping-events-int.patch
+0105-perf-x86-cqm-Fix-CQM-memory-leak-and-notifier-leak.patch
+0106-net-thunderx-Fix-for-multiqset-not-configured-upon-i.patch
+0107-net-thunderx-Fix-receive-packet-stats.patch
+0108-Input-xpad-correctly-handle-concurrent-LED-and-FF-re.patch
+0109-time-Verify-time-values-in-adjtimex-ADJ_SETOFFSET-to.patch
+0110-ntp-Fix-ADJ_SETOFFSET-being-used-w-ADJ_NANO.patch
+0111-drm-Balance-error-path-for-GEM-handle-allocation.patch
+0112-ALSA-hda-add-AMD-Polaris-10-11-AZ-PCI-IDs-with-prope.patch
+0113-ecryptfs-fix-handling-of-directory-opening.patch
+0114-drm-radeon-mst-fix-regression-in-lane-link-handling.patch
+0115-cxlflash-Fix-to-resolve-dead-lock-during-EEH-recover.patch
+0116-blk-mq-End-unstarted-requests-on-dying-queue.patch
+0117-btrfs-Continue-write-in-case-of-can_not_nocow.patch
+0118-clocksource-Allow-unregistering-the-watchdog.patch
+0119-irqchip-gicv3-its-numa-Enable-workaround-for-Cavium-.patch
+0120-block-fix-blk_rq_get_max_sectors-for-driver-private-.patch
+0121-lpfc-Fix-DMA-faults-observed-upon-plugging-loopback-.patch
+0122-HID-core-prevent-out-of-bound-readings.patch
+0123-crypto-vmx-comply-with-ABIs-that-specify-vrsave-as-r.patch
+0124-crypto-vmx-Fix-ABI-detection.patch
+0125-tda10071-Fix-dependency-to-REGMAP_I2C.patch
+0126-crypto-vmx-IV-size-failing-on-skcipher-API.patch
+0127-x86-hyperv-Avoid-reporting-bogus-NMI-status-for-Gen2.patch
+0128-net-thunderx-Fix-link-status-reporting.patch
+0129-Input-xpad-move-pending-clear-to-the-correct-locatio.patch
+0130-drm-i915-Only-ignore-eDP-ports-that-are-connected.patch
+0131-drm-i915-Check-VBT-for-port-presence-in-addition-to-.patch
+0132-Bluetooth-Add-support-for-Intel-Bluetooth-device-826.patch
+0133-netfilter-x_tables-check-for-size-overflow.patch
+0134-tipc-fix-an-infoleak-in-tipc_nl_compat_link_dump.patch
+0135-tipc-fix-nl-compat-regression-for-link-statistics.patch