--- /dev/null
+From 9aed02feae57bf7a40cb04ea0e3017cb7a998db4 Mon Sep 17 00:00:00 2001
+From: Vineet Gupta <vgupta@synopsys.com>
+Date: Fri, 27 Jan 2017 10:45:27 -0800
+Subject: ARC: [arcompact] handle unaligned access delay slot corner case
+
+From: Vineet Gupta <vgupta@synopsys.com>
+
+commit 9aed02feae57bf7a40cb04ea0e3017cb7a998db4 upstream.
+
+After emulating an unaligned access in the delay slot of a branch, we
+pretend the delay slot never happened - so we return to the actual
+branch target (or to the next PC if the branch was not taken).
+
+Currently we did this by handling STATUS32.DE; we also need to clear the
+BTA.T bit, which is disregarded when returning from the original misaligned
+exception, but could cause weirdness if the interrupt return path is taken
+instead (in case an interrupt was active too).
+
+One ARC700 customer ran into this when enabling unaligned access fixup
+for kernel mode accesses as well
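+
+For illustration only, a sketch of the complete delay-slot fixup after this
+change (it mirrors the hunk below, using the same ptrace register fields):
+
+	if (delay_mode(regs)) {
+		/* branch already evaluated: resume at the branch target, minus
+		 * the BTA.T bit, and clear STATUS32.DE so the return does not
+		 * re-execute the delay slot */
+		regs->ret = regs->bta & ~1U;
+		regs->status32 &= ~STATUS_DE_MASK;
+	}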
+
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/kernel/unaligned.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arc/kernel/unaligned.c
++++ b/arch/arc/kernel/unaligned.c
+@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long addre
+ if (state.fault)
+ goto fault;
+
++ /* clear any remnants of delay slot */
+ if (delay_mode(regs)) {
+- regs->ret = regs->bta;
++ regs->ret = regs->bta & ~1U;
+ regs->status32 &= ~STATUS_DE_MASK;
+ } else {
+ regs->ret += state.instr_len;
--- /dev/null
+From 36425cd67052e3becf325fd4d3ba5691791ef7e4 Mon Sep 17 00:00:00 2001
+From: Vineet Gupta <vgupta@synopsys.com>
+Date: Tue, 24 Jan 2017 10:23:42 -0800
+Subject: ARC: udelay: fix inline assembler by adding LP_COUNT to clobber list
+
+From: Vineet Gupta <vgupta@synopsys.com>
+
+commit 36425cd67052e3becf325fd4d3ba5691791ef7e4 upstream.
+
+commit 3c7c7a2fc8811bc ("ARC: Don't use "+l" inline asm constraint")
+modified the inline assembly to set up the LP_COUNT register manually and
+NOT rely on gcc to do it (via the "+l" inline assembler constraint hint,
+now being retired in the compiler).
+
+However, the fix was flawed as we didn't add LP_COUNT to the asm clobber
+list, meaning gcc doesn't know that LP_COUNT or zero-delay loops are in
+use in the inline asm.
+
+This resulted in some fun, as nested ZOL loops were being generated:
+
+| mov lp_count,250000 ;16 # tmp235,
+| lp .L__GCC__LP14 # <======= OUTER LOOP (gcc generated)
+| .L14:
+| ld r2, [r5] # MEM[(volatile u32 *)prephitmp_43], w
+| dmb 1
+| breq r2, -1, @.L21 #, w,,
+| bbit0 r2,1,@.L13 # w,,
+| ld r4,[r7] ;25 # loops_per_jiffy, loops_per_jiffy
+| mpymu r3,r4,r6 #, loops_per_jiffy, tmp234
+|
+| mov lp_count, r3 # <====== INNER LOOP (from inline asm)
+| lp 1f
+| nop
+| 1:
+| nop_s
+| .L__GCC__LP14: ; loop end, start is @.L14 #,
+
+This caused issues with drivers relying on sane behaviour of udelay and
+friends.
+
+With LP_COUNT added to the clobber list, gcc no longer generates the outer
+loop in cases such as the one above.
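+
+As a hedged sketch (the real body lives in arch/arc/include/asm/delay.h),
+the resulting inline asm: a register loaded manually inside the asm must be
+named in the clobber list so gcc will not allocate it across the asm:
+
+	static inline void __delay(unsigned long loops)
+	{
+		__asm__ __volatile__(
+		"	mov lp_count, %0	\n"	/* set up ZOL count manually */
+		"	lp  1f	\n"
+		"	nop	\n"
+		"1:	\n"
+		:			/* no outputs */
+		: "r"(loops)		/* input: loop count */
+		: "lp_count");		/* gcc must not cache LP_COUNT */
+	}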
+
+Addresses STAR 9001146134
+
+Reported-by: Joao Pinto <jpinto@synopsys.com>
+Fixes: 3c7c7a2fc8811bc ("ARC: Don't use "+l" inline asm constraint")
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/include/asm/delay.h | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/arc/include/asm/delay.h
++++ b/arch/arc/include/asm/delay.h
+@@ -26,7 +26,9 @@ static inline void __delay(unsigned long
+ " lp 1f \n"
+ " nop \n"
+ "1: \n"
+- : : "r"(loops));
++ :
++ : "r"(loops)
++ : "lp_count");
+ }
+
+ extern void __bad_udelay(void);
--- /dev/null
+From c97c52be78b8463ac5407f1cf1f22f8f6cf93a37 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Einar=20J=C3=B3n?= <tolvupostur@gmail.com>
+Date: Fri, 12 Aug 2016 13:50:41 +0200
+Subject: can: c_can_pci: fix null-pointer-deref in c_can_start() - set device pointer
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Einar Jón <tolvupostur@gmail.com>
+
+commit c97c52be78b8463ac5407f1cf1f22f8f6cf93a37 upstream.
+
+The priv->device pointer for c_can_pci is never set, but it is used
+without a NULL check in c_can_start(). Setting it in c_can_pci_probe()
+like c_can_plat_probe() prevents c_can_pci.ko from crashing, with and
+without CONFIG_PM.
+
+This might also cause the pm_runtime_*() functions in c_can.c to
+actually be executed for c_can_pci devices - they are the only other
+place where priv->device is used, but they all contain a null check.
+
+Signed-off-by: Einar Jón <tolvupostur@gmail.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/c_can/c_can_pci.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/can/c_can/c_can_pci.c
++++ b/drivers/net/can/c_can/c_can_pci.c
+@@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_de
+
+ dev->irq = pdev->irq;
+ priv->base = addr;
++ priv->device = &pdev->dev;
+
+ if (!c_can_pci_data->freq) {
+ dev_err(&pdev->dev, "no clock frequency defined\n");
--- /dev/null
+From befa60113ce7ea270cb51eada28443ca2756f480 Mon Sep 17 00:00:00 2001
+From: Yegor Yefremov <yegorslists@googlemail.com>
+Date: Wed, 18 Jan 2017 11:35:57 +0100
+Subject: can: ti_hecc: add missing prepare and unprepare of the clock
+
+From: Yegor Yefremov <yegorslists@googlemail.com>
+
+commit befa60113ce7ea270cb51eada28443ca2756f480 upstream.
+
+In order to make the driver work with the common clock framework, this
+patch converts the clk_enable()/clk_disable() to
+clk_prepare_enable()/clk_disable_unprepare().
+
+Also add error checking for clk_prepare_enable().
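+
+For reference, a minimal sketch of the common-clock pattern being adopted
+(error handling trimmed to the essentials):
+
+	err = clk_prepare_enable(priv->clk);	/* prepare + enable in one call */
+	if (err)
+		return err;
+
+	/* ... device in use ... */
+
+	clk_disable_unprepare(priv->clk);	/* reverse both steps on teardown */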
+
+Signed-off-by: Yegor Yefremov <yegorslists@googlemail.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/ti_hecc.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/can/ti_hecc.c
++++ b/drivers/net/can/ti_hecc.c
+@@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform
+ netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
+ HECC_DEF_NAPI_WEIGHT);
+
+- clk_enable(priv->clk);
++ err = clk_prepare_enable(priv->clk);
++ if (err) {
++ dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
++ goto probe_exit_clk;
++ }
++
+ err = register_candev(ndev);
+ if (err) {
+ dev_err(&pdev->dev, "register_candev() failed\n");
+@@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platfor
+ struct ti_hecc_priv *priv = netdev_priv(ndev);
+
+ unregister_candev(ndev);
+- clk_disable(priv->clk);
++ clk_disable_unprepare(priv->clk);
+ clk_put(priv->clk);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iounmap(priv->base);
+@@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platfo
+ hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
+ priv->can.state = CAN_STATE_SLEEPING;
+
+- clk_disable(priv->clk);
++ clk_disable_unprepare(priv->clk);
+
+ return 0;
+ }
+@@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platfor
+ {
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct ti_hecc_priv *priv = netdev_priv(dev);
++ int err;
+
+- clk_enable(priv->clk);
++ err = clk_prepare_enable(priv->clk);
++ if (err)
++ return err;
+
+ hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
--- /dev/null
+From c34f078675f505c4437919bb1897b1351f16a050 Mon Sep 17 00:00:00 2001
+From: Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
+Date: Fri, 20 Jan 2017 16:28:42 +0200
+Subject: drm/i915: Don't leak edid in intel_crt_detect_ddc()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
+
+commit c34f078675f505c4437919bb1897b1351f16a050 upstream.
+
+In the path where intel_crt_detect_ddc() detects a CRT, it would return
+true without freeing the edid.
+
+Fixes: a2bd1f541f19 ("drm/i915: check whether we actually received an edid in detect_ddc")
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Daniel Vetter <daniel.vetter@intel.com>
+Cc: Jani Nikula <jani.nikula@linux.intel.com>
+Cc: intel-gfx@lists.freedesktop.org
+Signed-off-by: Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Reviewed-by: Jani Nikula <jani.nikula@intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/1484922525-6131-1-git-send-email-ander.conselvan.de.oliveira@intel.com
+(cherry picked from commit c96b63a6a7ac4bd670ec2e663793a9a31418b790)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_crt.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -445,6 +445,7 @@ static bool intel_crt_detect_ddc(struct
+ struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+ struct edid *edid;
+ struct i2c_adapter *i2c;
++ bool ret = false;
+
+ BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
+
+@@ -461,17 +462,17 @@ static bool intel_crt_detect_ddc(struct
+ */
+ if (!is_digital) {
+ DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+- return true;
++ ret = true;
++ } else {
++ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ }
+-
+- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
+ }
+
+ kfree(edid);
+
+- return false;
++ return ret;
+ }
+
+ static enum drm_connector_status
--- /dev/null
+From fc791b6335152c5278dc4a4991bcb2d329f806f9 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Thu, 13 Oct 2016 18:26:56 +0200
+Subject: IB/ipoib: move back IB LL address into the hard header
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit fc791b6335152c5278dc4a4991bcb2d329f806f9 upstream.
+
+After the commit 9207f9d45b0a ("net: preserve IP control block
+during GSO segmentation"), the GSO CB and the IPoIB CB conflict.
+That destroys the IPoIB address information cached there,
+causing a severe performance regression, as described in more detail here:
+
+http://marc.info/?l=linux-kernel&m=146787279825501&w=2
+
+This change moves the data cached by the IPoIB driver from the
+skb control block into the IPoIB hard header, as was done before
+commit 936d7de3d736 ("IPoIB: Stop lying about hard_header_len
+and use skb->cb to stash LL addresses").
+In order to avoid GRO issues, on packet reception the IPoIB driver
+stashes a dummy pseudo header into the skb, so that the received
+packets actually have a hard header matching the declared length.
+To avoid changing the connected mode maximum MTU, the allocated
+head buffer size is increased by the pseudo header length.
+
+After this commit, IPoIB performance is back to its pre-regression
+value.
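+
+Illustrative summary of the new header budget (values as in the ipoib.h
+hunk below):
+
+	enum {
+		IPOIB_ENCAP_LEN  = 4,	/* real on-wire IPoIB header */
+		IPOIB_PSEUDO_LEN = 20,	/* dummy header stashing the LL address */
+		IPOIB_HARD_LEN   = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
+	};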
+
+v2 -> v3: rebased
+v1 -> v2: avoid changing the max mtu, increasing the head buf size
+
+Fixes: 9207f9d45b0a ("net: preserve IP control block during GSO segmentation")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Vasiliy Tolstov <v.tolstov@selfip.ru>
+Cc: Nikolay Borisov <n.borisov.lkml@gmail.com>
+Cc: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/ipoib/ipoib.h | 20 ++++++---
+ drivers/infiniband/ulp/ipoib/ipoib_cm.c | 15 +++---
+ drivers/infiniband/ulp/ipoib/ipoib_ib.c | 12 ++---
+ drivers/infiniband/ulp/ipoib/ipoib_main.c | 54 +++++++++++++++----------
+ drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 6 +-
+ 5 files changed, 64 insertions(+), 43 deletions(-)
+
+--- a/drivers/infiniband/ulp/ipoib/ipoib.h
++++ b/drivers/infiniband/ulp/ipoib/ipoib.h
+@@ -63,6 +63,8 @@ enum ipoib_flush_level {
+
+ enum {
+ IPOIB_ENCAP_LEN = 4,
++ IPOIB_PSEUDO_LEN = 20,
++ IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
+
+ IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
+ IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
+@@ -131,15 +133,21 @@ struct ipoib_header {
+ u16 reserved;
+ };
+
+-struct ipoib_cb {
+- struct qdisc_skb_cb qdisc_cb;
+- u8 hwaddr[INFINIBAND_ALEN];
++struct ipoib_pseudo_header {
++ u8 hwaddr[INFINIBAND_ALEN];
+ };
+
+-static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
++static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
+ {
+- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
+- return (struct ipoib_cb *)skb->cb;
++ char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
++
++ /*
++ * only the ipoib header is present now, make room for a dummy
++ * pseudo header and set skb field accordingly
++ */
++ memset(data, 0, IPOIB_PSEUDO_LEN);
++ skb_reset_mac_header(skb);
++ skb_pull(skb, IPOIB_HARD_LEN);
+ }
+
+ /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
+ #define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
+ #define IPOIB_CM_RX_UPDATE_MASK (0x3)
+
++#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
++
+ static struct ib_qp_attr ipoib_cm_err_attr = {
+ .qp_state = IB_QPS_ERR
+ };
+@@ -147,15 +149,15 @@ static struct sk_buff *ipoib_cm_alloc_rx
+ struct sk_buff *skb;
+ int i;
+
+- skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
++ skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
+ if (unlikely(!skb))
+ return NULL;
+
+ /*
+- * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
++ * IPoIB adds a IPOIB_ENCAP_LEN byte header, this will align the
+ * IP header to a multiple of 16.
+ */
+- skb_reserve(skb, 12);
++ skb_reserve(skb, IPOIB_CM_RX_RESERVE);
+
+ mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
+ DMA_FROM_DEVICE);
+@@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_de
+ if (wc->byte_len < IPOIB_CM_COPYBREAK) {
+ int dlen = wc->byte_len;
+
+- small_skb = dev_alloc_skb(dlen + 12);
++ small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
+ if (small_skb) {
+- skb_reserve(small_skb, 12);
++ skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
+ ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
+ dlen, DMA_FROM_DEVICE);
+ skb_copy_from_linear_data(skb, small_skb->data, dlen);
+@@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+
+ copied:
+ skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+- skb_reset_mac_header(skb);
+- skb_pull(skb, IPOIB_ENCAP_LEN);
++ skb_add_pseudo_hdr(skb);
+
+ ++dev->stats.rx_packets;
+ dev->stats.rx_bytes += skb->len;
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -130,16 +130,15 @@ static struct sk_buff *ipoib_alloc_rx_sk
+
+ buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+
+- skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
++ skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
+ if (unlikely(!skb))
+ return NULL;
+
+ /*
+- * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
+- * header. So we need 4 more bytes to get to 48 and align the
+- * IP header to a multiple of 16.
++ * the IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, that is
++ * 64 bytes aligned
+ */
+- skb_reserve(skb, 4);
++ skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
+
+ mapping = priv->rx_ring[id].mapping;
+ mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
+@@ -242,8 +241,7 @@ static void ipoib_ib_handle_rx_wc(struct
+ skb_pull(skb, IB_GRH_BYTES);
+
+ skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+- skb_reset_mac_header(skb);
+- skb_pull(skb, IPOIB_ENCAP_LEN);
++ skb_add_pseudo_hdr(skb);
+
+ ++dev->stats.rx_packets;
+ dev->stats.rx_bytes += skb->len;
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -850,9 +850,12 @@ static void neigh_add_path(struct sk_buf
+ ipoib_neigh_free(neigh);
+ goto err_drop;
+ }
+- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
++ if (skb_queue_len(&neigh->queue) <
++ IPOIB_MAX_PATH_REC_QUEUE) {
++ /* put pseudoheader back on for next time */
++ skb_push(skb, IPOIB_PSEUDO_LEN);
+ __skb_queue_tail(&neigh->queue, skb);
+- else {
++ } else {
+ ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
+ skb_queue_len(&neigh->queue));
+ goto err_drop;
+@@ -889,7 +892,7 @@ err_drop:
+ }
+
+ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+- struct ipoib_cb *cb)
++ struct ipoib_pseudo_header *phdr)
+ {
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_path *path;
+@@ -897,16 +900,18 @@ static void unicast_arp_send(struct sk_b
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+- path = __path_find(dev, cb->hwaddr + 4);
++ path = __path_find(dev, phdr->hwaddr + 4);
+ if (!path || !path->valid) {
+ int new_path = 0;
+
+ if (!path) {
+- path = path_rec_create(dev, cb->hwaddr + 4);
++ path = path_rec_create(dev, phdr->hwaddr + 4);
+ new_path = 1;
+ }
+ if (path) {
+ if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
++ /* put pseudoheader back on for next time */
++ skb_push(skb, IPOIB_PSEUDO_LEN);
+ __skb_queue_tail(&path->queue, skb);
+ } else {
+ ++dev->stats.tx_dropped;
+@@ -934,10 +939,12 @@ static void unicast_arp_send(struct sk_b
+ be16_to_cpu(path->pathrec.dlid));
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+- ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
++ ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
+ return;
+ } else if ((path->query || !path_rec_start(dev, path)) &&
+ skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
++ /* put pseudoheader back on for next time */
++ skb_push(skb, IPOIB_PSEUDO_LEN);
+ __skb_queue_tail(&path->queue, skb);
+ } else {
+ ++dev->stats.tx_dropped;
+@@ -951,13 +958,15 @@ static int ipoib_start_xmit(struct sk_bu
+ {
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_neigh *neigh;
+- struct ipoib_cb *cb = ipoib_skb_cb(skb);
++ struct ipoib_pseudo_header *phdr;
+ struct ipoib_header *header;
+ unsigned long flags;
+
++ phdr = (struct ipoib_pseudo_header *) skb->data;
++ skb_pull(skb, sizeof(*phdr));
+ header = (struct ipoib_header *) skb->data;
+
+- if (unlikely(cb->hwaddr[4] == 0xff)) {
++ if (unlikely(phdr->hwaddr[4] == 0xff)) {
+ /* multicast, arrange "if" according to probability */
+ if ((header->proto != htons(ETH_P_IP)) &&
+ (header->proto != htons(ETH_P_IPV6)) &&
+@@ -970,13 +979,13 @@ static int ipoib_start_xmit(struct sk_bu
+ return NETDEV_TX_OK;
+ }
+ /* Add in the P_Key for multicast*/
+- cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+- cb->hwaddr[9] = priv->pkey & 0xff;
++ phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
++ phdr->hwaddr[9] = priv->pkey & 0xff;
+
+- neigh = ipoib_neigh_get(dev, cb->hwaddr);
++ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
+ if (likely(neigh))
+ goto send_using_neigh;
+- ipoib_mcast_send(dev, cb->hwaddr, skb);
++ ipoib_mcast_send(dev, phdr->hwaddr, skb);
+ return NETDEV_TX_OK;
+ }
+
+@@ -985,16 +994,16 @@ static int ipoib_start_xmit(struct sk_bu
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ case htons(ETH_P_TIPC):
+- neigh = ipoib_neigh_get(dev, cb->hwaddr);
++ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
+ if (unlikely(!neigh)) {
+- neigh_add_path(skb, cb->hwaddr, dev);
++ neigh_add_path(skb, phdr->hwaddr, dev);
+ return NETDEV_TX_OK;
+ }
+ break;
+ case htons(ETH_P_ARP):
+ case htons(ETH_P_RARP):
+ /* for unicast ARP and RARP should always perform path find */
+- unicast_arp_send(skb, dev, cb);
++ unicast_arp_send(skb, dev, phdr);
+ return NETDEV_TX_OK;
+ default:
+ /* ethertype not supported by IPoIB */
+@@ -1011,11 +1020,13 @@ send_using_neigh:
+ goto unref;
+ }
+ } else if (neigh->ah) {
+- ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
++ ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
+ goto unref;
+ }
+
+ if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
++ /* put pseudoheader back on for next time */
++ skb_push(skb, sizeof(*phdr));
+ spin_lock_irqsave(&priv->lock, flags);
+ __skb_queue_tail(&neigh->queue, skb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+@@ -1047,8 +1058,8 @@ static int ipoib_hard_header(struct sk_b
+ unsigned short type,
+ const void *daddr, const void *saddr, unsigned len)
+ {
++ struct ipoib_pseudo_header *phdr;
+ struct ipoib_header *header;
+- struct ipoib_cb *cb = ipoib_skb_cb(skb);
+
+ header = (struct ipoib_header *) skb_push(skb, sizeof *header);
+
+@@ -1057,12 +1068,13 @@ static int ipoib_hard_header(struct sk_b
+
+ /*
+ * we don't rely on dst_entry structure, always stuff the
+- * destination address into skb->cb so we can figure out where
++ * destination address into skb hard header so we can figure out where
+ * to send the packet later.
+ */
+- memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
++ phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
++ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
+
+- return sizeof *header;
++ return IPOIB_HARD_LEN;
+ }
+
+ static void ipoib_set_mcast_list(struct net_device *dev)
+@@ -1638,7 +1650,7 @@ void ipoib_setup(struct net_device *dev)
+
+ dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+
+- dev->hard_header_len = IPOIB_ENCAP_LEN;
++ dev->hard_header_len = IPOIB_HARD_LEN;
+ dev->addr_len = INFINIBAND_ALEN;
+ dev->type = ARPHRD_INFINIBAND;
+ dev->tx_queue_len = ipoib_sendq_size * 2;
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -756,9 +756,11 @@ void ipoib_mcast_send(struct net_device
+ __ipoib_mcast_add(dev, mcast);
+ list_add_tail(&mcast->list, &priv->multicast_list);
+ }
+- if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
++ if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
++ /* put pseudoheader back on for next time */
++ skb_push(skb, sizeof(struct ipoib_pseudo_header));
+ skb_queue_tail(&mcast->pkt_queue, skb);
+- else {
++ } else {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ }
--- /dev/null
+From 059aa734824165507c65fd30a55ff000afd14983 Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Sun, 22 Jan 2017 14:04:29 -0500
+Subject: nfs: Don't increment lock sequence ID after NFS4ERR_MOVED
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit 059aa734824165507c65fd30a55ff000afd14983 upstream.
+
+Xuan Qi reports that the Linux NFSv4 client failed to lock a file
+that was migrated. The steps he observed on the wire:
+
+1. The client sent a LOCK request to the source server
+2. The source server replied NFS4ERR_MOVED
+3. The client switched to the destination server
+4. The client sent the same LOCK request to the destination
+ server with a bumped lock sequence ID
+5. The destination server rejected the LOCK request with
+ NFS4ERR_BAD_SEQID
+
+RFC 3530 section 8.1.5 provides a list of NFS errors which do not
+bump a lock sequence ID.
+
+However, RFC 3530 is now obsoleted by RFC 7530. In RFC 7530 section
+9.1.7, this list has been updated by the addition of NFS4ERR_MOVED.
+
+Reported-by: Xuan Qi <xuan.qi@oracle.com>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/nfs4.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/include/linux/nfs4.h
++++ b/include/linux/nfs4.h
+@@ -266,7 +266,7 @@ enum nfsstat4 {
+
+ static inline bool seqid_mutating_err(u32 err)
+ {
+- /* rfc 3530 section 8.1.5: */
++ /* See RFC 7530, section 9.1.7 */
+ switch (err) {
+ case NFS4ERR_STALE_CLIENTID:
+ case NFS4ERR_STALE_STATEID:
+@@ -275,6 +275,7 @@ static inline bool seqid_mutating_err(u3
+ case NFS4ERR_BADXDR:
+ case NFS4ERR_RESOURCE:
+ case NFS4ERR_NOFILEHANDLE:
++ case NFS4ERR_MOVED:
+ return false;
+ };
+ return true;
--- /dev/null
+From a430607b2ef7c3be090f88c71cfcb1b3988aa7c0 Mon Sep 17 00:00:00 2001
+From: Benjamin Coddington <bcodding@redhat.com>
+Date: Tue, 24 Jan 2017 11:34:20 -0500
+Subject: NFSv4.0: always send mode in SETATTR after EXCLUSIVE4
+
+From: Benjamin Coddington <bcodding@redhat.com>
+
+commit a430607b2ef7c3be090f88c71cfcb1b3988aa7c0 upstream.
+
+Some nfsv4.0 servers may return a mode for the verifier following an open
+with EXCLUSIVE4 createmode, but this does not mean the client should skip
+setting the mode in the following SETATTR. It should only do that for
+EXCLUSIVE4_1 or UNGUARDED createmode.
+
+Fixes: 5334c5bdac92 ("NFS: Send attributes in OPEN request for NFS4_CREATE_EXCLUSIVE4_1")
+Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4proc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2422,7 +2422,8 @@ static inline void nfs4_exclusive_attrse
+ sattr->ia_valid |= ATTR_MTIME;
+
+ /* Except MODE, it seems harmless of setting twice. */
+- if ((attrset[1] & FATTR4_WORD1_MODE))
++ if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE &&
++ attrset[1] & FATTR4_WORD1_MODE)
+ sattr->ia_valid &= ~ATTR_MODE;
+
+ if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
--- /dev/null
+From 2ad5d52d42810bed95100a3d912679d8864421ec Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Sat, 28 Jan 2017 11:52:02 +0100
+Subject: parisc: Don't use BITS_PER_LONG in userspace-exported swab.h header
+
+From: Helge Deller <deller@gmx.de>
+
+commit 2ad5d52d42810bed95100a3d912679d8864421ec upstream.
+
+In swab.h the "#if BITS_PER_LONG > 32" breaks compiling userspace programs if
+BITS_PER_LONG is #defined by userspace with the sizeof() compiler builtin.
+
+Solve this problem by using __BITS_PER_LONG instead. Since we now
+#include asm/bitsperlong.h, avoid further potential userspace pollution
+by moving the #define of SHIFT_PER_LONG to bitops.h, which is not
+exported to userspace.
+
+This patch unbreaks compiling qemu on hppa/parisc.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/bitops.h | 8 +++++++-
+ arch/parisc/include/uapi/asm/bitsperlong.h | 2 --
+ arch/parisc/include/uapi/asm/swab.h | 5 +++--
+ 3 files changed, 10 insertions(+), 5 deletions(-)
+
+--- a/arch/parisc/include/asm/bitops.h
++++ b/arch/parisc/include/asm/bitops.h
+@@ -6,7 +6,7 @@
+ #endif
+
+ #include <linux/compiler.h>
+-#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
++#include <asm/types.h>
+ #include <asm/byteorder.h>
+ #include <asm/barrier.h>
+ #include <linux/atomic.h>
+@@ -17,6 +17,12 @@
+ * to include/asm-i386/bitops.h or kerneldoc
+ */
+
++#if __BITS_PER_LONG == 64
++#define SHIFT_PER_LONG 6
++#else
++#define SHIFT_PER_LONG 5
++#endif
++
+ #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
+
+
+--- a/arch/parisc/include/uapi/asm/bitsperlong.h
++++ b/arch/parisc/include/uapi/asm/bitsperlong.h
+@@ -3,10 +3,8 @@
+
+ #if defined(__LP64__)
+ #define __BITS_PER_LONG 64
+-#define SHIFT_PER_LONG 6
+ #else
+ #define __BITS_PER_LONG 32
+-#define SHIFT_PER_LONG 5
+ #endif
+
+ #include <asm-generic/bitsperlong.h>
+--- a/arch/parisc/include/uapi/asm/swab.h
++++ b/arch/parisc/include/uapi/asm/swab.h
+@@ -1,6 +1,7 @@
+ #ifndef _PARISC_SWAB_H
+ #define _PARISC_SWAB_H
+
++#include <asm/bitsperlong.h>
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+
+@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32
+ }
+ #define __arch_swab32 __arch_swab32
+
+-#if BITS_PER_LONG > 32
++#if __BITS_PER_LONG > 32
+ /*
+ ** From "PA-RISC 2.0 Architecture", HP Professional Books.
+ ** See Appendix I page 8 , "Endian Byte Swapping".
+@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64
+ return x;
+ }
+ #define __arch_swab64 __arch_swab64
+-#endif /* BITS_PER_LONG > 32 */
++#endif /* __BITS_PER_LONG > 32 */
+
+ #endif /* _PARISC_SWAB_H */
--- /dev/null
+From b4cfe3971f6eab542dd7ecc398bfa1aeec889934 Mon Sep 17 00:00:00 2001
+From: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Date: Sun, 15 Jan 2017 20:15:00 +0200
+Subject: RDMA/cma: Fix unknown symbol when CONFIG_IPV6 is not enabled
+
+From: Jack Morgenstein <jackm@dev.mellanox.co.il>
+
+commit b4cfe3971f6eab542dd7ecc398bfa1aeec889934 upstream.
+
+If IPV6 has not been enabled in the underlying kernel, we must avoid
+calling IPV6 procedures in rdma_cm.ko.
+
+This requires using "IS_ENABLED(CONFIG_IPV6)" in "if" statements
+surrounding any code which calls external IPV6 procedures.
+
+In the instance fixed here, procedure cma_bind_addr() called
+ipv6_addr_type() -- which resulted in calling external procedure
+__ipv6_addr_type().
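+
+The guard in its general form (a sketch, not the exact hunk):
+
+	if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
+		/* IS_ENABLED() folds to the constant 0 when CONFIG_IPV6 is off,
+		 * so the compiler drops this block and rdma_cm.ko keeps no
+		 * reference to external IPv6 symbols */
+	}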
+
+Fixes: 6c26a77124ff ("RDMA/cma: fix IPv6 address resolution")
+Cc: Spencer Baugh <sbaugh@catern.com>
+Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Reviewed-by: Moni Shoua <monis@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/cma.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -2578,7 +2578,8 @@ static int cma_bind_addr(struct rdma_cm_
+ if (!src_addr || !src_addr->sa_family) {
+ src_addr = (struct sockaddr *) &id->route.addr.src_addr;
+ src_addr->sa_family = dst_addr->sa_family;
+- if (dst_addr->sa_family == AF_INET6) {
++ if (IS_ENABLED(CONFIG_IPV6) &&
++ dst_addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
+ struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
+ src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
--- /dev/null
+From 9dce990d2cf57b5ed4e71a9cdbd7eae4335111ff Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Tue, 24 Jan 2017 08:05:52 +0100
+Subject: s390/ptrace: Preserve previous registers for short regset write
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+commit 9dce990d2cf57b5ed4e71a9cdbd7eae4335111ff upstream.
+
+Ensure that if userspace supplies insufficient data to
+PTRACE_SETREGSET to fill all the registers, the thread's old
+registers are preserved.
+
+convert_vx_to_fp() is adapted to handle only a specified number of
+registers rather than unconditionally handling all of them: other
+callers of this function are adapted appropriately.
+
+Based on an initial patch by Dave Martin.
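+
+The shape of the fix, sketched from the s390_vxrs_low_set() hunk below:
+
+	/* seed the buffer with the thread's current values first ... */
+	for (i = 0; i < __NUM_VXRS_LOW; i++)
+		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
+	/* ... so a short copy-in from userspace leaves the tail unchanged */
+	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);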
+
+Reported-by: Dave Martin <Dave.Martin@arm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/ptrace.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_s
+ if (target == current)
+ save_fpu_regs();
+
++ if (MACHINE_HAS_VX)
++ convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
++ else
++ memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
++
+ /* If setting FPC, must validate it first. */
+ if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
+ u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
+@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task
+ if (target == current)
+ save_fpu_regs();
+
++ for (i = 0; i < __NUM_VXRS_LOW; i++)
++ vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
++
+ rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
+ if (rc == 0)
+ for (i = 0; i < __NUM_VXRS_LOW; i++)
mm-mempolicy.c-do-not-put-mempolicy-before-using-its-nodemask.patch
sysctl-fix-proc_doulongvec_ms_jiffies_minmax.patch
isdn-eicon-silence-misleading-array-bounds-warning.patch
+rdma-cma-fix-unknown-symbol-when-config_ipv6-is-not-enabled.patch
+s390-ptrace-preserve-previous-registers-for-short-regset-write.patch
+can-c_can_pci-fix-null-pointer-deref-in-c_can_start-set-device-pointer.patch
+can-ti_hecc-add-missing-prepare-and-unprepare-of-the-clock.patch
+arc-udelay-fix-inline-assembler-by-adding-lp_count-to-clobber-list.patch
+arc-handle-unaligned-access-delay-slot-corner-case.patch
+parisc-don-t-use-bits_per_long-in-userspace-exported-swab.h-header.patch
+nfs-don-t-increment-lock-sequence-id-after-nfs4err_moved.patch
+nfsv4.0-always-send-mode-in-setattr-after-exclusive4.patch
+sunrpc-cleanup-ida-information-when-removing-sunrpc-module.patch
+drm-i915-don-t-leak-edid-in-intel_crt_detect_ddc.patch
+ib-ipoib-move-back-ib-ll-address-into-the-hard-header.patch
--- /dev/null
+From c929ea0b910355e1876c64431f3d5802f95b3d75 Mon Sep 17 00:00:00 2001
+From: Kinglong Mee <kinglongmee@gmail.com>
+Date: Fri, 20 Jan 2017 16:48:39 +0800
+Subject: SUNRPC: cleanup ida information when removing sunrpc module
+
+From: Kinglong Mee <kinglongmee@gmail.com>
+
+commit c929ea0b910355e1876c64431f3d5802f95b3d75 upstream.
+
+After removing the sunrpc module, I get a lot of kmemleak reports such as:
+unreferenced object 0xffff88003316b1e0 (size 544):
+ comm "gssproxy", pid 2148, jiffies 4294794465 (age 4200.081s)
+ hex dump (first 32 bytes):
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace:
+ [<ffffffffb0cfb58a>] kmemleak_alloc+0x4a/0xa0
+ [<ffffffffb03507fe>] kmem_cache_alloc+0x15e/0x1f0
+ [<ffffffffb0639baa>] ida_pre_get+0xaa/0x150
+ [<ffffffffb0639cfd>] ida_simple_get+0xad/0x180
+ [<ffffffffc06054fb>] nlmsvc_lookup_host+0x4ab/0x7f0 [lockd]
+ [<ffffffffc0605e1d>] lockd+0x4d/0x270 [lockd]
+ [<ffffffffc06061e5>] param_set_timeout+0x55/0x100 [lockd]
+ [<ffffffffc06cba24>] svc_defer+0x114/0x3f0 [sunrpc]
+ [<ffffffffc06cbbe7>] svc_defer+0x2d7/0x3f0 [sunrpc]
+ [<ffffffffc06c71da>] rpc_show_info+0x8a/0x110 [sunrpc]
+ [<ffffffffb044a33f>] proc_reg_write+0x7f/0xc0
+ [<ffffffffb038e41f>] __vfs_write+0xdf/0x3c0
+ [<ffffffffb0390f1f>] vfs_write+0xef/0x240
+ [<ffffffffb0392fbd>] SyS_write+0xad/0x130
+ [<ffffffffb0d06c37>] entry_SYSCALL_64_fastpath+0x1a/0xa9
+ [<ffffffffffffffff>] 0xffffffffffffffff
+
+I found that the ida information (dynamic memory) isn't cleaned up.
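+
+A minimal sketch of the cleanup being added (it mirrors the clnt.c and
+sunrpc_syms.c hunks below):
+
+	static DEFINE_IDA(rpc_clids);
+
+	void rpc_cleanup_clids(void)
+	{
+		/* release the bitmap blocks cached by the IDA at module exit */
+		ida_destroy(&rpc_clids);
+	}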
+
+Signed-off-by: Kinglong Mee <kinglongmee@gmail.com>
+Fixes: 2f048db4680a ("SUNRPC: Add an identifier for struct rpc_clnt")
+Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/sunrpc/clnt.h | 1 +
+ net/sunrpc/clnt.c | 5 +++++
+ net/sunrpc/sunrpc_syms.c | 1 +
+ 3 files changed, 7 insertions(+)
+
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -180,5 +180,6 @@ const char *rpc_peeraddr2str(struct rpc_
+ int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t);
+
+ const char *rpc_proc_name(const struct rpc_task *task);
++void rpc_cleanup_clids(void);
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_SUNRPC_CLNT_H */
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -337,6 +337,11 @@ out:
+
+ static DEFINE_IDA(rpc_clids);
+
++void rpc_cleanup_clids(void)
++{
++ ida_destroy(&rpc_clids);
++}
++
+ static int rpc_alloc_clid(struct rpc_clnt *clnt)
+ {
+ int clid;
+--- a/net/sunrpc/sunrpc_syms.c
++++ b/net/sunrpc/sunrpc_syms.c
+@@ -119,6 +119,7 @@ out:
+ static void __exit
+ cleanup_sunrpc(void)
+ {
++ rpc_cleanup_clids();
+ rpcauth_remove_module();
+ cleanup_socket_xprt();
+ svc_cleanup_xprt_sock();