git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.13-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 11 Apr 2014 05:09:05 +0000 (22:09 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 11 Apr 2014 05:09:05 +0000 (22:09 -0700)
added patches:
bnx2-fix-shutdown-sequence.patch
bonding-set-correct-vlan-id-for-alb-xmit-path.patch
bridge-multicast-add-sanity-check-for-general-query-destination.patch
bridge-multicast-add-sanity-check-for-query-source.patch
bridge-multicast-enable-snooping-on-general-queries-only.patch
call-efx_set_channels-before-efx-type-dimension_resources.patch
eth-fec-fix-lost-promiscuous-mode-after-reconnecting-cable.patch
inet-frag-make-sure-forced-eviction-removes-all.patch
ip6mr-fix-mfc-notification-flags.patch
ip_tunnel-fix-dst-ref-count.patch
ipmr-fix-mfc-notification-flags.patch
ipv6-avoid-unnecessary-temporary-addresses-being-generated.patch
ipv6-don-t-set-dst_nocount-for-remotely-added-routes.patch
ipv6-fix-exthdrs-offload-registration.patch
ipv6-ip6_append_data_mtu-do-not-handle-the-mtu-of-the-second-fragment-properly.patch
ipv6-some-ipv6-statistic-counters-failed-to-disable-bh.patch
isdnloop-several-buffer-overflows.patch
isdnloop-validate-nul-terminated-strings-from-user.patch
net-cdc_ncm-fix-control-message-ordering.patch
net-fix-for-a-race-condition-in-the-inet-frag-code.patch
net-micrel-ks8851-ml-add-vdd-supply-support.patch
net-sctp-fix-skb-leakage-in-cookie-echo-path-of.patch
net-socket-error-on-a-negative-msg_namelen.patch
net-unix-non-blocking-recvmsg-should-not-return.patch
net-vxlan-fix-crash-when-interface-is-created-with-no-group.patch
netlink-don-t-compare-the-nul-termination-in-nla_strcmp.patch
netpoll-fix-the-skb-check-in-pkt_is_ns.patch
pkt_sched-fq-do-not-hold-qdisc-lock-while-allocating.patch
rds-prevent-dereference-of-a-null-device-in-rds_iw_laddr_check.patch
rtnetlink-fix-fdb-notification-flags.patch
skbuff-skb_segment-orphan-frags-before-copying.patch
skbuff-skb_segment-s-frag-nskb_frag.patch
skbuff-skb_segment-s-fskb-list_skb.patch
skbuff-skb_segment-s-skb-head_skb.patch
skbuff-skb_segment-s-skb_frag-frag.patch
tcp-fix-get_timewait4_sock-delay-computation-on-64bit.patch
tcp-syncookies-do-not-use-getnstimeofday.patch
tcp-tcp_release_cb-should-release-socket-ownership.patch
tg3-do-not-include-vlan-acceleration-features-in-vlan_features.patch
tipc-allow-connection-shutdown-callback-to-be-invoked.patch
tipc-don-t-log-disabled-tasklet-handler-errors.patch
tipc-drop-subscriber-connection-id-invalidation.patch
tipc-fix-connection-refcount-leak.patch
tipc-fix-memory-leak-during-module-removal.patch
tipc-fix-spinlock-recursion-bug-for-failed-subscriptions.patch
usbnet-include-wait-queue-head-in-device-structure.patch
vhost-fix-total-length-when-packets-are-too-short.patch
vhost-validate-vhost_get_vq_desc-return-value.patch
virtio-net-correct-error-handling-of-virtqueue_kick.patch
vlan-set-correct-source-mac-address-with-tx-vlan-offload-enabled.patch
vlan-set-hard_header_len-according-to-available-acceleration.patch
vxlan-fix-nonfunctional-neigh_reduce.patch
vxlan-fix-potential-null-dereference-in-arp_reduce.patch
xen-netback-disable-rogue-vif-in-kthread-context.patch
xen-netback-fix-issue-caused-by-using-gso_type-wrongly.patch
xen-netback-remove-pointless-clause-from-if-statement.patch

57 files changed:
queue-3.13/bnx2-fix-shutdown-sequence.patch [new file with mode: 0644]
queue-3.13/bonding-set-correct-vlan-id-for-alb-xmit-path.patch [new file with mode: 0644]
queue-3.13/bridge-multicast-add-sanity-check-for-general-query-destination.patch [new file with mode: 0644]
queue-3.13/bridge-multicast-add-sanity-check-for-query-source.patch [new file with mode: 0644]
queue-3.13/bridge-multicast-enable-snooping-on-general-queries-only.patch [new file with mode: 0644]
queue-3.13/call-efx_set_channels-before-efx-type-dimension_resources.patch [new file with mode: 0644]
queue-3.13/eth-fec-fix-lost-promiscuous-mode-after-reconnecting-cable.patch [new file with mode: 0644]
queue-3.13/inet-frag-make-sure-forced-eviction-removes-all.patch [new file with mode: 0644]
queue-3.13/ip6mr-fix-mfc-notification-flags.patch [new file with mode: 0644]
queue-3.13/ip_tunnel-fix-dst-ref-count.patch [new file with mode: 0644]
queue-3.13/ipmr-fix-mfc-notification-flags.patch [new file with mode: 0644]
queue-3.13/ipv6-avoid-unnecessary-temporary-addresses-being-generated.patch [new file with mode: 0644]
queue-3.13/ipv6-don-t-set-dst_nocount-for-remotely-added-routes.patch [new file with mode: 0644]
queue-3.13/ipv6-fix-exthdrs-offload-registration.patch [new file with mode: 0644]
queue-3.13/ipv6-ip6_append_data_mtu-do-not-handle-the-mtu-of-the-second-fragment-properly.patch [new file with mode: 0644]
queue-3.13/ipv6-some-ipv6-statistic-counters-failed-to-disable-bh.patch [new file with mode: 0644]
queue-3.13/isdnloop-several-buffer-overflows.patch [new file with mode: 0644]
queue-3.13/isdnloop-validate-nul-terminated-strings-from-user.patch [new file with mode: 0644]
queue-3.13/net-cdc_ncm-fix-control-message-ordering.patch [new file with mode: 0644]
queue-3.13/net-fix-for-a-race-condition-in-the-inet-frag-code.patch [new file with mode: 0644]
queue-3.13/net-micrel-ks8851-ml-add-vdd-supply-support.patch [new file with mode: 0644]
queue-3.13/net-sctp-fix-skb-leakage-in-cookie-echo-path-of.patch [new file with mode: 0644]
queue-3.13/net-socket-error-on-a-negative-msg_namelen.patch [new file with mode: 0644]
queue-3.13/net-unix-non-blocking-recvmsg-should-not-return.patch [new file with mode: 0644]
queue-3.13/net-vxlan-fix-crash-when-interface-is-created-with-no-group.patch [new file with mode: 0644]
queue-3.13/netlink-don-t-compare-the-nul-termination-in-nla_strcmp.patch [new file with mode: 0644]
queue-3.13/netpoll-fix-the-skb-check-in-pkt_is_ns.patch [new file with mode: 0644]
queue-3.13/pkt_sched-fq-do-not-hold-qdisc-lock-while-allocating.patch [new file with mode: 0644]
queue-3.13/rds-prevent-dereference-of-a-null-device-in-rds_iw_laddr_check.patch [new file with mode: 0644]
queue-3.13/rtnetlink-fix-fdb-notification-flags.patch [new file with mode: 0644]
queue-3.13/series
queue-3.13/skbuff-skb_segment-orphan-frags-before-copying.patch [new file with mode: 0644]
queue-3.13/skbuff-skb_segment-s-frag-nskb_frag.patch [new file with mode: 0644]
queue-3.13/skbuff-skb_segment-s-fskb-list_skb.patch [new file with mode: 0644]
queue-3.13/skbuff-skb_segment-s-skb-head_skb.patch [new file with mode: 0644]
queue-3.13/skbuff-skb_segment-s-skb_frag-frag.patch [new file with mode: 0644]
queue-3.13/tcp-fix-get_timewait4_sock-delay-computation-on-64bit.patch [new file with mode: 0644]
queue-3.13/tcp-syncookies-do-not-use-getnstimeofday.patch [new file with mode: 0644]
queue-3.13/tcp-tcp_release_cb-should-release-socket-ownership.patch [new file with mode: 0644]
queue-3.13/tg3-do-not-include-vlan-acceleration-features-in-vlan_features.patch [new file with mode: 0644]
queue-3.13/tipc-allow-connection-shutdown-callback-to-be-invoked.patch [new file with mode: 0644]
queue-3.13/tipc-don-t-log-disabled-tasklet-handler-errors.patch [new file with mode: 0644]
queue-3.13/tipc-drop-subscriber-connection-id-invalidation.patch [new file with mode: 0644]
queue-3.13/tipc-fix-connection-refcount-leak.patch [new file with mode: 0644]
queue-3.13/tipc-fix-memory-leak-during-module-removal.patch [new file with mode: 0644]
queue-3.13/tipc-fix-spinlock-recursion-bug-for-failed-subscriptions.patch [new file with mode: 0644]
queue-3.13/usbnet-include-wait-queue-head-in-device-structure.patch [new file with mode: 0644]
queue-3.13/vhost-fix-total-length-when-packets-are-too-short.patch [new file with mode: 0644]
queue-3.13/vhost-validate-vhost_get_vq_desc-return-value.patch [new file with mode: 0644]
queue-3.13/virtio-net-correct-error-handling-of-virtqueue_kick.patch [new file with mode: 0644]
queue-3.13/vlan-set-correct-source-mac-address-with-tx-vlan-offload-enabled.patch [new file with mode: 0644]
queue-3.13/vlan-set-hard_header_len-according-to-available-acceleration.patch [new file with mode: 0644]
queue-3.13/vxlan-fix-nonfunctional-neigh_reduce.patch [new file with mode: 0644]
queue-3.13/vxlan-fix-potential-null-dereference-in-arp_reduce.patch [new file with mode: 0644]
queue-3.13/xen-netback-disable-rogue-vif-in-kthread-context.patch [new file with mode: 0644]
queue-3.13/xen-netback-fix-issue-caused-by-using-gso_type-wrongly.patch [new file with mode: 0644]
queue-3.13/xen-netback-remove-pointless-clause-from-if-statement.patch [new file with mode: 0644]

diff --git a/queue-3.13/bnx2-fix-shutdown-sequence.patch b/queue-3.13/bnx2-fix-shutdown-sequence.patch
new file mode 100644 (file)
index 0000000..7c52a98
--- /dev/null
@@ -0,0 +1,112 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Michael Chan <mchan@broadcom.com>
+Date: Sun, 9 Mar 2014 15:45:32 -0800
+Subject: bnx2: Fix shutdown sequence
+
+From: Michael Chan <mchan@broadcom.com>
+
+[ Upstream commit a8d9bc2e9f5d1c5a25e33cec096d2a1652d3fd52 ]
+
+The pci shutdown handler added in:
+
+    bnx2: Add pci shutdown handler
+    commit 25bfb1dd4ba3b2d9a49ce9d9b0cd7be1840e15ed
+
+created a shutdown sequence without chip reset if the device was
+never brought up.  This can cause the firmware to shut down the PHY
+prematurely and cause MMIO read cycles to be unresponsive.  On some
+systems, it may generate an NMI in the bnx2's pci shutdown handler.
+
+The fix is to tell the firmware not to shutdown the PHY if there was
+no prior chip reset.
+
+Signed-off-by: Michael Chan <mchan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnx2.c |   37 +++++++++++++++++++++++++++++++----
+ drivers/net/ethernet/broadcom/bnx2.h |    5 ++++
+ 2 files changed, 38 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -2490,6 +2490,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_da
+       bp->fw_wr_seq++;
+       msg_data |= bp->fw_wr_seq;
++      bp->fw_last_msg = msg_data;
+       bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
+@@ -3982,8 +3983,23 @@ bnx2_setup_wol(struct bnx2 *bp)
+                       wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
+       }
+-      if (!(bp->flags & BNX2_FLAG_NO_WOL))
+-              bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
++      if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
++              u32 val;
++
++              wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
++              if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
++                      bnx2_fw_sync(bp, wol_msg, 1, 0);
++                      return;
++              }
++              /* Tell firmware not to power down the PHY yet, otherwise
++               * the chip will take a long time to respond to MMIO reads.
++               */
++              val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
++              bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
++                            val | BNX2_PORT_FEATURE_ASF_ENABLED);
++              bnx2_fw_sync(bp, wol_msg, 1, 0);
++              bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
++      }
+ }
+@@ -4015,9 +4031,22 @@ bnx2_set_power_state(struct bnx2 *bp, pc
+                       if (bp->wol)
+                               pci_set_power_state(bp->pdev, PCI_D3hot);
+-              } else {
+-                      pci_set_power_state(bp->pdev, PCI_D3hot);
++                      break;
++
++              }
++              if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
++                      u32 val;
++
++                      /* Tell firmware not to power down the PHY yet,
++                       * otherwise the other port may not respond to
++                       * MMIO reads.
++                       */
++                      val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
++                      val &= ~BNX2_CONDITION_PM_STATE_MASK;
++                      val |= BNX2_CONDITION_PM_STATE_UNPREP;
++                      bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
+               }
++              pci_set_power_state(bp->pdev, PCI_D3hot);
+               /* No more memory access after this point until
+                * device is brought back to D0.
+--- a/drivers/net/ethernet/broadcom/bnx2.h
++++ b/drivers/net/ethernet/broadcom/bnx2.h
+@@ -6890,6 +6890,7 @@ struct bnx2 {
+       u16                     fw_wr_seq;
+       u16                     fw_drv_pulse_wr_seq;
++      u32                     fw_last_msg;
+       int                     rx_max_ring;
+       int                     rx_ring_size;
+@@ -7396,6 +7397,10 @@ struct bnx2_rv2p_fw_file {
+ #define BNX2_CONDITION_MFW_RUN_NCSI            0x00006000
+ #define BNX2_CONDITION_MFW_RUN_NONE            0x0000e000
+ #define BNX2_CONDITION_MFW_RUN_MASK            0x0000e000
++#define BNX2_CONDITION_PM_STATE_MASK           0x00030000
++#define BNX2_CONDITION_PM_STATE_FULL           0x00030000
++#define BNX2_CONDITION_PM_STATE_PREP           0x00020000
++#define BNX2_CONDITION_PM_STATE_UNPREP                 0x00010000
+ #define BNX2_BC_STATE_DEBUG_CMD                       0x1dc
+ #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE     0x42440000
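
The fix above works by temporarily setting a firmware feature bit around the sync call and then restoring the original value. Below is a minimal user-space sketch of that save/set/restore pattern; reg_read(), reg_write() and fw_sync() are made-up stand-ins, not driver APIs.

#include <stdint.h>
#include <stdio.h>

#define FEATURE_ASF_ENABLED 0x1u

static uint32_t shared_word;            /* stands in for BNX2_PORT_FEATURE */

static uint32_t reg_read(void)          { return shared_word; }
static void     reg_write(uint32_t v)   { shared_word = v; }

static void fw_sync(void)
{
        /* placeholder for the firmware handshake */
        printf("fw_sync sees word 0x%x\n", (unsigned int)reg_read());
}

static void setup_wol_like(void)
{
        uint32_t saved = reg_read();

        /* Ask the (hypothetical) firmware not to power down the PHY yet. */
        reg_write(saved | FEATURE_ASF_ENABLED);
        fw_sync();
        /* Put back whatever was there before. */
        reg_write(saved);
}

int main(void)
{
        shared_word = 0x40;
        setup_wol_like();
        printf("restored word 0x%x\n", (unsigned int)reg_read());
        return 0;
}
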
diff --git a/queue-3.13/bonding-set-correct-vlan-id-for-alb-xmit-path.patch b/queue-3.13/bonding-set-correct-vlan-id-for-alb-xmit-path.patch
new file mode 100644 (file)
index 0000000..5dbfce0
--- /dev/null
@@ -0,0 +1,43 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: dingtianhong <dingtianhong@huawei.com>
+Date: Wed, 12 Mar 2014 17:31:59 +0800
+Subject: bonding: set correct vlan id for alb xmit path
+
+From: dingtianhong <dingtianhong@huawei.com>
+
+[ Upstream commit fb00bc2e6cd2046282ba4b03f4fe682aee70b2f8 ]
+
+The commit d3ab3ffd1d728d7ee77340e7e7e2c7cfe6a4013e
+(bonding: use rlb_client_info->vlan_id instead of ->tag)
+removed the rlb_client_info->tag, but introduced an issue:
+vlan_get_tag() returns 0 for success and -EINVAL for error, yet
+client_info->vlan_id was set to 0 whenever vlan_get_tag()
+returned 0, i.e. on success, so the client_info would never
+get a correct vlan id.
+
+We should only set the vlan id to 0 when vlan_get_tag() returns an error.
+
+Fixes: d3ab3ffd1d7 (bonding: use rlb_client_info->vlan_id instead of ->tag)
+
+CC: Ding Tianhong <dingtianhong@huawei.com>
+CC: Jay Vosburgh <fubar@us.ibm.com>
+CC: Andy Gospodarek <andy@greyhouse.net>
+Signed-off-by: Ding Tianhong <dingtianhong@huawei.com>
+Acked-by: Veaceslav Falico <vfalico@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/bonding/bond_alb.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -731,7 +731,7 @@ static struct slave *rlb_choose_channel(
+                       client_info->ntt = 0;
+               }
+-              if (!vlan_get_tag(skb, &client_info->vlan_id))
++              if (vlan_get_tag(skb, &client_info->vlan_id))
+                       client_info->vlan_id = 0;
+               if (!client_info->assigned) {
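
The one-line fix hinges on vlan_get_tag()'s return convention: 0 means success, a negative errno means the frame has no tag. A minimal user-space sketch of the correct caller check follows; get_tag() is a made-up stand-in for vlan_get_tag().

#include <errno.h>
#include <stdio.h>

/* Mimics vlan_get_tag(): 0 on success, -EINVAL when the frame has no tag. */
static int get_tag(int has_tag, unsigned short tag, unsigned short *id)
{
        if (!has_tag)
                return -EINVAL;
        *id = tag;
        return 0;
}

int main(void)
{
        unsigned short vlan_id = 0xffff;

        /* Correct check: clear the id only when get_tag() reports an error. */
        if (get_tag(1, 100, &vlan_id))
                vlan_id = 0;
        printf("tagged frame   -> vlan_id=%d\n", vlan_id);      /* 100 */

        if (get_tag(0, 0, &vlan_id))
                vlan_id = 0;
        printf("untagged frame -> vlan_id=%d\n", vlan_id);      /* 0 */
        return 0;
}
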
diff --git a/queue-3.13/bridge-multicast-add-sanity-check-for-general-query-destination.patch b/queue-3.13/bridge-multicast-add-sanity-check-for-general-query-destination.patch
new file mode 100644 (file)
index 0000000..000415f
--- /dev/null
@@ -0,0 +1,75 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: =?UTF-8?q?Linus=20L=C3=BCssing?= <linus.luessing@web.de>
+Date: Mon, 10 Mar 2014 22:25:24 +0100
+Subject: bridge: multicast: add sanity check for general query destination
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Linus=20L=C3=BCssing?= <linus.luessing@web.de>
+
+[ Upstream commit 9ed973cc40c588abeaa58aea0683ea665132d11d ]
+
+General IGMP and MLD queries are supposed to have the multicast
+link-local all-nodes address as their destination according to RFC2236
+section 9, RFC3376 section 4.1.12/9.1, RFC2710 section 8 and RFC3810
+section 5.1.15.
+
+Without this check, such malformed IGMP/MLD queries can result in a
+denial of service: the queries are ignored by most IGMP/MLD listeners,
+which therefore will not respond with an IGMP/MLD report. However,
+without this patch these malformed MLD queries would enable the
+snooping part in the bridge code, potentially shutting down the
+according ports towards these hosts for multicast traffic as the
+bridge did not learn about these listeners.
+
+Reported-by: Jan Stancek <jstancek@redhat.com>
+Signed-off-by: Linus Lüssing <linus.luessing@web.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_multicast.c |   19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1181,6 +1181,14 @@ static int br_ip4_multicast_query(struct
+                           IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
+       }
++      /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
++       * all-systems destination addresses (224.0.0.1) for general queries
++       */
++      if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) {
++              err = -EINVAL;
++              goto out;
++      }
++
+       br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
+                                   max_delay);
+@@ -1228,6 +1236,7 @@ static int br_ip6_multicast_query(struct
+       unsigned long max_delay;
+       unsigned long now = jiffies;
+       const struct in6_addr *group = NULL;
++      bool is_general_query;
+       int err = 0;
+       spin_lock(&br->multicast_lock);
+@@ -1262,6 +1271,16 @@ static int br_ip6_multicast_query(struct
+               max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
+       }
++      is_general_query = group && ipv6_addr_any(group);
++
++      /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
++       * all-nodes destination address (ff02::1) for general queries
++       */
++      if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) {
++              err = -EINVAL;
++              goto out;
++      }
++
+       br_multicast_query_received(br, port, &br->ip6_querier,
+                                   !ipv6_addr_any(&ip6h->saddr), max_delay);
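
A minimal user-space sketch of the rule the IGMP part of this patch enforces: a general query (group 0.0.0.0) must be addressed to the all-systems group 224.0.0.1, otherwise it is dropped. The helper below is illustrative, not bridge code.

#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

/* Returns 1 if the query may be processed, 0 if it must be dropped. */
static int igmp_query_dst_ok(in_addr_t group, in_addr_t daddr)
{
        /* General query (group 0.0.0.0) must be sent to 224.0.0.1. */
        if (group == 0 && daddr != htonl(INADDR_ALLHOSTS_GROUP))
                return 0;
        return 1;
}

int main(void)
{
        in_addr_t good = inet_addr("224.0.0.1");
        in_addr_t bad  = inet_addr("224.0.0.5");

        printf("general query to 224.0.0.1: %s\n",
               igmp_query_dst_ok(0, good) ? "accept" : "drop");
        printf("general query to 224.0.0.5: %s\n",
               igmp_query_dst_ok(0, bad) ? "accept" : "drop");
        return 0;
}
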
diff --git a/queue-3.13/bridge-multicast-add-sanity-check-for-query-source.patch b/queue-3.13/bridge-multicast-add-sanity-check-for-query-source.patch
new file mode 100644 (file)
index 0000000..d0e4c63
--- /dev/null
@@ -0,0 +1,49 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: =?UTF-8?q?Linus=20L=C3=BCssing?= <linus.luessing@web.de>
+Date: Tue, 4 Mar 2014 03:57:35 +0100
+Subject: bridge: multicast: add sanity check for query source
+ addresses
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Linus=20L=C3=BCssing?= <linus.luessing@web.de>
+
+[ Upstream commit 6565b9eeef194afbb3beec80d6dd2447f4091f8c ]
+
+MLD queries are supposed to have an IPv6 link-local source address
+according to RFC2710, section 4 and RFC3810, section 5.1.14. This patch
+adds a sanity check to ignore such broken MLD queries.
+
+Without this check, such malformed MLD queries can result in a
+denial of service: the queries are ignored by any MLD listener,
+which therefore will not respond with an MLD report. However,
+without this patch these malformed MLD queries would enable the
+snooping part in the bridge code, potentially shutting down the
+according ports towards these hosts for multicast traffic as the
+bridge did not learn about these listeners.
+
+Reported-by: Jan Stancek <jstancek@redhat.com>
+Signed-off-by: Linus Lüssing <linus.luessing@web.de>
+Reviewed-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_multicast.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1235,6 +1235,12 @@ static int br_ip6_multicast_query(struct
+           (port && port->state == BR_STATE_DISABLED))
+               goto out;
++      /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
++      if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
++              err = -EINVAL;
++              goto out;
++      }
++
+       if (skb->len == sizeof(*mld)) {
+               if (!pskb_may_pull(skb, sizeof(*mld))) {
+                       err = -EINVAL;
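
A minimal user-space sketch of the equivalent source-address check: an MLD query must come from an IPv6 link-local address (fe80::/10) or be ignored. The helper is illustrative, not bridge code; IN6_IS_ADDR_LINKLOCAL() is the standard userspace macro.

#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

/* MLD queries must come from a link-local source (fe80::/10). */
static int mld_query_src_ok(const struct in6_addr *saddr)
{
        return IN6_IS_ADDR_LINKLOCAL(saddr);
}

int main(void)
{
        struct in6_addr ll, global;

        inet_pton(AF_INET6, "fe80::1", &ll);
        inet_pton(AF_INET6, "2001:db8::1", &global);

        printf("query from fe80::1:     %s\n",
               mld_query_src_ok(&ll) ? "accept" : "drop");
        printf("query from 2001:db8::1: %s\n",
               mld_query_src_ok(&global) ? "accept" : "drop");
        return 0;
}
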
diff --git a/queue-3.13/bridge-multicast-enable-snooping-on-general-queries-only.patch b/queue-3.13/bridge-multicast-enable-snooping-on-general-queries-only.patch
new file mode 100644 (file)
index 0000000..5a3f14d
--- /dev/null
@@ -0,0 +1,62 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: =?UTF-8?q?Linus=20L=C3=BCssing?= <linus.luessing@web.de>
+Date: Mon, 10 Mar 2014 22:25:25 +0100
+Subject: bridge: multicast: enable snooping on general queries only
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Linus=20L=C3=BCssing?= <linus.luessing@web.de>
+
+[ Upstream commit 20a599bec95a52fa72432b2376a2ce47c5bb68fb ]
+
+Without this check someone could easily create a denial of service
+by injecting multicast-specific queries to enable the bridge
+snooping part if no real querier issuing periodic general queries
+is present on the link, which would result in the bridge wrongly
+shutting down ports for multicast traffic as the bridge did not learn
+about these listeners.
+
+With this patch the snooping code is enabled upon receiving valid,
+general queries only.
+
+Signed-off-by: Linus Lüssing <linus.luessing@web.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_multicast.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1127,9 +1127,10 @@ static void br_multicast_query_received(
+                                       struct net_bridge_port *port,
+                                       struct bridge_mcast_querier *querier,
+                                       int saddr,
++                                      bool is_general_query,
+                                       unsigned long max_delay)
+ {
+-      if (saddr)
++      if (saddr && is_general_query)
+               br_multicast_update_querier_timer(br, querier, max_delay);
+       else if (timer_pending(&querier->timer))
+               return;
+@@ -1190,7 +1191,7 @@ static int br_ip4_multicast_query(struct
+       }
+       br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
+-                                  max_delay);
++                                  !group, max_delay);
+       if (!group)
+               goto out;
+@@ -1282,7 +1283,8 @@ static int br_ip6_multicast_query(struct
+       }
+       br_multicast_query_received(br, port, &br->ip6_querier,
+-                                  !ipv6_addr_any(&ip6h->saddr), max_delay);
++                                  !ipv6_addr_any(&ip6h->saddr),
++                                  is_general_query, max_delay);
+       if (!group)
+               goto out;
diff --git a/queue-3.13/call-efx_set_channels-before-efx-type-dimension_resources.patch b/queue-3.13/call-efx_set_channels-before-efx-type-dimension_resources.patch
new file mode 100644 (file)
index 0000000..433dc15
--- /dev/null
@@ -0,0 +1,67 @@
+From foo@baz Thu Apr 10 22:03:05 PDT 2014
+From: Daniel Pieczko <dpieczko@solarflare.com>
+Date: Tue, 1 Apr 2014 13:10:34 +0100
+Subject: Call efx_set_channels() before efx->type->dimension_resources()
+
+From: Daniel Pieczko <dpieczko@solarflare.com>
+
+[ Upstream commit 52ad762b85ed7947ec9eff6b036eb985352f6874 ]
+
+When using the "separate_tx_channels=1" module parameter, the TX queues are
+initially numbered starting from the first TX-only channel number (after all the
+RX-only channels).  efx_set_channels() renumbers the queues so that they are
+indexed from zero.
+
+On EF10, the TX queues need to be relabelled in this way before calling the
+dimension_resources NIC type operation, otherwise the TX queue PIO buffers can be
+linked to the wrong VIs when using "separate_tx_channels=1".
+
+Added comments to explain the UC/WC mappings for PIO buffers.
+
+Signed-off-by: Shradha Shah <sshah@solarflare.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/ef10.c |    7 +++++++
+ drivers/net/ethernet/sfc/efx.c  |    3 ++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/sfc/ef10.c
++++ b/drivers/net/ethernet/sfc/ef10.c
+@@ -555,10 +555,17 @@ static int efx_ef10_dimension_resources(
+        * several of each (in fact that's the only option if host
+        * page size is >4K).  So we may allocate some extra VIs just
+        * for writing PIO buffers through.
++       *
++       * The UC mapping contains (min_vis - 1) complete VIs and the
++       * first half of the next VI.  Then the WC mapping begins with
++       * the second half of this last VI.
+        */
+       uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+                                    ER_DZ_TX_PIOBUF);
+       if (nic_data->n_piobufs) {
++              /* pio_write_vi_base rounds down to give the number of complete
++               * VIs inside the UC mapping.
++               */
+               pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+               wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
+                                              nic_data->n_piobufs) *
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -1514,6 +1514,8 @@ static int efx_probe_nic(struct efx_nic
+       if (rc)
+               goto fail1;
++      efx_set_channels(efx);
++
+       rc = efx->type->dimension_resources(efx);
+       if (rc)
+               goto fail2;
+@@ -1524,7 +1526,6 @@ static int efx_probe_nic(struct efx_nic
+               efx->rx_indir_table[i] =
+                       ethtool_rxfh_indir_default(i, efx->rss_spread);
+-      efx_set_channels(efx);
+       netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+       netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
diff --git a/queue-3.13/eth-fec-fix-lost-promiscuous-mode-after-reconnecting-cable.patch b/queue-3.13/eth-fec-fix-lost-promiscuous-mode-after-reconnecting-cable.patch
new file mode 100644 (file)
index 0000000..023cfc8
--- /dev/null
@@ -0,0 +1,56 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Stefan Wahren <stefan.wahren@i2se.com>
+Date: Wed, 12 Mar 2014 11:28:19 +0100
+Subject: eth: fec: Fix lost promiscuous mode after reconnecting cable
+
+From: Stefan Wahren <stefan.wahren@i2se.com>
+
+[ Upstream commit 84fe61821e4ebab6322eeae3f3c27f77f0031978 ]
+
+If the Freescale fec is in promiscuous mode and the network cable is
+reconnected, promiscuous mode gets lost. The problem is caused
+by calling set_multicast_list too soon to re-enable promisc mode;
+its FEC_R_CNTRL register changes are overwritten by fec_restart.
+
+This patch fixes this by moving the call after the init of the FEC_R_CNTRL
+register in fec_restart.
+
+Successfully tested on an i.MX28 board.
+
+Signed-off-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c |   14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -528,13 +528,6 @@ fec_restart(struct net_device *ndev, int
+       /* Clear any outstanding interrupt. */
+       writel(0xffc00000, fep->hwp + FEC_IEVENT);
+-      /* Setup multicast filter. */
+-      set_multicast_list(ndev);
+-#ifndef CONFIG_M5272
+-      writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+-      writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+-#endif
+-
+       /* Set maximum receive buffer size. */
+       writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
+@@ -655,6 +648,13 @@ fec_restart(struct net_device *ndev, int
+       writel(rcntl, fep->hwp + FEC_R_CNTRL);
++      /* Setup multicast filter. */
++      set_multicast_list(ndev);
++#ifndef CONFIG_M5272
++      writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
++      writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
++#endif
++
+       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+               /* enable ENET endian swap */
+               ecntl |= (1 << 8);
diff --git a/queue-3.13/inet-frag-make-sure-forced-eviction-removes-all.patch b/queue-3.13/inet-frag-make-sure-forced-eviction-removes-all.patch
new file mode 100644 (file)
index 0000000..b4249ed
--- /dev/null
@@ -0,0 +1,58 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Florian Westphal <fw@strlen.de>
+Date: Thu, 6 Mar 2014 18:06:41 +0100
+Subject: inet: frag: make sure forced eviction removes all
+ frags
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit e588e2f286ed7da011ed357c24c5b9a554e26595 ]
+
+Quoting Alexander Aring:
+  While fragmentation and unloading of 6lowpan module I got this kernel Oops
+  after few seconds:
+
+  BUG: unable to handle kernel paging request at f88bbc30
+  [..]
+  Modules linked in: ipv6 [last unloaded: 6lowpan]
+  Call Trace:
+   [<c012af4c>] ? call_timer_fn+0x54/0xb3
+   [<c012aef8>] ? process_timeout+0xa/0xa
+   [<c012b66b>] run_timer_softirq+0x140/0x15f
+
+The problem is that incomplete frags are still around after unload; when
+their frag expire timer fires, we get a crash.
+
+When a netns is removed (also done when unloading module), inet_frag
+calls the evictor with 'force' argument to purge remaining frags.
+
+The evictor loop terminates when accounted memory ('work') drops to 0
+or the lru-list becomes empty.  However, the mem accounting is done
+via percpu counters and may not be accurate, i.e. the loop may terminate
+prematurely.
+
+Alter evictor to only stop once the lru list is empty when force is
+requested.
+
+Reported-by: Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
+Reported-by: Alexander Aring <alex.aring@gmail.com>
+Tested-by: Alexander Aring <alex.aring@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/inet_fragment.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -208,7 +208,7 @@ int inet_frag_evictor(struct netns_frags
+       }
+       work = frag_mem_limit(nf) - nf->low_thresh;
+-      while (work > 0) {
++      while (work > 0 || force) {
+               spin_lock(&nf->lru_lock);
+               if (list_empty(&nf->lru_list)) {
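
A minimal user-space sketch of why the "|| force" matters: the memory estimate may undercount, so a forced eviction must keep going until the list is actually empty. The data structures and numbers below are made up for illustration.

#include <stdbool.h>
#include <stdio.h>

struct frag { struct frag *next; };

static struct frag *lru_head;
static long mem_estimate;       /* may undercount, like the percpu counters */

static void evict_one(void)
{
        lru_head = lru_head->next;
        mem_estimate -= 100;
}

static int evictor(bool force)
{
        long work = mem_estimate;       /* snapshot, possibly too small */
        int evicted = 0;

        while (work > 0 || force) {     /* the fix: "|| force" */
                if (!lru_head)
                        break;          /* list empty: done even when forced */
                evict_one();
                work -= 100;
                evicted++;
        }
        return evicted;
}

int main(void)
{
        static struct frag f3, f2 = { &f3 }, f1 = { &f2 };

        lru_head = &f1;
        mem_estimate = 100;     /* undercounts the three queued frags */
        printf("forced eviction removed %d frags\n", evictor(true));
        return 0;
}
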
diff --git a/queue-3.13/ip6mr-fix-mfc-notification-flags.patch b/queue-3.13/ip6mr-fix-mfc-notification-flags.patch
new file mode 100644 (file)
index 0000000..7d89300
--- /dev/null
@@ -0,0 +1,73 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Date: Wed, 19 Mar 2014 17:47:51 +0100
+Subject: ip6mr: fix mfc notification flags
+
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+
+[ Upstream commit f518338b16038beeb73e155e60d0f70beb9379f4 ]
+
+Commit 812e44dd1829 ("ip6mr: advertise new mfc entries via rtnl") reuses the
+function ip6mr_fill_mroute() to notify mfc events.
+But this function was used only for dump and thus was always setting the
+flag NLM_F_MULTI, which is wrong in case of a single notification.
+
+Libraries like libnl will wait forever for NLMSG_DONE.
+
+CC: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Acked-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6mr.c |   13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -2349,13 +2349,14 @@ int ip6mr_get_route(struct net *net,
+ }
+ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
+-                           u32 portid, u32 seq, struct mfc6_cache *c, int cmd)
++                           u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
++                           int flags)
+ {
+       struct nlmsghdr *nlh;
+       struct rtmsg *rtm;
+       int err;
+-      nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
++      nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
+       if (nlh == NULL)
+               return -EMSGSIZE;
+@@ -2423,7 +2424,7 @@ static void mr6_netlink_event(struct mr6
+       if (skb == NULL)
+               goto errout;
+-      err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
++      err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
+       if (err < 0)
+               goto errout;
+@@ -2462,7 +2463,8 @@ static int ip6mr_rtm_dumproute(struct sk
+                               if (ip6mr_fill_mroute(mrt, skb,
+                                                     NETLINK_CB(cb->skb).portid,
+                                                     cb->nlh->nlmsg_seq,
+-                                                    mfc, RTM_NEWROUTE) < 0)
++                                                    mfc, RTM_NEWROUTE,
++                                                    NLM_F_MULTI) < 0)
+                                       goto done;
+ next_entry:
+                               e++;
+@@ -2476,7 +2478,8 @@ next_entry:
+                       if (ip6mr_fill_mroute(mrt, skb,
+                                             NETLINK_CB(cb->skb).portid,
+                                             cb->nlh->nlmsg_seq,
+-                                            mfc, RTM_NEWROUTE) < 0) {
++                                            mfc, RTM_NEWROUTE,
++                                            NLM_F_MULTI) < 0) {
+                               spin_unlock_bh(&mfc_unres_lock);
+                               goto done;
+                       }
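
The same rule applies to the ipmr patch that follows: NLM_F_MULTI marks one part of a multi-part dump terminated by NLMSG_DONE, while a standalone notification must not carry it, or listeners such as libnl wait forever. A minimal user-space sketch of the flag choice; mfc_msg_flags() is a made-up helper.

#include <sys/socket.h>
#include <linux/netlink.h>
#include <stdbool.h>
#include <stdio.h>

/* Pick the nlmsg flags for an mfc route message depending on context. */
static int mfc_msg_flags(bool is_dump)
{
        return is_dump ? NLM_F_MULTI : 0;
}

int main(void)
{
        printf("dump part flags:    0x%x\n", (unsigned int)mfc_msg_flags(true));
        printf("notification flags: 0x%x\n", (unsigned int)mfc_msg_flags(false));
        return 0;
}
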
diff --git a/queue-3.13/ip_tunnel-fix-dst-ref-count.patch b/queue-3.13/ip_tunnel-fix-dst-ref-count.patch
new file mode 100644 (file)
index 0000000..8b7ebfa
--- /dev/null
@@ -0,0 +1,66 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Pravin B Shelar <pshelar@nicira.com>
+Date: Sun, 23 Mar 2014 22:06:36 -0700
+Subject: ip_tunnel: Fix dst ref-count.
+
+From: Pravin B Shelar <pshelar@nicira.com>
+
+[ Upstream commit fbd02dd405d0724a0f25897ed4a6813297c9b96f ]
+
+Commit 10ddceb22ba (ip_tunnel:multicast process cause panic due
+to skb->_skb_refdst NULL pointer) removed dst-drop call from
+ip-tunnel-recv.
+
+Following commit reintroduce dst-drop and fix the original bug by
+checking loopback packet before releasing dst.
+Original bug: https://bugzilla.kernel.org/show_bug.cgi?id=70681
+
+CC: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/gre_demux.c      |    8 ++++++++
+ net/ipv4/ip_tunnel.c      |    3 ---
+ net/ipv4/ip_tunnel_core.c |    1 +
+ 3 files changed, 9 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/gre_demux.c
++++ b/net/ipv4/gre_demux.c
+@@ -182,6 +182,14 @@ static int gre_cisco_rcv(struct sk_buff
+       int i;
+       bool csum_err = false;
++#ifdef CONFIG_NET_IPGRE_BROADCAST
++      if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
++              /* Looped back packet, drop it! */
++              if (rt_is_output_route(skb_rtable(skb)))
++                      goto drop;
++      }
++#endif
++
+       if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+               goto drop;
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -411,9 +411,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunn
+ #ifdef CONFIG_NET_IPGRE_BROADCAST
+       if (ipv4_is_multicast(iph->daddr)) {
+-              /* Looped back packet, drop it! */
+-              if (rt_is_output_route(skb_rtable(skb)))
+-                      goto drop;
+               tunnel->dev->stats.multicast++;
+               skb->pkt_type = PACKET_BROADCAST;
+       }
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -109,6 +109,7 @@ int iptunnel_pull_header(struct sk_buff
+       secpath_reset(skb);
+       if (!skb->l4_rxhash)
+               skb->rxhash = 0;
++      skb_dst_drop(skb);
+       skb->vlan_tci = 0;
+       skb_set_queue_mapping(skb, 0);
+       skb->pkt_type = PACKET_HOST;
diff --git a/queue-3.13/ipmr-fix-mfc-notification-flags.patch b/queue-3.13/ipmr-fix-mfc-notification-flags.patch
new file mode 100644 (file)
index 0000000..63c752e
--- /dev/null
@@ -0,0 +1,73 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Date: Wed, 19 Mar 2014 17:47:50 +0100
+Subject: ipmr: fix mfc notification flags
+
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+
+[ Upstream commit 65886f439ab0fdc2dff20d1fa87afb98c6717472 ]
+
+Commit 8cd3ac9f9b7b ("ipmr: advertise new mfc entries via rtnl") reuses the
+function ipmr_fill_mroute() to notify mfc events.
+But this function was used only for dump and thus was always setting the
+flag NLM_F_MULTI, which is wrong in case of a single notification.
+
+Libraries like libnl will wait forever for NLMSG_DONE.
+
+CC: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Acked-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ipmr.c |   13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -2253,13 +2253,14 @@ int ipmr_get_route(struct net *net, stru
+ }
+ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+-                          u32 portid, u32 seq, struct mfc_cache *c, int cmd)
++                          u32 portid, u32 seq, struct mfc_cache *c, int cmd,
++                          int flags)
+ {
+       struct nlmsghdr *nlh;
+       struct rtmsg *rtm;
+       int err;
+-      nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
++      nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
+       if (nlh == NULL)
+               return -EMSGSIZE;
+@@ -2327,7 +2328,7 @@ static void mroute_netlink_event(struct
+       if (skb == NULL)
+               goto errout;
+-      err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
++      err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
+       if (err < 0)
+               goto errout;
+@@ -2366,7 +2367,8 @@ static int ipmr_rtm_dumproute(struct sk_
+                               if (ipmr_fill_mroute(mrt, skb,
+                                                    NETLINK_CB(cb->skb).portid,
+                                                    cb->nlh->nlmsg_seq,
+-                                                   mfc, RTM_NEWROUTE) < 0)
++                                                   mfc, RTM_NEWROUTE,
++                                                   NLM_F_MULTI) < 0)
+                                       goto done;
+ next_entry:
+                               e++;
+@@ -2380,7 +2382,8 @@ next_entry:
+                       if (ipmr_fill_mroute(mrt, skb,
+                                            NETLINK_CB(cb->skb).portid,
+                                            cb->nlh->nlmsg_seq,
+-                                           mfc, RTM_NEWROUTE) < 0) {
++                                           mfc, RTM_NEWROUTE,
++                                           NLM_F_MULTI) < 0) {
+                               spin_unlock_bh(&mfc_unres_lock);
+                               goto done;
+                       }
diff --git a/queue-3.13/ipv6-avoid-unnecessary-temporary-addresses-being-generated.patch b/queue-3.13/ipv6-avoid-unnecessary-temporary-addresses-being-generated.patch
new file mode 100644 (file)
index 0000000..a85f03e
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Heiner Kallweit <heiner.kallweit@web.de>
+Date: Wed, 12 Mar 2014 22:13:19 +0100
+Subject: ipv6: Avoid unnecessary temporary addresses being generated
+
+From: Heiner Kallweit <heiner.kallweit@web.de>
+
+[ Upstream commit ecab67015ef6e3f3635551dcc9971cf363cc1cd5 ]
+
+tmp_prefered_lft is an offset to ifp->tstamp, not now. Therefore
+age needs to be added to the condition.
+
+Age calculation in ipv6_create_tempaddr is different from the one
+in addrconf_verify and doesn't consider ADDRCONF_TIMER_FUZZ_MINUS.
+This can cause age in ipv6_create_tempaddr to be less than the one
+in addrconf_verify and therefore an unnecessary temporary address to
+be generated.
+Use the age calculation as in addrconf_verify to avoid this.
+
+Signed-off-by: Heiner Kallweit <heiner.kallweit@web.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/addrconf.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1078,8 +1078,11 @@ retry:
+        * Lifetime is greater than REGEN_ADVANCE time units.  In particular,
+        * an implementation must not create a temporary address with a zero
+        * Preferred Lifetime.
++       * Use age calculation as in addrconf_verify to avoid unnecessary
++       * temporary addresses being generated.
+        */
+-      if (tmp_prefered_lft <= regen_advance) {
++      age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
++      if (tmp_prefered_lft <= regen_advance + age) {
+               in6_ifa_put(ifp);
+               in6_dev_put(idev);
+               ret = -1;
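
A worked example of the adjusted check, with made-up numbers: tmp_prefered_lft is relative to ifp->tstamp, so the elapsed age has to be added to regen_advance before comparing. HZ and the fuzz constant below are illustrative values, not the kernel's.

#include <stdio.h>

#define HZ 100
#define TIMER_FUZZ_MINUS (HZ / 2)       /* illustrative, not the kernel value */

int main(void)
{
        unsigned long tstamp = 1000;            /* jiffies at address creation */
        unsigned long now = 1000 + 30 * HZ;     /* 30 seconds later */
        unsigned long tmp_prefered_lft = 40;    /* seconds, relative to tstamp */
        unsigned long regen_advance = 15;       /* seconds */

        unsigned long age = (now - tstamp + TIMER_FUZZ_MINUS) / HZ;     /* 30 */

        /* Old check: 40 <= 15 is false, so a temporary address would be
         * created although only ~10s of its preferred lifetime remain. */
        printf("old check skips creation: %s\n",
               tmp_prefered_lft <= regen_advance ? "yes" : "no");

        /* Fixed check: 40 <= 15 + 30 is true, so the unnecessary
         * temporary address is not generated. */
        printf("new check skips creation: %s\n",
               tmp_prefered_lft <= regen_advance + age ? "yes" : "no");
        return 0;
}
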
diff --git a/queue-3.13/ipv6-don-t-set-dst_nocount-for-remotely-added-routes.patch b/queue-3.13/ipv6-don-t-set-dst_nocount-for-remotely-added-routes.patch
new file mode 100644 (file)
index 0000000..d9f7fb0
--- /dev/null
@@ -0,0 +1,33 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Sabrina Dubroca <sd@queasysnail.net>
+Date: Thu, 6 Mar 2014 17:51:57 +0100
+Subject: ipv6: don't set DST_NOCOUNT for remotely added routes
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+[ Upstream commit c88507fbad8055297c1d1e21e599f46960cbee39 ]
+
+DST_NOCOUNT should only be used if an authorized user adds routes
+locally. In case of routes which are added on behalf of router
+advertisments this flag must not get used as it allows an unlimited
+number of routes getting added remotely.
+
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/route.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1495,7 +1495,7 @@ int ip6_route_add(struct fib6_config *cf
+       if (!table)
+               goto out;
+-      rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
++      rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
+       if (!rt) {
+               err = -ENOMEM;
diff --git a/queue-3.13/ipv6-fix-exthdrs-offload-registration.patch b/queue-3.13/ipv6-fix-exthdrs-offload-registration.patch
new file mode 100644 (file)
index 0000000..1d46774
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Anton Nayshtut <anton@swortex.com>
+Date: Wed, 5 Mar 2014 08:30:08 +0200
+Subject: ipv6: Fix exthdrs offload registration.
+
+From: Anton Nayshtut <anton@swortex.com>
+
+[ Upstream commit d2d273ffabd315eecefce21a4391d44b6e156b73 ]
+
+Without this fix, ipv6_exthdrs_offload_init doesn't register IPPROTO_DSTOPTS
+offload, but returns 0 (as the IPPROTO_ROUTING registration actually succeeds).
+
+This then causes ipv6_gso_segment to drop IPv6 packets with an IPPROTO_DSTOPTS
+header.
+
+The issue was detected and the fix verified by running the MS HCK Offload LSO
+test on top of QEMU Windows guests, as this test sends IPv6 packets with
+IPPROTO_DSTOPTS.
+
+Signed-off-by: Anton Nayshtut <anton@swortex.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/exthdrs_offload.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv6/exthdrs_offload.c
++++ b/net/ipv6/exthdrs_offload.c
+@@ -25,11 +25,11 @@ int __init ipv6_exthdrs_offload_init(voi
+       int ret;
+       ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING);
+-      if (!ret)
++      if (ret)
+               goto out;
+       ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS);
+-      if (!ret)
++      if (ret)
+               goto out_rt;
+ out:
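
The bug was an inverted error check: inet6_add_offload() follows the usual 0-on-success convention, so the bail-out branch must be "if (ret)". A minimal user-space sketch of the corrected idiom; the register_* functions are made up for illustration.

#include <stdio.h>

/* Both registration helpers are made up; like inet6_add_offload() they
 * return 0 on success and a negative value on failure. */
static int register_routing_offload(void) { return 0; }
static int register_dstopts_offload(void) { return 0; }

static int exthdrs_offload_init(void)
{
        int ret;

        ret = register_routing_offload();
        if (ret)                        /* bail out only on failure */
                goto out;

        ret = register_dstopts_offload();
        if (ret)
                goto out_rt;
out:
        return ret;
out_rt:
        /* a real implementation would undo the first registration here */
        goto out;
}

int main(void)
{
        printf("init: %s\n", exthdrs_offload_init() ? "failed" : "ok");
        return 0;
}
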
diff --git a/queue-3.13/ipv6-ip6_append_data_mtu-do-not-handle-the-mtu-of-the-second-fragment-properly.patch b/queue-3.13/ipv6-ip6_append_data_mtu-do-not-handle-the-mtu-of-the-second-fragment-properly.patch
new file mode 100644 (file)
index 0000000..945ccd9
--- /dev/null
@@ -0,0 +1,96 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: lucien <lucien.xin@gmail.com>
+Date: Mon, 17 Mar 2014 12:51:01 +0800
+Subject: ipv6: ip6_append_data_mtu do not handle the mtu of the second fragment properly
+
+From: lucien <lucien.xin@gmail.com>
+
+[ Upstream commit e367c2d03dba4c9bcafad24688fadb79dd95b218 ]
+
+In ip6_append_data_mtu(), when the xfrm mode is not tunnel (such as
+transport), the ipsec header needs to be added to the first fragment, so the
+mtu is decreased to reserve space for it; when the second fragment comes, the
+mtu should be turned back, as commit 0c1833797a5a6ec23ea9261d979aa18078720b74
+said.  However, commit 75a493e60ac4bb uses *mtu = min(*mtu, ...) to change the
+mtu, which leads to the new mtu always being equal to the first fragment's,
+and it cannot be turned back.
+
+When I test with ping6 -c1 -s5000 $ip (mtu=1280):
+...frag (0|1232) ESP(spi=0x00002000,seq=0xb), length 1232
+...frag (1232|1216)
+...frag (2448|1216)
+...frag (3664|1216)
+...frag (4880|164)
+
+which should be:
+...frag (0|1232) ESP(spi=0x00001000,seq=0x1), length 1232
+...frag (1232|1232)
+...frag (2464|1232)
+...frag (3696|1232)
+...frag (4928|116)
+
+So delete the min() when changing the mtu back.
+
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Fixes: 75a493e60ac4bb ("ipv6: ip6_append_data_mtu did not care about pmtudisc and frag_size")
+Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_output.c |   14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1088,21 +1088,19 @@ static void ip6_append_data_mtu(unsigned
+                               unsigned int fragheaderlen,
+                               struct sk_buff *skb,
+                               struct rt6_info *rt,
+-                              bool pmtuprobe)
++                              unsigned int orig_mtu)
+ {
+       if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+               if (skb == NULL) {
+                       /* first fragment, reserve header_len */
+-                      *mtu = *mtu - rt->dst.header_len;
++                      *mtu = orig_mtu - rt->dst.header_len;
+               } else {
+                       /*
+                        * this fragment is not first, the headers
+                        * space is regarded as data space.
+                        */
+-                      *mtu = min(*mtu, pmtuprobe ?
+-                                 rt->dst.dev->mtu :
+-                                 dst_mtu(rt->dst.path));
++                      *mtu = orig_mtu;
+               }
+               *maxfraglen = ((*mtu - fragheaderlen) & ~7)
+                             + fragheaderlen - sizeof(struct frag_hdr);
+@@ -1119,7 +1117,7 @@ int ip6_append_data(struct sock *sk, int
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       struct inet_cork *cork;
+       struct sk_buff *skb, *skb_prev = NULL;
+-      unsigned int maxfraglen, fragheaderlen, mtu;
++      unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
+       int exthdrlen;
+       int dst_exthdrlen;
+       int hh_len;
+@@ -1201,6 +1199,7 @@ int ip6_append_data(struct sock *sk, int
+               dst_exthdrlen = 0;
+               mtu = cork->fragsize;
+       }
++      orig_mtu = mtu;
+       hh_len = LL_RESERVED_SPACE(rt->dst.dev);
+@@ -1298,8 +1297,7 @@ alloc_new_skb:
+                       if (skb == NULL || skb_prev == NULL)
+                               ip6_append_data_mtu(&mtu, &maxfraglen,
+                                                   fragheaderlen, skb, rt,
+-                                                  np->pmtudisc ==
+-                                                  IPV6_PMTUDISC_PROBE);
++                                                  orig_mtu);
+                       skb_prev = skb;
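
A short sketch of the arithmetic behind the ping6 output quoted above: with a 1280-byte path MTU each non-first IPv6 fragment can carry 1280 - 40 (IPv6 header) - 8 (fragment header) = 1232 bytes, but reusing the reduced first-fragment MTU gives only 1216. The 16-byte header_len below is an illustrative value.

#include <stdio.h>

/* Payload carried by one IPv6 fragment for a given per-fragment mtu. */
static unsigned int frag_payload(unsigned int mtu)
{
        return mtu - 40 /* IPv6 header */ - 8 /* fragment header */;
}

int main(void)
{
        unsigned int orig_mtu = 1280;
        unsigned int header_len = 16;   /* room reserved for ESP in fragment 1 */

        printf("buggy follow-up fragments: %u bytes\n",
               frag_payload(orig_mtu - header_len));    /* 1216 */
        printf("fixed follow-up fragments: %u bytes\n",
               frag_payload(orig_mtu));                 /* 1232 */
        return 0;
}
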
diff --git a/queue-3.13/ipv6-some-ipv6-statistic-counters-failed-to-disable-bh.patch b/queue-3.13/ipv6-some-ipv6-statistic-counters-failed-to-disable-bh.patch
new file mode 100644 (file)
index 0000000..29d0d1c
--- /dev/null
@@ -0,0 +1,94 @@
+From foo@baz Thu Apr 10 22:03:05 PDT 2014
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Date: Mon, 31 Mar 2014 20:14:10 +0200
+Subject: ipv6: some ipv6 statistic counters failed to disable bh
+
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+
+[ Upstream commit 43a43b6040165f7b40b5b489fe61a4cb7f8c4980 ]
+
+After commit c15b1ccadb323ea ("ipv6: move DAD and addrconf_verify
+processing to workqueue") some counters are now updated in process context
+and thus need to disable bh before doing so, otherwise deadlocks can
+happen on 32-bit archs. Fabio Estevam noticed this while mounting
+an NFS volume on an ARM board.
+
+As compensation for missing this I looked at the other *_STATS_BH users
+and found three other calls which need updating:
+
+1) icmp6_send: ip6_fragment -> icmpv6_send -> icmp6_send (error handling)
+2) ip6_push_pending_frames: rawv6_sendmsg -> rawv6_push_pending_frames -> ...
+   (only in case of icmp protocol with raw sockets in error handling)
+3) ping6_v6_sendmsg (error handling)
+
+Fixes: c15b1ccadb323ea ("ipv6: move DAD and addrconf_verify processing to workqueue")
+Reported-by: Fabio Estevam <festevam@gmail.com>
+Tested-by: Fabio Estevam <fabio.estevam@freescale.com>
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/icmp.c       |    2 +-
+ net/ipv6/ip6_output.c |    4 ++--
+ net/ipv6/mcast.c      |   11 ++++++-----
+ net/ipv6/ping.c       |    4 ++--
+ 4 files changed, 11 insertions(+), 10 deletions(-)
+
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -516,7 +516,7 @@ static void icmp6_send(struct sk_buff *s
+                             np->tclass, NULL, &fl6, (struct rt6_info *)dst,
+                             MSG_DONTWAIT, np->dontfrag);
+       if (err) {
+-              ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
++              ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
+               ip6_flush_pending_frames(sk);
+       } else {
+               err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1553,8 +1553,8 @@ int ip6_push_pending_frames(struct sock
+       if (proto == IPPROTO_ICMPV6) {
+               struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+-              ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
+-              ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
++              ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
++              ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+       }
+       err = ip6_local_out(skb);
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1620,11 +1620,12 @@ static void mld_sendpack(struct sk_buff
+                     dst_output);
+ out:
+       if (!err) {
+-              ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
+-              ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+-              IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
+-      } else
+-              IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
++              ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
++              ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
++              IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
++      } else {
++              IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
++      }
+       rcu_read_unlock();
+       return;
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -182,8 +182,8 @@ int ping_v6_sendmsg(struct kiocb *iocb,
+                             MSG_DONTWAIT, np->dontfrag);
+       if (err) {
+-              ICMP6_INC_STATS_BH(sock_net(sk), rt->rt6i_idev,
+-                                 ICMP6_MIB_OUTERRORS);
++              ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev,
++                              ICMP6_MIB_OUTERRORS);
+               ip6_flush_pending_frames(sk);
+       } else {
+               err = icmpv6_push_pending_frames(sk, &fl6,
diff --git a/queue-3.13/isdnloop-several-buffer-overflows.patch b/queue-3.13/isdnloop-several-buffer-overflows.patch
new file mode 100644 (file)
index 0000000..ef680c1
--- /dev/null
@@ -0,0 +1,95 @@
+From foo@baz Thu Apr 10 22:03:05 PDT 2014
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Tue, 8 Apr 2014 12:23:09 +0300
+Subject: isdnloop: several buffer overflows
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 7563487cbf865284dcd35e9ef5a95380da046737 ]
+
+There are three buffer overflows addressed in this patch.
+
+1) In isdnloop_fake_err() we add an 'E' to a 60 character string and
+then copy it into a 60 character buffer.  I have made the destination
+buffer 64 characters and I'm changed the sprintf() to a snprintf().
+
+2) In isdnloop_parse_cmd(), p points to a 6 characters into a 60
+character buffer so we have 54 characters.  The ->eazlist[] is 11
+characters long.  I have modified the code to return if the source
+buffer is too long.
+
+3) In isdnloop_command() the cbuf[] array was 60 characters long but the
+max length of the string then can be up to 79 characters.  I made the
+cbuf array 80 characters long and changed the sprintf() to snprintf().
+I also removed the temporary "dial" buffer and changed it to use "p"
+directly.
+
+Unfortunately, we pass the "cbuf" string from isdnloop_command() to
+isdnloop_writecmd() which truncates anything over 60 characters to make
+it fit in card->omsg[].  (It can accept values up to 255 characters so
+long as there is a '\n' character every 60 characters).  For now I have
+just fixed the memory corruption bug and left the other problems in this
+driver alone.
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/isdn/isdnloop/isdnloop.c |   17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+--- a/drivers/isdn/isdnloop/isdnloop.c
++++ b/drivers/isdn/isdnloop/isdnloop.c
+@@ -518,9 +518,9 @@ static isdnloop_stat isdnloop_cmd_table[
+ static void
+ isdnloop_fake_err(isdnloop_card *card)
+ {
+-      char buf[60];
++      char buf[64];
+-      sprintf(buf, "E%s", card->omsg);
++      snprintf(buf, sizeof(buf), "E%s", card->omsg);
+       isdnloop_fake(card, buf, -1);
+       isdnloop_fake(card, "NAK", -1);
+ }
+@@ -903,6 +903,8 @@ isdnloop_parse_cmd(isdnloop_card *card)
+       case 7:
+               /* 0x;EAZ */
+               p += 3;
++              if (strlen(p) >= sizeof(card->eazlist[0]))
++                      break;
+               strcpy(card->eazlist[ch - 1], p);
+               break;
+       case 8:
+@@ -1133,7 +1135,7 @@ isdnloop_command(isdn_ctrl *c, isdnloop_
+ {
+       ulong a;
+       int i;
+-      char cbuf[60];
++      char cbuf[80];
+       isdn_ctrl cmd;
+       isdnloop_cdef cdef;
+@@ -1198,7 +1200,6 @@ isdnloop_command(isdn_ctrl *c, isdnloop_
+                       break;
+               if ((c->arg & 255) < ISDNLOOP_BCH) {
+                       char *p;
+-                      char dial[50];
+                       char dcode[4];
+                       a = c->arg;
+@@ -1210,10 +1211,10 @@ isdnloop_command(isdn_ctrl *c, isdnloop_
+                       } else
+                               /* Normal Dial */
+                               strcpy(dcode, "CAL");
+-                      strcpy(dial, p);
+-                      sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
+-                              dcode, dial, c->parm.setup.si1,
+-                              c->parm.setup.si2, c->parm.setup.eazmsn);
++                      snprintf(cbuf, sizeof(cbuf),
++                               "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
++                               dcode, p, c->parm.setup.si1,
++                               c->parm.setup.si2, c->parm.setup.eazmsn);
+                       i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
+               }
+               break;
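
A standalone userspace sketch of why the bounded call matters (illustrative only; none of these names are taken from the driver, and the buffer sizes merely mirror the commit message):

/*
 * Standalone userspace sketch, not driver code: the 60-character
 * card->omsg plus the leading 'E' needs 62 bytes including the NUL, so
 * formatting it into a 60-byte buffer with sprintf() overflows, while
 * the bounded snprintf() into the enlarged 64-byte buffer cannot write
 * past the end and always NUL-terminates.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char omsg[61];			/* stand-in for card->omsg */
	char buf[64];			/* enlarged destination, as in the fix */

	memset(omsg, 'A', 60);
	omsg[60] = '\0';

	int n = snprintf(buf, sizeof(buf), "E%s", omsg);

	printf("snprintf() reported %d characters, buf holds %zu\n",
	       n, strlen(buf));
	return 0;
}
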
diff --git a/queue-3.13/isdnloop-validate-nul-terminated-strings-from-user.patch b/queue-3.13/isdnloop-validate-nul-terminated-strings-from-user.patch
new file mode 100644 (file)
index 0000000..a45595e
--- /dev/null
@@ -0,0 +1,34 @@
+From foo@baz Thu Apr 10 22:03:05 PDT 2014
+From: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+Date: Wed, 2 Apr 2014 12:48:42 +0900
+Subject: isdnloop: Validate NUL-terminated strings from user.
+
+From: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+
+[ Upstream commit 77bc6bed7121936bb2e019a8c336075f4c8eef62 ]
+
+Return -EINVAL unless all of user-given strings are correctly
+NUL-terminated.
+
+Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/isdn/isdnloop/isdnloop.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/isdn/isdnloop/isdnloop.c
++++ b/drivers/isdn/isdnloop/isdnloop.c
+@@ -1070,6 +1070,12 @@ isdnloop_start(isdnloop_card *card, isdn
+               return -EBUSY;
+       if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
+               return -EFAULT;
++
++      for (i = 0; i < 3; i++) {
++              if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i])))
++                      return -EINVAL;
++      }
++
+       spin_lock_irqsave(&card->isdnloop_lock, flags);
+       switch (sdef.ptype) {
+       case ISDN_PTYPE_EURO:
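
The memchr() check above generalises to any fixed-size field copied from an untrusted source. A minimal userspace sketch of the same idea follows; the structure layout is made up and is not the real isdnloop_sdef:

/*
 * Userspace sketch of the validation idea: reject a fixed-size field
 * copied from an untrusted source unless it contains a NUL terminator.
 * The structure below is made up and not the real isdnloop_sdef layout.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct fake_sdef {
	char num[3][20];
};

static int validate(const struct fake_sdef *sdef)
{
	for (int i = 0; i < 3; i++) {
		if (!memchr(sdef->num[i], 0, sizeof(sdef->num[i])))
			return -EINVAL;	/* no NUL anywhere in the field */
	}
	return 0;
}

int main(void)
{
	struct fake_sdef ok = { .num = { "123", "456", "789" } };
	struct fake_sdef bad;

	memset(&bad, 'x', sizeof(bad));	/* no terminator at all */

	printf("terminated:   %d\n", validate(&ok));
	printf("unterminated: %d\n", validate(&bad));
	return 0;
}
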
diff --git a/queue-3.13/net-cdc_ncm-fix-control-message-ordering.patch b/queue-3.13/net-cdc_ncm-fix-control-message-ordering.patch
new file mode 100644 (file)
index 0000000..986e779
--- /dev/null
@@ -0,0 +1,151 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= <bjorn@mork.no>
+Date: Mon, 17 Mar 2014 16:25:18 +0100
+Subject: net: cdc_ncm: fix control message ordering
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= <bjorn@mork.no>
+
+[ Upstream commit ff0992e9036e9810e7cd45234fa32ca1e79750e2 ]
+
+This is a context modified revert of commit 6a9612e2cb22
+("net: cdc_ncm: remove ncm_parm field") which introduced
+a NCM specification violation, causing setup errors for
+some devices. These errors resulted in the device and
+host disagreeing about shared settings, with complete
+failure to communicate as the end result.
+
+The NCM specification requires that many of the NCM-specific
+control requests are sent only while the NCM Data Interface
+is in alternate setting 0. Reverting the commit ensures that
+we follow this requirement.
+
+Fixes: 6a9612e2cb22 ("net: cdc_ncm: remove ncm_parm field")
+Reported-and-tested-by: Pasi Kärkkäinen <pasik@iki.fi>
+Reported-by: Thomas Schäfer <tschaefer@t-online.de>
+Signed-off-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/cdc_ncm.c   |   48 +++++++++++++++++++++-----------------------
+ include/linux/usb/cdc_ncm.h |    1 
+ 2 files changed, 24 insertions(+), 25 deletions(-)
+
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -69,7 +69,6 @@ static struct usb_driver cdc_ncm_driver;
+ static int cdc_ncm_setup(struct usbnet *dev)
+ {
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+-      struct usb_cdc_ncm_ntb_parameters ncm_parm;
+       u32 val;
+       u8 flags;
+       u8 iface_no;
+@@ -83,22 +82,22 @@ static int cdc_ncm_setup(struct usbnet *
+       err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
+                             USB_TYPE_CLASS | USB_DIR_IN
+                             |USB_RECIP_INTERFACE,
+-                            0, iface_no, &ncm_parm,
+-                            sizeof(ncm_parm));
++                            0, iface_no, &ctx->ncm_parm,
++                            sizeof(ctx->ncm_parm));
+       if (err < 0) {
+               dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
+               return err; /* GET_NTB_PARAMETERS is required */
+       }
+       /* read correct set of parameters according to device mode */
+-      ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize);
+-      ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize);
+-      ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder);
+-      ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor);
+-      ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment);
++      ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
++      ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
++      ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
++      ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
++      ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+       /* devices prior to NCM Errata shall set this field to zero */
+-      ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams);
+-      ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported);
++      ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
++      ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
+       /* there are some minor differences in NCM and MBIM defaults */
+       if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
+@@ -147,7 +146,7 @@ static int cdc_ncm_setup(struct usbnet *
+       }
+       /* inform device about NTB input size changes */
+-      if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) {
++      if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+               __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+               err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
+@@ -163,14 +162,6 @@ static int cdc_ncm_setup(struct usbnet *
+               dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
+                       CDC_NCM_NTB_MAX_SIZE_TX);
+               ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
+-
+-              /* Adding a pad byte here simplifies the handling in
+-               * cdc_ncm_fill_tx_frame, by making tx_max always
+-               * represent the real skb max size.
+-               */
+-              if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+-                      ctx->tx_max++;
+-
+       }
+       /*
+@@ -440,6 +431,10 @@ advance:
+               goto error2;
+       }
++      /* initialize data interface */
++      if (cdc_ncm_setup(dev))
++              goto error2;
++
+       /* configure data interface */
+       temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
+       if (temp) {
+@@ -454,12 +449,6 @@ advance:
+               goto error2;
+       }
+-      /* initialize data interface */
+-      if (cdc_ncm_setup(dev)) {
+-              dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n");
+-              goto error2;
+-      }
+-
+       usb_set_intfdata(ctx->data, dev);
+       usb_set_intfdata(ctx->control, dev);
+@@ -476,6 +465,15 @@ advance:
+       dev->hard_mtu = ctx->tx_max;
+       dev->rx_urb_size = ctx->rx_max;
++      /* cdc_ncm_setup will override dwNtbOutMaxSize if it is
++       * outside the sane range. Adding a pad byte here if necessary
++       * simplifies the handling in cdc_ncm_fill_tx_frame, making
++       * tx_max always represent the real skb max size.
++       */
++      if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
++          ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
++              ctx->tx_max++;
++
+       return 0;
+ error2:
+--- a/include/linux/usb/cdc_ncm.h
++++ b/include/linux/usb/cdc_ncm.h
+@@ -88,6 +88,7 @@
+ #define cdc_ncm_data_intf_is_mbim(x)  ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
+ struct cdc_ncm_ctx {
++      struct usb_cdc_ncm_ntb_parameters ncm_parm;
+       struct hrtimer tx_timer;
+       struct tasklet_struct bh;
diff --git a/queue-3.13/net-fix-for-a-race-condition-in-the-inet-frag-code.patch b/queue-3.13/net-fix-for-a-race-condition-in-the-inet-frag-code.patch
new file mode 100644 (file)
index 0000000..51f232b
--- /dev/null
@@ -0,0 +1,94 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Nikolay Aleksandrov <nikolay@redhat.com>
+Date: Mon, 3 Mar 2014 23:19:18 +0100
+Subject: net: fix for a race condition in the inet frag code
+
+From: Nikolay Aleksandrov <nikolay@redhat.com>
+
+[ Upstream commit 24b9bf43e93e0edd89072da51cf1fab95fc69dec ]
+
+I stumbled upon this very serious bug while hunting for another one;
+it's a very subtle race condition between inet_frag_evictor,
+inet_frag_intern and the IPv4/6 frag_queue and expire functions
+(basically the users of inet_frag_kill/inet_frag_put).
+
+What happens is that after a fragment has been added to the hash chain
+but before it's been added to the lru_list (inet_frag_lru_add) in
+inet_frag_intern, it may get deleted (either by an expired timer if
+the system load is high or the timer sufficiently low, or by the
+frag_queue function for different reasons) before it's added to the
+lru_list, then after it gets added it's a matter of time for the
+evictor to get to a piece of memory which has been freed leading to a
+number of different bugs depending on what's left there.
+
+I've been able to trigger this on both IPv4 and IPv6 (which is normal
+as the frag code is the same), but it's been much more difficult to
+trigger on IPv4 due to the protocol differences in how fragments
+are treated.
+
+The setup I used to reproduce this is: 2 machines with 4 x 10G bonded
+in a RR bond, so the same flow can be seen on multiple cards at the
+same time. Then I used multiple instances of ping/ping6 to generate
+fragmented packets and flood the machines with them while running
+other processes to load the attacked machine.
+
+It is very important to have the _same flow_ coming in on multiple CPUs
+concurrently. Usually the attacked machine would die in less than 30
+minutes; if configured properly to have many evictor calls and timeouts,
+it could happen in 10 minutes or so.
+
+An important point to make is that any caller (frag_queue or timer) of
+inet_frag_kill will remove both the timer refcount and the
+original/guarding refcount thus removing everything that's keeping the
+frag from being freed at the next inet_frag_put.  All of this could
+happen before the frag was ever added to the LRU list, then it gets
+added and the evictor uses a freed fragment.
+
+An example for IPv6 would be if a fragment is being added and is at
+the stage of being inserted in the hash after the hash lock is
+released, but before inet_frag_lru_add executes (or is able to obtain
+the lru lock) another overlapping fragment for the same flow arrives
+at a different CPU which finds it in the hash, but since it's
+overlapping it drops it invoking inet_frag_kill and thus removing all
+guarding refcounts, and afterwards freeing it by invoking
+inet_frag_put which removes the last refcount added previously by
+inet_frag_find, then inet_frag_lru_add gets executed by
+inet_frag_intern and we have a freed fragment in the lru_list.
+
+The fix is simple: just move the lru_add under the hash chain locked
+region so when a removing function is called it'll have to wait for
+the fragment to be added to the lru_list, and then it'll remove it (it
+works because the hash chain removal is done before the lru_list one
+and there's no window between the two list adds when the frag can get
+dropped). With this fix applied I couldn't kill the same machine in 24
+hours with the same setup.
+
+Fixes: 3ef0eb0db4bf ("net: frag, move LRU list maintenance outside of
+rwlock")
+
+CC: Florian Westphal <fw@strlen.de>
+CC: Jesper Dangaard Brouer <brouer@redhat.com>
+CC: David S. Miller <davem@davemloft.net>
+
+Signed-off-by: Nikolay Aleksandrov <nikolay@redhat.com>
+Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/inet_fragment.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -278,9 +278,10 @@ static struct inet_frag_queue *inet_frag
+       atomic_inc(&qp->refcnt);
+       hlist_add_head(&qp->list, &hb->chain);
++      inet_frag_lru_add(nf, qp);
+       spin_unlock(&hb->chain_lock);
+       read_unlock(&f->lock);
+-      inet_frag_lru_add(nf, qp);
++
+       return qp;
+ }
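
The essence of the fix above is that hash publication and LRU insertion now happen inside one locked region, so no other CPU can see the half-initialised state. A much-simplified userspace sketch of that pattern follows (a single lock stands in for the kernel's separate chain and LRU locks, and rehashing is omitted):

/*
 * Much-simplified userspace sketch of the locking change, not kernel
 * code: with both insertions inside the same critical section, a
 * concurrent kill/put path can no longer find the fragment in the hash
 * while it is still missing from the LRU list.
 */
#include <pthread.h>

struct frag {
	struct frag *hash_next;
	struct frag *lru_next;
};

static pthread_spinlock_t chain_lock;
static struct frag *hash_head, *lru_head;

static void frag_intern(struct frag *q)
{
	pthread_spin_lock(&chain_lock);
	q->hash_next = hash_head;	/* visible to lookups from here on */
	hash_head = q;
	q->lru_next = lru_head;		/* LRU add now inside the locked region */
	lru_head = q;
	pthread_spin_unlock(&chain_lock);
}

int main(void)
{
	struct frag q = { 0 };

	pthread_spin_init(&chain_lock, PTHREAD_PROCESS_PRIVATE);
	frag_intern(&q);
	return 0;
}
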
diff --git a/queue-3.13/net-micrel-ks8851-ml-add-vdd-supply-support.patch b/queue-3.13/net-micrel-ks8851-ml-add-vdd-supply-support.patch
new file mode 100644 (file)
index 0000000..7295848
--- /dev/null
@@ -0,0 +1,105 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Nishanth Menon <nm@ti.com>
+Date: Fri, 21 Mar 2014 01:52:48 -0500
+Subject: net: micrel : ks8851-ml: add vdd-supply support
+
+From: Nishanth Menon <nm@ti.com>
+
+[ Upstream commit ebf4ad955d3e26d4d2a33709624fc7b5b9d3b969 ]
+
+A few platforms use an external regulator to keep the ethernet MAC supplied.
+So, request and enable the regulator for driver functionality.
+
+Fixes: 66fda75f47dc (regulator: core: Replace direct ops->disable usage)
+Reported-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Suggested-by: Markus Pargmann <mpa@pengutronix.de>
+Signed-off-by: Nishanth Menon <nm@ti.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/net/micrel-ks8851.txt |    1 
+ drivers/net/ethernet/micrel/ks8851.c                    |   30 +++++++++++++++-
+ 2 files changed, 30 insertions(+), 1 deletion(-)
+
+--- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt
++++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
+@@ -7,3 +7,4 @@ Required properties:
+ Optional properties:
+ - local-mac-address : Ethernet mac address to use
++- vdd-supply: supply for Ethernet mac
+--- a/drivers/net/ethernet/micrel/ks8851.c
++++ b/drivers/net/ethernet/micrel/ks8851.c
+@@ -23,6 +23,7 @@
+ #include <linux/crc32.h>
+ #include <linux/mii.h>
+ #include <linux/eeprom_93cx6.h>
++#include <linux/regulator/consumer.h>
+ #include <linux/spi/spi.h>
+@@ -83,6 +84,7 @@ union ks8851_tx_hdr {
+  * @rc_rxqcr: Cached copy of KS_RXQCR.
+  * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
+  * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
++ * @vdd_reg:  Optional regulator supplying the chip
+  *
+  * The @lock ensures that the chip is protected when certain operations are
+  * in progress. When the read or write packet transfer is in progress, most
+@@ -130,6 +132,7 @@ struct ks8851_net {
+       struct spi_transfer     spi_xfer2[2];
+       struct eeprom_93cx6     eeprom;
++      struct regulator        *vdd_reg;
+ };
+ static int msg_enable;
+@@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_devic
+       ks->spidev = spi;
+       ks->tx_space = 6144;
++      ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
++      if (IS_ERR(ks->vdd_reg)) {
++              ret = PTR_ERR(ks->vdd_reg);
++              if (ret == -EPROBE_DEFER)
++                      goto err_reg;
++      } else {
++              ret = regulator_enable(ks->vdd_reg);
++              if (ret) {
++                      dev_err(&spi->dev, "regulator enable fail: %d\n",
++                              ret);
++                      goto err_reg_en;
++              }
++      }
++
++
+       mutex_init(&ks->lock);
+       spin_lock_init(&ks->statelock);
+@@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_devic
+ err_netdev:
+       free_irq(ndev->irq, ks);
+-err_id:
+ err_irq:
++err_id:
++      if (!IS_ERR(ks->vdd_reg))
++              regulator_disable(ks->vdd_reg);
++err_reg_en:
++      if (!IS_ERR(ks->vdd_reg))
++              regulator_put(ks->vdd_reg);
++err_reg:
+       free_netdev(ndev);
+       return ret;
+ }
+@@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_devi
+       unregister_netdev(priv->netdev);
+       free_irq(spi->irq, priv);
++      if (!IS_ERR(priv->vdd_reg)) {
++              regulator_disable(priv->vdd_reg);
++              regulator_put(priv->vdd_reg);
++      }
+       free_netdev(priv->netdev);
+       return 0;
diff --git a/queue-3.13/net-sctp-fix-skb-leakage-in-cookie-echo-path-of.patch b/queue-3.13/net-sctp-fix-skb-leakage-in-cookie-echo-path-of.patch
new file mode 100644 (file)
index 0000000..7c9af76
--- /dev/null
@@ -0,0 +1,121 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Daniel Borkmann <dborkman@redhat.com>
+Date: Tue, 4 Mar 2014 16:35:51 +0100
+Subject: net: sctp: fix skb leakage in COOKIE ECHO path of
+ chunk->auth_chunk
+
+From: Daniel Borkmann <dborkman@redhat.com>
+
+[ Upstream commit c485658bae87faccd7aed540fd2ca3ab37992310 ]
+
+While working on ec0223ec48a9 ("net: sctp: fix sctp_sf_do_5_1D_ce to
+verify if we/peer is AUTH capable"), we noticed that there's a skb
+memory leakage in the error path.
+
+Running the same reproducer as in ec0223ec48a9 and by unconditionally
+jumping to the error label (to simulate an error condition) in
+sctp_sf_do_5_1D_ce() receive path lets kmemleak detector bark about
+the unfreed chunk->auth_chunk skb clone:
+
+Unreferenced object 0xffff8800b8f3a000 (size 256):
+  comm "softirq", pid 0, jiffies 4294769856 (age 110.757s)
+  hex dump (first 32 bytes):
+    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+    89 ab 75 5e d4 01 58 13 00 00 00 00 00 00 00 00  ..u^..X.........
+  backtrace:
+    [<ffffffff816660be>] kmemleak_alloc+0x4e/0xb0
+    [<ffffffff8119f328>] kmem_cache_alloc+0xc8/0x210
+    [<ffffffff81566929>] skb_clone+0x49/0xb0
+    [<ffffffffa0467459>] sctp_endpoint_bh_rcv+0x1d9/0x230 [sctp]
+    [<ffffffffa046fdbc>] sctp_inq_push+0x4c/0x70 [sctp]
+    [<ffffffffa047e8de>] sctp_rcv+0x82e/0x9a0 [sctp]
+    [<ffffffff815abd38>] ip_local_deliver_finish+0xa8/0x210
+    [<ffffffff815a64af>] nf_reinject+0xbf/0x180
+    [<ffffffffa04b4762>] nfqnl_recv_verdict+0x1d2/0x2b0 [nfnetlink_queue]
+    [<ffffffffa04aa40b>] nfnetlink_rcv_msg+0x14b/0x250 [nfnetlink]
+    [<ffffffff815a3269>] netlink_rcv_skb+0xa9/0xc0
+    [<ffffffffa04aa7cf>] nfnetlink_rcv+0x23f/0x408 [nfnetlink]
+    [<ffffffff815a2bd8>] netlink_unicast+0x168/0x250
+    [<ffffffff815a2fa1>] netlink_sendmsg+0x2e1/0x3f0
+    [<ffffffff8155cc6b>] sock_sendmsg+0x8b/0xc0
+    [<ffffffff8155d449>] ___sys_sendmsg+0x369/0x380
+
+What happens is that commit bbd0d59809f9 clones the skb containing
+the AUTH chunk in sctp_endpoint_bh_rcv() in the edge case where
+an endpoint requires COOKIE-ECHO chunks to be authenticated:
+
+  ---------- INIT[RANDOM; CHUNKS; HMAC-ALGO] ---------->
+  <------- INIT-ACK[RANDOM; CHUNKS; HMAC-ALGO] ---------
+  ------------------ AUTH; COOKIE-ECHO ---------------->
+  <-------------------- COOKIE-ACK ---------------------
+
+When we enter sctp_sf_do_5_1D_ce() and before we actually get to
+the point where we process (and subsequently free) a non-NULL
+chunk->auth_chunk, we could hit the "goto nomem_init" path from
+an error condition and thus leave the cloned skb around w/o
+freeing it.
+
+The fix is to centrally free such clones in the sctp_chunk_destroy()
+handler that is invoked from sctp_chunk_free() after all refs have been
+dropped, and to move both kfree_skb(chunk->auth_chunk) calls there,
+so that chunk->auth_chunk is either NULL (since sctp_chunkify()
+allocs new chunks through kmem_cache_zalloc()) or non-NULL with
+a valid skb pointer. chunk->skb and chunk->auth_chunk are the
+only skbs in the sctp_chunk structure that need to be handled.
+
+While at it, we should use consume_skb() for both. It is the same
+as dev_kfree_skb() but more appropriately named, as we are not
+a device but a protocol. This also effectively turns both former
+kfree_skb() invocations into consume_skb(). The functions are the
+same, except that kfree_skb() assumes the frame was dropped after a
+failure (e.g. for tools like drop monitor), so consume_skb() is the
+more appropriate choice in sctp_chunk_destroy().
+
+Fixes: bbd0d59809f9 ("[SCTP]: Implement the receive and verification of AUTH chunk")
+Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
+Cc: Vlad Yasevich <yasevich@gmail.com>
+Cc: Neil Horman <nhorman@tuxdriver.com>
+Acked-by: Vlad Yasevich <vyasevich@gmail.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/sm_make_chunk.c |    4 ++--
+ net/sctp/sm_statefuns.c  |    5 -----
+ 2 files changed, 2 insertions(+), 7 deletions(-)
+
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -1420,8 +1420,8 @@ static void sctp_chunk_destroy(struct sc
+       BUG_ON(!list_empty(&chunk->list));
+       list_del_init(&chunk->transmitted_list);
+-      /* Free the chunk skb data and the SCTP_chunk stub itself. */
+-      dev_kfree_skb(chunk->skb);
++      consume_skb(chunk->skb);
++      consume_skb(chunk->auth_chunk);
+       SCTP_DBG_OBJCNT_DEC(chunk);
+       kmem_cache_free(sctp_chunk_cachep, chunk);
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -761,7 +761,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(st
+               /* Make sure that we and the peer are AUTH capable */
+               if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
+-                      kfree_skb(chunk->auth_chunk);
+                       sctp_association_free(new_asoc);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+               }
+@@ -776,10 +775,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(st
+               auth.transport = chunk->transport;
+               ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
+-
+-              /* We can now safely free the auth_chunk clone */
+-              kfree_skb(chunk->auth_chunk);
+-
+               if (ret != SCTP_IERROR_NO_ERROR) {
+                       sctp_association_free(new_asoc);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
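
The ownership rule established above, where auth_chunk is either NULL or owned until the destructor runs and the destructor frees it unconditionally, can be mirrored in a small userspace sketch; the structure and helpers below are illustrative and not SCTP code:

/*
 * Userspace analogy, not SCTP code: freeing an optional sub-buffer in a
 * single destructor keeps every early-return path leak-free, because
 * the field is either NULL or owned by the object until destroy() runs.
 * free(NULL) is a no-op here, just as the destructor above can call
 * consume_skb() on an auth_chunk field that may still be NULL.
 */
#include <stdlib.h>

struct chunk {
	void *skb;		/* always present */
	void *auth_clone;	/* optional, may stay NULL */
};

static struct chunk *chunk_alloc(size_t len, int with_auth)
{
	struct chunk *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->skb = malloc(len);
	if (with_auth)
		c->auth_clone = malloc(len);
	return c;
}

static void chunk_destroy(struct chunk *c)
{
	if (!c)
		return;
	free(c->auth_clone);	/* centralised: error paths need no extra free */
	free(c->skb);
	free(c);
}

int main(void)
{
	chunk_destroy(chunk_alloc(64, 1));
	chunk_destroy(chunk_alloc(64, 0));
	return 0;
}
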
diff --git a/queue-3.13/net-socket-error-on-a-negative-msg_namelen.patch b/queue-3.13/net-socket-error-on-a-negative-msg_namelen.patch
new file mode 100644 (file)
index 0000000..c5ee10d
--- /dev/null
@@ -0,0 +1,35 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Matthew Leach <matthew.leach@arm.com>
+Date: Tue, 11 Mar 2014 11:58:27 +0000
+Subject: net: socket: error on a negative msg_namelen
+
+From: Matthew Leach <matthew.leach@arm.com>
+
+[ Upstream commit dbb490b96584d4e958533fb637f08b557f505657 ]
+
+When copying in a struct msghdr from the user, if the user has set the
+msg_namelen parameter to a negative value it gets clamped to a valid
+size due to a comparison between signed and unsigned values.
+
+Ensure the syscall errors when the user passes in a negative value.
+
+Signed-off-by: Matthew Leach <matthew.leach@arm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/socket.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1972,6 +1972,10 @@ static int copy_msghdr_from_user(struct
+ {
+       if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+               return -EFAULT;
++
++      if (kmsg->msg_namelen < 0)
++              return -EINVAL;
++
+       if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+               kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+       return 0;
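
A standalone illustration of the bug class follows; it is not net/socket.c itself, and struct storage merely stands in for sockaddr_storage:

/*
 * Standalone illustration, not net/socket.c: in "msg_namelen > sizeof(...)"
 * the signed int is converted to an unsigned type, so -1 compares as a
 * huge value and is silently clamped to a "valid" length instead of being
 * rejected.  The explicit "< 0" check added by the patch catches it first.
 */
#include <stdio.h>

struct storage { char data[128]; };	/* stand-in for sockaddr_storage */

static int check_namelen(int msg_namelen)
{
	if (msg_namelen < 0)
		return -22;		/* -EINVAL, as the fix returns */

	if (msg_namelen > (int)sizeof(struct storage))
		msg_namelen = sizeof(struct storage);
	return msg_namelen;
}

int main(void)
{
	printf("namelen 200 -> %d\n", check_namelen(200));	/* clamped to 128 */
	printf("namelen  -1 -> %d\n", check_namelen(-1));	/* now rejected  */
	return 0;
}
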
diff --git a/queue-3.13/net-unix-non-blocking-recvmsg-should-not-return.patch b/queue-3.13/net-unix-non-blocking-recvmsg-should-not-return.patch
new file mode 100644 (file)
index 0000000..099ab0c
--- /dev/null
@@ -0,0 +1,74 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 25 Mar 2014 18:42:27 -0700
+Subject: net: unix: non blocking recvmsg() should not return
+ -EINTR
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit de1443916791d75fdd26becb116898277bb0273f ]
+
+Some applications didn't expect that recvmsg() on a non-blocking socket
+could return -EINTR. This possibility was added as a side effect
+of commit b3ca9b02b00704 ("net: fix multithreaded signal handling in
+unix recv routines").
+
+To hit this bug, you need to be a bit unlucky, as the u->readlock
+mutex is usually held for very small periods.
+
+Fixes: b3ca9b02b00704 ("net: fix multithreaded signal handling in unix recv routines")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Rainer Weikusat <rweikusat@mobileactivedefense.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/unix/af_unix.c |   17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1785,8 +1785,11 @@ static int unix_dgram_recvmsg(struct kio
+               goto out;
+       err = mutex_lock_interruptible(&u->readlock);
+-      if (err) {
+-              err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
++      if (unlikely(err)) {
++              /* recvmsg() in non blocking mode is supposed to return -EAGAIN
++               * sk_rcvtimeo is not honored by mutex_lock_interruptible()
++               */
++              err = noblock ? -EAGAIN : -ERESTARTSYS;
+               goto out;
+       }
+@@ -1911,6 +1914,7 @@ static int unix_stream_recvmsg(struct ki
+       struct unix_sock *u = unix_sk(sk);
+       struct sockaddr_un *sunaddr = msg->msg_name;
+       int copied = 0;
++      int noblock = flags & MSG_DONTWAIT;
+       int check_creds = 0;
+       int target;
+       int err = 0;
+@@ -1926,7 +1930,7 @@ static int unix_stream_recvmsg(struct ki
+               goto out;
+       target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+-      timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
++      timeo = sock_rcvtimeo(sk, noblock);
+       /* Lock the socket to prevent queue disordering
+        * while sleeps in memcpy_tomsg
+@@ -1938,8 +1942,11 @@ static int unix_stream_recvmsg(struct ki
+       }
+       err = mutex_lock_interruptible(&u->readlock);
+-      if (err) {
+-              err = sock_intr_errno(timeo);
++      if (unlikely(err)) {
++              /* recvmsg() in non blocking mode is supposed to return -EAGAIN
++               * sk_rcvtimeo is not honored by mutex_lock_interruptible()
++               */
++              err = noblock ? -EAGAIN : -ERESTARTSYS;
+               goto out;
+       }
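
The new error mapping can be summarised in a few lines. This is a conceptual sketch rather than the kernel function, and ERESTARTSYS is defined locally because it is kernel-internal:

/*
 * Conceptual sketch of the new error mapping, not the kernel code.
 * ERESTARTSYS (512) is a kernel-internal errno that is not exported to
 * userspace, so it is defined here only for illustration.
 */
#include <errno.h>
#include <stdio.h>

#define ERESTARTSYS 512

static int map_readlock_error(int lock_err, int noblock)
{
	if (!lock_err)
		return 0;	/* readlock taken, continue receiving */

	/* sk_rcvtimeo is not honoured by mutex_lock_interruptible(), so
	 * deriving the error from the timeout (the old code) could hand
	 * -EINTR to a non-blocking caller.
	 */
	return noblock ? -EAGAIN : -ERESTARTSYS;
}

int main(void)
{
	printf("non-blocking, signal: %d\n", map_readlock_error(-EINTR, 1));
	printf("blocking,     signal: %d\n", map_readlock_error(-EINTR, 0));
	return 0;
}
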
diff --git a/queue-3.13/net-vxlan-fix-crash-when-interface-is-created-with-no-group.patch b/queue-3.13/net-vxlan-fix-crash-when-interface-is-created-with-no-group.patch
new file mode 100644 (file)
index 0000000..073ea09
--- /dev/null
@@ -0,0 +1,154 @@
+From foo@baz Thu Apr 10 22:03:05 PDT 2014
+From: Mike Rapoport <mike.rapoport@ravellosystems.com>
+Date: Tue, 1 Apr 2014 09:23:01 +0300
+Subject: net: vxlan: fix crash when interface is created with no group
+
+From: Mike Rapoport <mike.rapoport@ravellosystems.com>
+
+[ Upstream commit 5933a7bbb5de66482ea8aa874a7ebaf8e67603c4 ]
+
+If the vxlan interface is created without explicit group definition,
+there are corner cases which may cause kernel panic.
+
+For instance, in the following scenario:
+
+node A:
+$ ip link add dev vxlan42  address 2c:c2:60:00:10:20 type vxlan id 42
+$ ip addr add dev vxlan42 10.0.0.1/24
+$ ip link set up dev vxlan42
+$ arp -i vxlan42 -s 10.0.0.2 2c:c2:60:00:01:02
+$ bridge fdb add dev vxlan42 to 2c:c2:60:00:01:02 dst <IPv4 address>
+$ ping 10.0.0.2
+
+node B:
+$ ip link add dev vxlan42 address 2c:c2:60:00:01:02 type vxlan id 42
+$ ip addr add dev vxlan42 10.0.0.2/24
+$ ip link set up dev vxlan42
+$ arp -i vxlan42 -s 10.0.0.1 2c:c2:60:00:10:20
+
+node B crashes:
+
+ vxlan42: 2c:c2:60:00:10:20 migrated from 4011:eca4:c0a8:6466:c0a8:6415:8e09:2118 to (invalid address)
+ vxlan42: 2c:c2:60:00:10:20 migrated from 4011:eca4:c0a8:6466:c0a8:6415:8e09:2118 to (invalid address)
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000046
+ IP: [<ffffffff8143c459>] ip6_route_output+0x58/0x82
+ PGD 7bd89067 PUD 7bd4e067 PMD 0
+ Oops: 0000 [#1] SMP
+ Modules linked in:
+ CPU: 1 PID: 0 Comm: swapper/1 Not tainted 3.14.0-rc8-hvx-xen-00019-g97a5221-dirty #154
+ Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+ task: ffff88007c774f50 ti: ffff88007c79c000 task.ti: ffff88007c79c000
+ RIP: 0010:[<ffffffff8143c459>]  [<ffffffff8143c459>] ip6_route_output+0x58/0x82
+ RSP: 0018:ffff88007fd03668  EFLAGS: 00010282
+ RAX: 0000000000000000 RBX: ffffffff8186a000 RCX: 0000000000000040
+ RDX: 0000000000000000 RSI: ffff88007b0e4a80 RDI: ffff88007fd03754
+ RBP: ffff88007fd03688 R08: ffff88007b0e4a80 R09: 0000000000000000
+ R10: 0200000a0100000a R11: 0001002200000000 R12: ffff88007fd03740
+ R13: ffff88007b0e4a80 R14: ffff88007b0e4a80 R15: ffff88007bba0c50
+ FS:  0000000000000000(0000) GS:ffff88007fd00000(0000) knlGS:0000000000000000
+ CS:  0010 DS: 0000 ES: 0000 CR0: 000000008005003b
+ CR2: 0000000000000046 CR3: 000000007bb60000 CR4: 00000000000006e0
+ Stack:
+  0000000000000000 ffff88007fd037a0 ffffffff8186a000 ffff88007fd03740
+  ffff88007fd036c8 ffffffff814320bb 0000000000006e49 ffff88007b8b7360
+  ffff88007bdbf200 ffff88007bcbc000 ffff88007b8b7000 ffff88007b8b7360
+ Call Trace:
+  <IRQ>
+  [<ffffffff814320bb>] ip6_dst_lookup_tail+0x2d/0xa4
+  [<ffffffff814322a5>] ip6_dst_lookup+0x10/0x12
+  [<ffffffff81323b4e>] vxlan_xmit_one+0x32a/0x68c
+  [<ffffffff814a325a>] ? _raw_spin_unlock_irqrestore+0x12/0x14
+  [<ffffffff8104c551>] ? lock_timer_base.isra.23+0x26/0x4b
+  [<ffffffff8132451a>] vxlan_xmit+0x66a/0x6a8
+  [<ffffffff8141a365>] ? ipt_do_table+0x35f/0x37e
+  [<ffffffff81204ba2>] ? selinux_ip_postroute+0x41/0x26e
+  [<ffffffff8139d0c1>] dev_hard_start_xmit+0x2ce/0x3ce
+  [<ffffffff8139d491>] __dev_queue_xmit+0x2d0/0x392
+  [<ffffffff813b380f>] ? eth_header+0x28/0xb5
+  [<ffffffff8139d569>] dev_queue_xmit+0xb/0xd
+  [<ffffffff813a5aa6>] neigh_resolve_output+0x134/0x152
+  [<ffffffff813db741>] ip_finish_output2+0x236/0x299
+  [<ffffffff813dc074>] ip_finish_output+0x98/0x9d
+  [<ffffffff813dc749>] ip_output+0x62/0x67
+  [<ffffffff813da9f2>] dst_output+0xf/0x11
+  [<ffffffff813dc11c>] ip_local_out+0x1b/0x1f
+  [<ffffffff813dcf1b>] ip_send_skb+0x11/0x37
+  [<ffffffff813dcf70>] ip_push_pending_frames+0x2f/0x33
+  [<ffffffff813ff732>] icmp_push_reply+0x106/0x115
+  [<ffffffff813ff9e4>] icmp_reply+0x142/0x164
+  [<ffffffff813ffb3b>] icmp_echo.part.16+0x46/0x48
+  [<ffffffff813c1d30>] ? nf_iterate+0x43/0x80
+  [<ffffffff813d8037>] ? xfrm4_policy_check.constprop.11+0x52/0x52
+  [<ffffffff813ffb62>] icmp_echo+0x25/0x27
+  [<ffffffff814005f7>] icmp_rcv+0x1d2/0x20a
+  [<ffffffff813d8037>] ? xfrm4_policy_check.constprop.11+0x52/0x52
+  [<ffffffff813d810d>] ip_local_deliver_finish+0xd6/0x14f
+  [<ffffffff813d8037>] ? xfrm4_policy_check.constprop.11+0x52/0x52
+  [<ffffffff813d7fde>] NF_HOOK.constprop.10+0x4c/0x53
+  [<ffffffff813d82bf>] ip_local_deliver+0x4a/0x4f
+  [<ffffffff813d7f7b>] ip_rcv_finish+0x253/0x26a
+  [<ffffffff813d7d28>] ? inet_add_protocol+0x3e/0x3e
+  [<ffffffff813d7fde>] NF_HOOK.constprop.10+0x4c/0x53
+  [<ffffffff813d856a>] ip_rcv+0x2a6/0x2ec
+  [<ffffffff8139a9a0>] __netif_receive_skb_core+0x43e/0x478
+  [<ffffffff812a346f>] ? virtqueue_poll+0x16/0x27
+  [<ffffffff8139aa2f>] __netif_receive_skb+0x55/0x5a
+  [<ffffffff8139aaaa>] process_backlog+0x76/0x12f
+  [<ffffffff8139add8>] net_rx_action+0xa2/0x1ab
+  [<ffffffff81047847>] __do_softirq+0xca/0x1d1
+  [<ffffffff81047ace>] irq_exit+0x3e/0x85
+  [<ffffffff8100b98b>] do_IRQ+0xa9/0xc4
+  [<ffffffff814a37ad>] common_interrupt+0x6d/0x6d
+  <EOI>
+  [<ffffffff810378db>] ? native_safe_halt+0x6/0x8
+  [<ffffffff810110c7>] default_idle+0x9/0xd
+  [<ffffffff81011694>] arch_cpu_idle+0x13/0x1c
+  [<ffffffff8107480d>] cpu_startup_entry+0xbc/0x137
+  [<ffffffff8102e741>] start_secondary+0x1a0/0x1a5
+ Code: 24 14 e8 f1 e5 01 00 31 d2 a8 32 0f 95 c2 49 8b 44 24 2c 49 0b 44 24 24 74 05 83 ca 04 eb 1c 4d 85 ed 74 17 49 8b 85 a8 02 00 00 <66> 8b 40 46 66 c1 e8 07 83 e0 07 c1 e0 03 09 c2 4c 89 e6 48 89
+ RIP  [<ffffffff8143c459>] ip6_route_output+0x58/0x82
+  RSP <ffff88007fd03668>
+ CR2: 0000000000000046
+ ---[ end trace 4612329caab37efd ]---
+
+When the vxlan interface is created without an explicit group definition,
+the default_dst protocol family is initialized to AF_UNSPEC and the driver
+assumes an IPv4 configuration. On the other hand, the default_dst protocol
+family is used to differentiate between the IPv4 and IPv6 cases and, since
+AF_UNSPEC != AF_INET, the processing takes the IPv6 path.
+
+Making the IPv4 assumption explicit by setting the default_dst protocol
+family to AF_INET and preventing the mixing of IPv4 and IPv6 addresses in
+snooped fdb entries fixes the corner case crashes.
+
+Signed-off-by: Mike Rapoport <mike.rapoport@ravellosystems.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/vxlan.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -777,6 +777,9 @@ static int vxlan_fdb_add(struct ndmsg *n
+       if (err)
+               return err;
++      if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
++              return -EAFNOSUPPORT;
++
+       spin_lock_bh(&vxlan->hash_lock);
+       err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
+                              port, vni, ifindex, ndm->ndm_flags);
+@@ -2486,9 +2489,10 @@ static int vxlan_newlink(struct net *net
+       vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+       dst->remote_vni = vni;
++      /* Unless IPv6 is explicitly requested, assume IPv4 */
++      dst->remote_ip.sa.sa_family = AF_INET;
+       if (data[IFLA_VXLAN_GROUP]) {
+               dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+-              dst->remote_ip.sa.sa_family = AF_INET;
+       } else if (data[IFLA_VXLAN_GROUP6]) {
+               if (!IS_ENABLED(CONFIG_IPV6))
+                       return -EPFNOSUPPORT;
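
Both guards added above amount to keeping the address family consistent. A simplified sketch with made-up types follows; the real code operates on struct vxlan_dev and union vxlan_addr:

/*
 * Minimal sketch of the two guards (simplified, made-up types): default
 * to IPv4 when no group is given, and refuse fdb entries whose address
 * family differs from the device's remote address family.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>

struct fake_vxlan {
	sa_family_t remote_family;
};

static void fake_newlink(struct fake_vxlan *v, int have_group6)
{
	v->remote_family = AF_INET;	/* assume IPv4 unless told otherwise */
	if (have_group6)
		v->remote_family = AF_INET6;
}

static int fake_fdb_add(const struct fake_vxlan *v, sa_family_t entry_family)
{
	if (v->remote_family != entry_family)
		return -EAFNOSUPPORT;	/* don't mix IPv4 and IPv6 entries */
	return 0;
}

int main(void)
{
	struct fake_vxlan v;

	fake_newlink(&v, 0);
	printf("IPv4 entry: %d\n", fake_fdb_add(&v, AF_INET));
	printf("IPv6 entry: %d\n", fake_fdb_add(&v, AF_INET6));
	return 0;
}
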
diff --git a/queue-3.13/netlink-don-t-compare-the-nul-termination-in-nla_strcmp.patch b/queue-3.13/netlink-don-t-compare-the-nul-termination-in-nla_strcmp.patch
new file mode 100644 (file)
index 0000000..90b29aa
--- /dev/null
@@ -0,0 +1,55 @@
+From foo@baz Thu Apr 10 22:03:05 PDT 2014
+From: Pablo Neira <pablo@netfilter.org>
+Date: Tue, 1 Apr 2014 19:38:44 +0200
+Subject: netlink: don't compare the nul-termination in nla_strcmp
+
+From: Pablo Neira <pablo@netfilter.org>
+
+[ Upstream commit 8b7b932434f5eee495b91a2804f5b64ebb2bc835 ]
+
+nla_strcmp compares the string length plus one, so it's implicitly
+including the nul-termination in the comparison.
+
+ int nla_strcmp(const struct nlattr *nla, const char *str)
+ {
+        int len = strlen(str) + 1;
+        ...
+                d = memcmp(nla_data(nla), str, len);
+
+However, if NLA_STRING is used, userspace can send us a string without
+the nul-termination. This is a problem since the string
+comparison will not match, as the last byte may not be the
+nul-termination.
+
+Fix this by skipping the comparison of the nul-termination if the
+attribute data is nul-terminated. Suggested by Thomas Graf.
+
+Cc: Florian Westphal <fw@strlen.de>
+Cc: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/nlattr.c |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -303,9 +303,15 @@ int nla_memcmp(const struct nlattr *nla,
+  */
+ int nla_strcmp(const struct nlattr *nla, const char *str)
+ {
+-      int len = strlen(str) + 1;
+-      int d = nla_len(nla) - len;
++      int len = strlen(str);
++      char *buf = nla_data(nla);
++      int attrlen = nla_len(nla);
++      int d;
++      if (attrlen > 0 && buf[attrlen - 1] == '\0')
++              attrlen--;
++
++      d = attrlen - len;
+       if (d == 0)
+               d = memcmp(nla_data(nla), str, len);
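
For reference, the fixed comparison can be exercised in isolation. Below is a userspace re-implementation of the same logic, operating on a raw buffer and length instead of a struct nlattr:

/*
 * Userspace re-implementation of the fixed comparison: an attribute
 * payload may or may not carry a trailing NUL, so drop the terminator
 * from the attribute length before comparing.
 */
#include <stdio.h>
#include <string.h>

static int strcmp_attr(const char *buf, int attrlen, const char *str)
{
	int len = strlen(str);
	int d;

	if (attrlen > 0 && buf[attrlen - 1] == '\0')
		attrlen--;		/* ignore an explicit terminator */

	d = attrlen - len;
	if (d == 0)
		d = memcmp(buf, str, len);
	return d;
}

int main(void)
{
	/* 3-byte payload without NUL vs 4-byte payload with NUL: both
	 * should now match "eth".
	 */
	printf("%d %d\n", strcmp_attr("eth", 3, "eth"),
			  strcmp_attr("eth\0", 4, "eth"));
	return 0;
}
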
diff --git a/queue-3.13/netpoll-fix-the-skb-check-in-pkt_is_ns.patch b/queue-3.13/netpoll-fix-the-skb-check-in-pkt_is_ns.patch
new file mode 100644 (file)
index 0000000..6a6127b
--- /dev/null
@@ -0,0 +1,31 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Li RongQing <roy.qing.li@gmail.com>
+Date: Fri, 21 Mar 2014 20:53:57 +0800
+Subject: netpoll: fix the skb check in pkt_is_ns
+
+From: Li RongQing <roy.qing.li@gmail.com>
+
+[ Not applicable upstream commit, the code here has been removed
+  upstream. ]
+
+Neighbor Solicitation is an IPv6 protocol, so we should check
+skb->protocol against ETH_P_IPV6.
+
+Signed-off-by: Li RongQing <roy.qing.li@gmail.com>
+Cc: WANG Cong <amwang@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/netpoll.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -742,7 +742,7 @@ static bool pkt_is_ns(struct sk_buff *sk
+       struct nd_msg *msg;
+       struct ipv6hdr *hdr;
+-      if (skb->protocol != htons(ETH_P_ARP))
++      if (skb->protocol != htons(ETH_P_IPV6))
+               return false;
+       if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
+               return false;
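
The one-line change above is an ethertype correction: Neighbour Solicitation arrives in IPv6 packets, so the filter must match ETH_P_IPV6. A trivial userspace check of the same condition, assuming the usual Linux uapi headers:

/* Tiny illustration of the corrected check; not the netpoll code. */
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool could_be_ns(uint16_t protocol_be)
{
	/* skb->protocol is stored in network byte order */
	return protocol_be == htons(ETH_P_IPV6);
}

int main(void)
{
	printf("ARP  frame: %d\n", could_be_ns(htons(ETH_P_ARP)));
	printf("IPv6 frame: %d\n", could_be_ns(htons(ETH_P_IPV6)));
	return 0;
}
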
diff --git a/queue-3.13/pkt_sched-fq-do-not-hold-qdisc-lock-while-allocating.patch b/queue-3.13/pkt_sched-fq-do-not-hold-qdisc-lock-while-allocating.patch
new file mode 100644 (file)
index 0000000..22ee441
--- /dev/null
@@ -0,0 +1,89 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 6 Mar 2014 22:57:52 -0800
+Subject: pkt_sched: fq: do not hold qdisc lock while allocating
+ memory
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 2d8d40afd187bced0a3d056366fb58d66fe845e3 ]
+
+Resizing the fq hash table allocates memory while holding the qdisc
+spinlock, with BH disabled.
+
+This is definitely not good, as the allocation might sleep.
+
+We can drop the lock and re-acquire it when needed; we hold RTNL, so no
+other changes can happen at the same time.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Fixes: afe4fd062416 ("pkt_sched: fq: Fair Queue packet scheduler")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_fq.c |   28 +++++++++++++++++++---------
+ 1 file changed, 19 insertions(+), 9 deletions(-)
+
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -578,9 +578,11 @@ static void fq_rehash(struct fq_sched_da
+       q->stat_gc_flows += fcnt;
+ }
+-static int fq_resize(struct fq_sched_data *q, u32 log)
++static int fq_resize(struct Qdisc *sch, u32 log)
+ {
++      struct fq_sched_data *q = qdisc_priv(sch);
+       struct rb_root *array;
++      void *old_fq_root;
+       u32 idx;
+       if (q->fq_root && log == q->fq_trees_log)
+@@ -593,13 +595,19 @@ static int fq_resize(struct fq_sched_dat
+       for (idx = 0; idx < (1U << log); idx++)
+               array[idx] = RB_ROOT;
+-      if (q->fq_root) {
+-              fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
+-              kfree(q->fq_root);
+-      }
++      sch_tree_lock(sch);
++
++      old_fq_root = q->fq_root;
++      if (old_fq_root)
++              fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
++
+       q->fq_root = array;
+       q->fq_trees_log = log;
++      sch_tree_unlock(sch);
++
++      kfree(old_fq_root);
++
+       return 0;
+ }
+@@ -675,9 +683,11 @@ static int fq_change(struct Qdisc *sch,
+               q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
+       }
+-      if (!err)
+-              err = fq_resize(q, fq_log);
+-
++      if (!err) {
++              sch_tree_unlock(sch);
++              err = fq_resize(sch, fq_log);
++              sch_tree_lock(sch);
++      }
+       while (sch->q.qlen > sch->limit) {
+               struct sk_buff *skb = fq_dequeue(sch);
+@@ -723,7 +733,7 @@ static int fq_init(struct Qdisc *sch, st
+       if (opt)
+               err = fq_change(sch, opt);
+       else
+-              err = fq_resize(q, q->fq_trees_log);
++              err = fq_resize(sch, q->fq_trees_log);
+       return err;
+ }
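
The pattern applied above (allocate with no lock held, swap pointers under the lock, free the old table after unlocking) is generic. A simplified userspace sketch follows, with a single mutex standing in for the qdisc lock and the rehash omitted:

/*
 * Simplified userspace sketch of the resize pattern, not the qdisc code:
 * allocate the new table with no lock held (allocation may sleep), swap
 * pointers under the lock, and free the old table after dropping it.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static void **fq_root;
static unsigned int fq_log;

static int fq_resize(unsigned int log)
{
	void **new_root, **old_root;

	if (fq_root && log == fq_log)
		return 0;

	new_root = calloc(1U << log, sizeof(*new_root));	/* "may sleep" */
	if (!new_root)
		return -1;

	pthread_mutex_lock(&tree_lock);
	old_root = fq_root;
	/* a real rehash of old entries into new_root would happen here */
	fq_root = new_root;
	fq_log = log;
	pthread_mutex_unlock(&tree_lock);

	free(old_root);		/* freeing can also be done without the lock */
	return 0;
}

int main(void)
{
	return fq_resize(8);
}
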
diff --git a/queue-3.13/rds-prevent-dereference-of-a-null-device-in-rds_iw_laddr_check.patch b/queue-3.13/rds-prevent-dereference-of-a-null-device-in-rds_iw_laddr_check.patch
new file mode 100644 (file)
index 0000000..3b7205c
--- /dev/null
@@ -0,0 +1,31 @@
+From foo@baz Thu Apr 10 22:03:05 PDT 2014
+From: Sasha Levin <sasha.levin@oracle.com>
+Date: Sat, 29 Mar 2014 20:39:35 -0400
+Subject: rds: prevent dereference of a NULL device in rds_iw_laddr_check
+
+From: Sasha Levin <sasha.levin@oracle.com>
+
+[ Upstream commit bf39b4247b8799935ea91d90db250ab608a58e50 ]
+
+Binding might result in a NULL device which is later dereferenced
+without checking.
+
+Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rds/iw.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/rds/iw.c
++++ b/net/rds/iw.c
+@@ -239,7 +239,8 @@ static int rds_iw_laddr_check(__be32 add
+       ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
+       /* due to this, we will claim to support IB devices unless we
+          check node_type. */
+-      if (ret || cm_id->device->node_type != RDMA_NODE_RNIC)
++      if (ret || !cm_id->device ||
++          cm_id->device->node_type != RDMA_NODE_RNIC)
+               ret = -EADDRNOTAVAIL;
+       rdsdebug("addr %pI4 ret %d node type %d\n",
diff --git a/queue-3.13/rtnetlink-fix-fdb-notification-flags.patch b/queue-3.13/rtnetlink-fix-fdb-notification-flags.patch
new file mode 100644 (file)
index 0000000..2751ddf
--- /dev/null
@@ -0,0 +1,62 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Date: Wed, 19 Mar 2014 17:47:49 +0100
+Subject: rtnetlink: fix fdb notification flags
+
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+
+[ Upstream commit 1c104a6bebf3c16b6248408b84f91d09ac8a26b6 ]
+
+Commit 3ff661c38c84 ("net: rtnetlink notify events for FDB NTF_SELF adds and
+deletes") reuses the function nlmsg_populate_fdb_fill() to notify fdb events.
+But this function was used only for dumps and thus was always setting the
+NLM_F_MULTI flag, which is wrong in the case of a single notification.
+
+Libraries like libnl will wait forever for NLMSG_DONE.
+
+CC: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Acked-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/rtnetlink.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2014,12 +2014,13 @@ EXPORT_SYMBOL(rtmsg_ifinfo);
+ static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
+                                  struct net_device *dev,
+                                  u8 *addr, u32 pid, u32 seq,
+-                                 int type, unsigned int flags)
++                                 int type, unsigned int flags,
++                                 int nlflags)
+ {
+       struct nlmsghdr *nlh;
+       struct ndmsg *ndm;
+-      nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
++      nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
+       if (!nlh)
+               return -EMSGSIZE;
+@@ -2057,7 +2058,7 @@ static void rtnl_fdb_notify(struct net_d
+       if (!skb)
+               goto errout;
+-      err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
++      err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
+       if (err < 0) {
+               kfree_skb(skb);
+               goto errout;
+@@ -2282,7 +2283,8 @@ static int nlmsg_populate_fdb(struct sk_
+               err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
+                                             portid, seq,
+-                                            RTM_NEWNEIGH, NTF_SELF);
++                                            RTM_NEWNEIGH, NTF_SELF,
++                                            NLM_F_MULTI);
+               if (err < 0)
+                       return err;
+ skip:
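
The distinction the patch restores is that NLM_F_MULTI belongs to multipart dump replies terminated by NLMSG_DONE, not to single event notifications. A minimal illustration, not the kernel helper:

/*
 * Illustrative only: pick the netlink message flags depending on whether
 * the fdb entry is emitted as part of a dump or as a single notification.
 */
#include <linux/netlink.h>
#include <stdio.h>

static unsigned int fdb_nlflags(int is_dump)
{
	/* NLM_F_MULTI tells the receiver to keep reading until NLMSG_DONE,
	 * which never arrives for a lone notification, so libraries such
	 * as libnl would wait forever.
	 */
	return is_dump ? NLM_F_MULTI : 0;
}

int main(void)
{
	printf("dump:         %#x\n", fdb_nlflags(1));
	printf("notification: %#x\n", fdb_nlflags(0));
	return 0;
}
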
index 0dc05c05abc80bbe9110da58368b08b1ae84ffd7..184793211bc71c5a0f368ffe98df29fb6b52d75c 100644 (file)
@@ -1,4 +1,60 @@
 selinux-correctly-label-proc-inodes-in-use-before-the-policy-is-loaded.patch
+net-fix-for-a-race-condition-in-the-inet-frag-code.patch
+net-sctp-fix-skb-leakage-in-cookie-echo-path-of.patch
+bridge-multicast-add-sanity-check-for-query-source.patch
+tipc-allow-connection-shutdown-callback-to-be-invoked.patch
+tipc-fix-connection-refcount-leak.patch
+tipc-drop-subscriber-connection-id-invalidation.patch
+tipc-fix-memory-leak-during-module-removal.patch
+tipc-don-t-log-disabled-tasklet-handler-errors.patch
+inet-frag-make-sure-forced-eviction-removes-all.patch
+net-unix-non-blocking-recvmsg-should-not-return.patch
+ipv6-fix-exthdrs-offload-registration.patch
+ipv6-don-t-set-dst_nocount-for-remotely-added-routes.patch
+bnx2-fix-shutdown-sequence.patch
+pkt_sched-fq-do-not-hold-qdisc-lock-while-allocating.patch
+xen-netback-fix-issue-caused-by-using-gso_type-wrongly.patch
+vlan-set-correct-source-mac-address-with-tx-vlan-offload-enabled.patch
+skbuff-skb_segment-s-frag-nskb_frag.patch
+skbuff-skb_segment-s-skb_frag-frag.patch
+skbuff-skb_segment-s-skb-head_skb.patch
+skbuff-skb_segment-s-fskb-list_skb.patch
+skbuff-skb_segment-orphan-frags-before-copying.patch
+tcp-tcp_release_cb-should-release-socket-ownership.patch
+bridge-multicast-add-sanity-check-for-general-query-destination.patch
+bridge-multicast-enable-snooping-on-general-queries-only.patch
+net-socket-error-on-a-negative-msg_namelen.patch
+bonding-set-correct-vlan-id-for-alb-xmit-path.patch
+eth-fec-fix-lost-promiscuous-mode-after-reconnecting-cable.patch
+ipv6-avoid-unnecessary-temporary-addresses-being-generated.patch
+ipv6-ip6_append_data_mtu-do-not-handle-the-mtu-of-the-second-fragment-properly.patch
+net-cdc_ncm-fix-control-message-ordering.patch
+vxlan-fix-potential-null-dereference-in-arp_reduce.patch
+vxlan-fix-nonfunctional-neigh_reduce.patch
+tcp-syncookies-do-not-use-getnstimeofday.patch
+rtnetlink-fix-fdb-notification-flags.patch
+ipmr-fix-mfc-notification-flags.patch
+ip6mr-fix-mfc-notification-flags.patch
+net-micrel-ks8851-ml-add-vdd-supply-support.patch
+netpoll-fix-the-skb-check-in-pkt_is_ns.patch
+tipc-fix-spinlock-recursion-bug-for-failed-subscriptions.patch
+ip_tunnel-fix-dst-ref-count.patch
+tg3-do-not-include-vlan-acceleration-features-in-vlan_features.patch
+virtio-net-correct-error-handling-of-virtqueue_kick.patch
+usbnet-include-wait-queue-head-in-device-structure.patch
+vlan-set-hard_header_len-according-to-available-acceleration.patch
+vhost-fix-total-length-when-packets-are-too-short.patch
+vhost-validate-vhost_get_vq_desc-return-value.patch
+tcp-fix-get_timewait4_sock-delay-computation-on-64bit.patch
+xen-netback-remove-pointless-clause-from-if-statement.patch
+ipv6-some-ipv6-statistic-counters-failed-to-disable-bh.patch
+netlink-don-t-compare-the-nul-termination-in-nla_strcmp.patch
+xen-netback-disable-rogue-vif-in-kthread-context.patch
+call-efx_set_channels-before-efx-type-dimension_resources.patch
+net-vxlan-fix-crash-when-interface-is-created-with-no-group.patch
+isdnloop-validate-nul-terminated-strings-from-user.patch
+isdnloop-several-buffer-overflows.patch
+rds-prevent-dereference-of-a-null-device-in-rds_iw_laddr_check.patch
 powernow-k6-disable-cache-when-changing-frequency.patch
 powernow-k6-correctly-initialize-default-parameters.patch
 powernow-k6-reorder-frequencies.patch
diff --git a/queue-3.13/skbuff-skb_segment-orphan-frags-before-copying.patch b/queue-3.13/skbuff-skb_segment-orphan-frags-before-copying.patch
new file mode 100644 (file)
index 0000000..130e65e
--- /dev/null
@@ -0,0 +1,62 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Mon, 10 Mar 2014 19:28:08 +0200
+Subject: skbuff: skb_segment: orphan frags before copying
+
+From: "Michael S. Tsirkin" <mst@redhat.com>
+
+[ Upstream commit 1fd819ecb90cc9b822cd84d3056ddba315d3340f ]
+
+skb_segment copies frags around, so we need
+to copy them carefully to avoid accessing
+user memory after reporting completion to userspace
+through a callback.
+
+skb_segment doesn't normally happen on the datapath:
+TSO needs to be disabled - so disabling zero copy
+in this case does not look like a big deal.
+
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2772,6 +2772,7 @@ struct sk_buff *skb_segment(struct sk_bu
+       skb_frag_t *frag = skb_shinfo(head_skb)->frags;
+       unsigned int mss = skb_shinfo(head_skb)->gso_size;
+       unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
++      struct sk_buff *frag_skb = head_skb;
+       unsigned int offset = doffset;
+       unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
+       unsigned int headroom;
+@@ -2816,6 +2817,7 @@ struct sk_buff *skb_segment(struct sk_bu
+                       i = 0;
+                       nfrags = skb_shinfo(list_skb)->nr_frags;
+                       frag = skb_shinfo(list_skb)->frags;
++                      frag_skb = list_skb;
+                       pos += skb_headlen(list_skb);
+                       while (pos < offset + len) {
+@@ -2903,6 +2905,7 @@ struct sk_buff *skb_segment(struct sk_bu
+                               i = 0;
+                               nfrags = skb_shinfo(list_skb)->nr_frags;
+                               frag = skb_shinfo(list_skb)->frags;
++                              frag_skb = list_skb;
+                               BUG_ON(!nfrags);
+@@ -2917,6 +2920,9 @@ struct sk_buff *skb_segment(struct sk_bu
+                               goto err;
+                       }
++                      if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
++                              goto err;
++
+                       *nskb_frag = *frag;
+                       __skb_frag_ref(nskb_frag);
+                       size = skb_frag_size(nskb_frag);
diff --git a/queue-3.13/skbuff-skb_segment-s-frag-nskb_frag.patch b/queue-3.13/skbuff-skb_segment-s-frag-nskb_frag.patch
new file mode 100644 (file)
index 0000000..4e81871
--- /dev/null
@@ -0,0 +1,71 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Mon, 10 Mar 2014 18:29:04 +0200
+Subject: skbuff: skb_segment: s/frag/nskb_frag/
+
+From: "Michael S. Tsirkin" <mst@redhat.com>
+
+[ Upstream commit 8cb19905e9287a93ce7c2cbbdf742a060b00e219 ]
+
+frag points at nskb, so name it appropriately
+
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c |   18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2794,7 +2794,7 @@ struct sk_buff *skb_segment(struct sk_bu
+       do {
+               struct sk_buff *nskb;
+-              skb_frag_t *frag;
++              skb_frag_t *nskb_frag;
+               int hsize;
+               int size;
+@@ -2887,7 +2887,7 @@ struct sk_buff *skb_segment(struct sk_bu
+                       continue;
+               }
+-              frag = skb_shinfo(nskb)->frags;
++              nskb_frag = skb_shinfo(nskb)->frags;
+               skb_copy_from_linear_data_offset(skb, offset,
+                                                skb_put(nskb, hsize), hsize);
+@@ -2915,13 +2915,13 @@ struct sk_buff *skb_segment(struct sk_bu
+                               goto err;
+                       }
+-                      *frag = *skb_frag;
+-                      __skb_frag_ref(frag);
+-                      size = skb_frag_size(frag);
++                      *nskb_frag = *skb_frag;
++                      __skb_frag_ref(nskb_frag);
++                      size = skb_frag_size(nskb_frag);
+                       if (pos < offset) {
+-                              frag->page_offset += offset - pos;
+-                              skb_frag_size_sub(frag, offset - pos);
++                              nskb_frag->page_offset += offset - pos;
++                              skb_frag_size_sub(nskb_frag, offset - pos);
+                       }
+                       skb_shinfo(nskb)->nr_frags++;
+@@ -2931,11 +2931,11 @@ struct sk_buff *skb_segment(struct sk_bu
+                               skb_frag++;
+                               pos += size;
+                       } else {
+-                              skb_frag_size_sub(frag, pos + size - (offset + len));
++                              skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
+                               goto skip_fraglist;
+                       }
+-                      frag++;
++                      nskb_frag++;
+               }
+ skip_fraglist:
diff --git a/queue-3.13/skbuff-skb_segment-s-fskb-list_skb.patch b/queue-3.13/skbuff-skb_segment-s-fskb-list_skb.patch
new file mode 100644 (file)
index 0000000..751feb7
--- /dev/null
@@ -0,0 +1,82 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Mon, 10 Mar 2014 19:27:59 +0200
+Subject: skbuff: skb_segment: s/fskb/list_skb/
+
+From: "Michael S. Tsirkin" <mst@redhat.com>
+
+[ Upstream commit 1a4cedaf65491e66e1e55b8428c89209da729209 ]
+
+fskb is unrelated to frag: it's coming from
+frag_list. Rename it list_skb to avoid confusion.
+
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c |   26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2768,7 +2768,7 @@ struct sk_buff *skb_segment(struct sk_bu
+ {
+       struct sk_buff *segs = NULL;
+       struct sk_buff *tail = NULL;
+-      struct sk_buff *fskb = skb_shinfo(head_skb)->frag_list;
++      struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
+       skb_frag_t *frag = skb_shinfo(head_skb)->frags;
+       unsigned int mss = skb_shinfo(head_skb)->gso_size;
+       unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
+@@ -2809,14 +2809,14 @@ struct sk_buff *skb_segment(struct sk_bu
+               if (hsize > len || !sg)
+                       hsize = len;
+-              if (!hsize && i >= nfrags && skb_headlen(fskb) &&
+-                  (skb_headlen(fskb) == len || sg)) {
+-                      BUG_ON(skb_headlen(fskb) > len);
++              if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
++                  (skb_headlen(list_skb) == len || sg)) {
++                      BUG_ON(skb_headlen(list_skb) > len);
+                       i = 0;
+-                      nfrags = skb_shinfo(fskb)->nr_frags;
+-                      frag = skb_shinfo(fskb)->frags;
+-                      pos += skb_headlen(fskb);
++                      nfrags = skb_shinfo(list_skb)->nr_frags;
++                      frag = skb_shinfo(list_skb)->frags;
++                      pos += skb_headlen(list_skb);
+                       while (pos < offset + len) {
+                               BUG_ON(i >= nfrags);
+@@ -2830,8 +2830,8 @@ struct sk_buff *skb_segment(struct sk_bu
+                               frag++;
+                       }
+-                      nskb = skb_clone(fskb, GFP_ATOMIC);
+-                      fskb = fskb->next;
++                      nskb = skb_clone(list_skb, GFP_ATOMIC);
++                      list_skb = list_skb->next;
+                       if (unlikely(!nskb))
+                               goto err;
+@@ -2898,15 +2898,15 @@ struct sk_buff *skb_segment(struct sk_bu
+               while (pos < offset + len) {
+                       if (i >= nfrags) {
+-                              BUG_ON(skb_headlen(fskb));
++                              BUG_ON(skb_headlen(list_skb));
+                               i = 0;
+-                              nfrags = skb_shinfo(fskb)->nr_frags;
+-                              frag = skb_shinfo(fskb)->frags;
++                              nfrags = skb_shinfo(list_skb)->nr_frags;
++                              frag = skb_shinfo(list_skb)->frags;
+                               BUG_ON(!nfrags);
+-                              fskb = fskb->next;
++                              list_skb = list_skb->next;
+                       }
+                       if (unlikely(skb_shinfo(nskb)->nr_frags >=
diff --git a/queue-3.13/skbuff-skb_segment-s-skb-head_skb.patch b/queue-3.13/skbuff-skb_segment-s-skb-head_skb.patch
new file mode 100644 (file)
index 0000000..34eb182
--- /dev/null
@@ -0,0 +1,156 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Mon, 10 Mar 2014 18:29:19 +0200
+Subject: skbuff: skb_segment: s/skb/head_skb/
+
+From: "Michael S. Tsirkin" <mst@redhat.com>
+
+[ Upstream commit df5771ffefb13f8af5392bd54fd7e2b596a3a357 ]
+
+Rename the local variable to make it easier to tell at a glance that we are
+dealing with a head skb.
+
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c |   51 +++++++++++++++++++++++++--------------------------
+ 1 file changed, 25 insertions(+), 26 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2756,41 +2756,42 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
+ /**
+  *    skb_segment - Perform protocol segmentation on skb.
+- *    @skb: buffer to segment
++ *    @head_skb: buffer to segment
+  *    @features: features for the output path (see dev->features)
+  *
+  *    This function performs segmentation on the given skb.  It returns
+  *    a pointer to the first in a list of new skbs for the segments.
+  *    In case of error it returns ERR_PTR(err).
+  */
+-struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
++struct sk_buff *skb_segment(struct sk_buff *head_skb,
++                          netdev_features_t features)
+ {
+       struct sk_buff *segs = NULL;
+       struct sk_buff *tail = NULL;
+-      struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
+-      skb_frag_t *frag = skb_shinfo(skb)->frags;
+-      unsigned int mss = skb_shinfo(skb)->gso_size;
+-      unsigned int doffset = skb->data - skb_mac_header(skb);
++      struct sk_buff *fskb = skb_shinfo(head_skb)->frag_list;
++      skb_frag_t *frag = skb_shinfo(head_skb)->frags;
++      unsigned int mss = skb_shinfo(head_skb)->gso_size;
++      unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
+       unsigned int offset = doffset;
+-      unsigned int tnl_hlen = skb_tnl_header_len(skb);
++      unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
+       unsigned int headroom;
+       unsigned int len;
+       __be16 proto;
+       bool csum;
+       int sg = !!(features & NETIF_F_SG);
+-      int nfrags = skb_shinfo(skb)->nr_frags;
++      int nfrags = skb_shinfo(head_skb)->nr_frags;
+       int err = -ENOMEM;
+       int i = 0;
+       int pos;
+-      proto = skb_network_protocol(skb);
++      proto = skb_network_protocol(head_skb);
+       if (unlikely(!proto))
+               return ERR_PTR(-EINVAL);
+       csum = !!can_checksum_protocol(features, proto);
+-      __skb_push(skb, doffset);
+-      headroom = skb_headroom(skb);
+-      pos = skb_headlen(skb);
++      __skb_push(head_skb, doffset);
++      headroom = skb_headroom(head_skb);
++      pos = skb_headlen(head_skb);
+       do {
+               struct sk_buff *nskb;
+@@ -2798,11 +2799,11 @@ struct sk_buff *skb_segment(struct sk_bu
+               int hsize;
+               int size;
+-              len = skb->len - offset;
++              len = head_skb->len - offset;
+               if (len > mss)
+                       len = mss;
+-              hsize = skb_headlen(skb) - offset;
++              hsize = skb_headlen(head_skb) - offset;
+               if (hsize < 0)
+                       hsize = 0;
+               if (hsize > len || !sg)
+@@ -2851,7 +2852,7 @@ struct sk_buff *skb_segment(struct sk_bu
+                       __skb_push(nskb, doffset);
+               } else {
+                       nskb = __alloc_skb(hsize + doffset + headroom,
+-                                         GFP_ATOMIC, skb_alloc_rx_flag(skb),
++                                         GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
+                                          NUMA_NO_NODE);
+                       if (unlikely(!nskb))
+@@ -2867,12 +2868,12 @@ struct sk_buff *skb_segment(struct sk_bu
+                       segs = nskb;
+               tail = nskb;
+-              __copy_skb_header(nskb, skb);
+-              nskb->mac_len = skb->mac_len;
++              __copy_skb_header(nskb, head_skb);
++              nskb->mac_len = head_skb->mac_len;
+               skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
+-              skb_copy_from_linear_data_offset(skb, -tnl_hlen,
++              skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
+                                                nskb->data - tnl_hlen,
+                                                doffset + tnl_hlen);
+@@ -2881,7 +2882,7 @@ struct sk_buff *skb_segment(struct sk_bu
+               if (!sg) {
+                       nskb->ip_summed = CHECKSUM_NONE;
+-                      nskb->csum = skb_copy_and_csum_bits(skb, offset,
++                      nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
+                                                           skb_put(nskb, len),
+                                                           len, 0);
+                       continue;
+@@ -2889,10 +2890,11 @@ struct sk_buff *skb_segment(struct sk_bu
+               nskb_frag = skb_shinfo(nskb)->frags;
+-              skb_copy_from_linear_data_offset(skb, offset,
++              skb_copy_from_linear_data_offset(head_skb, offset,
+                                                skb_put(nskb, hsize), hsize);
+-              skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
++              skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
++                      SKBTX_SHARED_FRAG;
+               while (pos < offset + len) {
+                       if (i >= nfrags) {
+@@ -2949,15 +2951,12 @@ perform_csum_check:
+                                                 nskb->len - doffset, 0);
+                       nskb->ip_summed = CHECKSUM_NONE;
+               }
+-      } while ((offset += len) < skb->len);
++      } while ((offset += len) < head_skb->len);
+       return segs;
+ err:
+-      while ((skb = segs)) {
+-              segs = skb->next;
+-              kfree_skb(skb);
+-      }
++      kfree_skb_list(segs);
+       return ERR_PTR(err);
+ }
+ EXPORT_SYMBOL_GPL(skb_segment);
diff --git a/queue-3.13/skbuff-skb_segment-s-skb_frag-frag.patch b/queue-3.13/skbuff-skb_segment-s-skb_frag-frag.patch
new file mode 100644 (file)
index 0000000..784c59c
--- /dev/null
@@ -0,0 +1,80 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Mon, 10 Mar 2014 18:29:14 +0200
+Subject: skbuff: skb_segment: s/skb_frag/frag/
+
+From: "Michael S. Tsirkin" <mst@redhat.com>
+
+[ Upstream commit 4e1beba12d094c6c761ba5c49032b9b9e46380e8 ]
+
+skb_frag can in fact point at either skb
+or fskb, so rename it to the more general "frag".
+
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c |   14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2768,7 +2768,7 @@ struct sk_buff *skb_segment(struct sk_bu
+       struct sk_buff *segs = NULL;
+       struct sk_buff *tail = NULL;
+       struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
+-      skb_frag_t *skb_frag = skb_shinfo(skb)->frags;
++      skb_frag_t *frag = skb_shinfo(skb)->frags;
+       unsigned int mss = skb_shinfo(skb)->gso_size;
+       unsigned int doffset = skb->data - skb_mac_header(skb);
+       unsigned int offset = doffset;
+@@ -2814,19 +2814,19 @@ struct sk_buff *skb_segment(struct sk_bu
+                       i = 0;
+                       nfrags = skb_shinfo(fskb)->nr_frags;
+-                      skb_frag = skb_shinfo(fskb)->frags;
++                      frag = skb_shinfo(fskb)->frags;
+                       pos += skb_headlen(fskb);
+                       while (pos < offset + len) {
+                               BUG_ON(i >= nfrags);
+-                              size = skb_frag_size(skb_frag);
++                              size = skb_frag_size(frag);
+                               if (pos + size > offset + len)
+                                       break;
+                               i++;
+                               pos += size;
+-                              skb_frag++;
++                              frag++;
+                       }
+                       nskb = skb_clone(fskb, GFP_ATOMIC);
+@@ -2900,7 +2900,7 @@ struct sk_buff *skb_segment(struct sk_bu
+                               i = 0;
+                               nfrags = skb_shinfo(fskb)->nr_frags;
+-                              skb_frag = skb_shinfo(fskb)->frags;
++                              frag = skb_shinfo(fskb)->frags;
+                               BUG_ON(!nfrags);
+@@ -2915,7 +2915,7 @@ struct sk_buff *skb_segment(struct sk_bu
+                               goto err;
+                       }
+-                      *nskb_frag = *skb_frag;
++                      *nskb_frag = *frag;
+                       __skb_frag_ref(nskb_frag);
+                       size = skb_frag_size(nskb_frag);
+@@ -2928,7 +2928,7 @@ struct sk_buff *skb_segment(struct sk_bu
+                       if (pos + size <= offset + len) {
+                               i++;
+-                              skb_frag++;
++                              frag++;
+                               pos += size;
+                       } else {
+                               skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
diff --git a/queue-3.13/tcp-fix-get_timewait4_sock-delay-computation-on-64bit.patch b/queue-3.13/tcp-fix-get_timewait4_sock-delay-computation-on-64bit.patch
new file mode 100644 (file)
index 0000000..66e006d
--- /dev/null
@@ -0,0 +1,33 @@
+From foo@baz Thu Apr 10 22:03:05 PDT 2014
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 27 Mar 2014 07:19:19 -0700
+Subject: tcp: fix get_timewait4_sock() delay computation on 64bit
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit e2a1d3e47bb904082b758dec9d07edf241c45d05 ]
+
+It seems I missed one change in get_timewait4_sock() to compute
+the remaining time before deletion of an IPv4 timewait socket.
+
+This could result in wrong output in /proc/net/tcp for the tm->when field.
+
+Fixes: 96f817fedec4 ("tcp: shrink tcp6_timewait_sock by one cache line")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_ipv4.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2629,7 +2629,7 @@ static void get_timewait4_sock(const str
+ {
+       __be32 dest, src;
+       __u16 destp, srcp;
+-      long delta = tw->tw_ttd - jiffies;
++      s32 delta = tw->tw_ttd - inet_tw_time_stamp();
+       dest  = tw->tw_daddr;
+       src   = tw->tw_rcv_saddr;
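
The fix relies on the unsigned-wraparound idiom: subtracting two samples of a free-running 32-bit timestamp and keeping the result as a signed 32-bit value yields the correct remaining time even across a counter wrap. A minimal userspace sketch of that idiom (names and values are illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Signed distance between two samples of a free-running 32-bit
 * counter.  Correct even when the counter wrapped between the two
 * samples, as long as the real distance fits in 31 bits.
 */
static int32_t tick_delta(uint32_t deadline, uint32_t now)
{
        return (int32_t)(deadline - now);
}

int main(void)
{
        uint32_t now      = 0xfffffff0u; /* counter about to wrap      */
        uint32_t deadline = 0x00000010u; /* 32 ticks later, after wrap */

        printf("remaining: %d ticks\n", (int)tick_delta(deadline, now));
        return 0;
}
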
diff --git a/queue-3.13/tcp-syncookies-do-not-use-getnstimeofday.patch b/queue-3.13/tcp-syncookies-do-not-use-getnstimeofday.patch
new file mode 100644 (file)
index 0000000..954fe65
--- /dev/null
@@ -0,0 +1,54 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 19 Mar 2014 21:02:21 -0700
+Subject: tcp: syncookies: do not use getnstimeofday()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 632623153196bf183a69686ed9c07eee98ff1bf8 ]
+
+While it is true that getnstimeofday() uses about 40 cycles if TSC
+is available, it can use 1600 cycles if hpet is the clocksource.
+
+Switch to get_jiffies_64(), as this is more than enough, and
+go back to 60-second periods.
+
+Fixes: 8c27bd75f04f ("tcp: syncookies: reduce cookie lifetime to 128 seconds")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Florian Westphal <fw@strlen.de>
+Acked-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/tcp.h |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -480,20 +480,21 @@ struct sock *cookie_v4_check(struct sock
+ #ifdef CONFIG_SYN_COOKIES
+ #include <linux/ktime.h>
+-/* Syncookies use a monotonic timer which increments every 64 seconds.
++/* Syncookies use a monotonic timer which increments every 60 seconds.
+  * This counter is used both as a hash input and partially encoded into
+  * the cookie value.  A cookie is only validated further if the delta
+  * between the current counter value and the encoded one is less than this,
+- * i.e. a sent cookie is valid only at most for 128 seconds (or less if
++ * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
+  * the counter advances immediately after a cookie is generated).
+  */
+ #define MAX_SYNCOOKIE_AGE 2
+ static inline u32 tcp_cookie_time(void)
+ {
+-      struct timespec now;
+-      getnstimeofday(&now);
+-      return now.tv_sec >> 6; /* 64 seconds granularity */
++      u64 val = get_jiffies_64();
++
++      do_div(val, 60 * HZ);
++      return val;
+ }
+ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
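
The replacement computes the syncookie counter by dividing 64-bit jiffies by 60*HZ rather than reading the wall clock. A rough userspace equivalent of that bucketing, using CLOCK_MONOTONIC as a stand-in for jiffies (constants and names are illustrative only, not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define COOKIE_PERIOD_SEC 60   /* one counter increment per minute      */
#define MAX_SYNCOOKIE_AGE  2   /* cookie accepted for at most 2 periods */

/* Monotonic time bucketed into 60-second periods, the same shape as
 * get_jiffies_64() / (60 * HZ) in the patch.
 */
static uint32_t cookie_time(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint32_t)(ts.tv_sec / COOKIE_PERIOD_SEC);
}

static int cookie_still_valid(uint32_t encoded, uint32_t now)
{
        return now - encoded < MAX_SYNCOOKIE_AGE;
}

int main(void)
{
        uint32_t t = cookie_time();

        printf("bucket %u valid one period later: %d\n",
               (unsigned)t, cookie_still_valid(t, t + 1));
        return 0;
}
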
diff --git a/queue-3.13/tcp-tcp_release_cb-should-release-socket-ownership.patch b/queue-3.13/tcp-tcp_release_cb-should-release-socket-ownership.patch
new file mode 100644 (file)
index 0000000..3c9e6ab
--- /dev/null
@@ -0,0 +1,122 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Eric Dumazet <eric.dumazet@gmail.com>
+Date: Mon, 10 Mar 2014 09:50:11 -0700
+Subject: tcp: tcp_release_cb() should release socket ownership
+
+From: Eric Dumazet <eric.dumazet@gmail.com>
+
+[ Upstream commit c3f9b01849ef3bc69024990092b9f42e20df7797 ]
+
+Lars Persson reported the following deadlock:
+
+-000 |M:0x0:0x802B6AF8(asm) <-- arch_spin_lock
+-001 |tcp_v4_rcv(skb = 0x8BD527A0) <-- sk = 0x8BE6B2A0
+-002 |ip_local_deliver_finish(skb = 0x8BD527A0)
+-003 |__netif_receive_skb_core(skb = 0x8BD527A0, ?)
+-004 |netif_receive_skb(skb = 0x8BD527A0)
+-005 |elk_poll(napi = 0x8C770500, budget = 64)
+-006 |net_rx_action(?)
+-007 |__do_softirq()
+-008 |do_softirq()
+-009 |local_bh_enable()
+-010 |tcp_rcv_established(sk = 0x8BE6B2A0, skb = 0x87D3A9E0, th = 0x814EBE14, ?)
+-011 |tcp_v4_do_rcv(sk = 0x8BE6B2A0, skb = 0x87D3A9E0)
+-012 |tcp_delack_timer_handler(sk = 0x8BE6B2A0)
+-013 |tcp_release_cb(sk = 0x8BE6B2A0)
+-014 |release_sock(sk = 0x8BE6B2A0)
+-015 |tcp_sendmsg(?, sk = 0x8BE6B2A0, ?, ?)
+-016 |sock_sendmsg(sock = 0x8518C4C0, msg = 0x87D8DAA8, size = 4096)
+-017 |kernel_sendmsg(?, ?, ?, ?, size = 4096)
+-018 |smb_send_kvec()
+-019 |smb_send_rqst(server = 0x87C4D400, rqst = 0x87D8DBA0)
+-020 |cifs_call_async()
+-021 |cifs_async_writev(wdata = 0x87FD6580)
+-022 |cifs_writepages(mapping = 0x852096E4, wbc = 0x87D8DC88)
+-023 |__writeback_single_inode(inode = 0x852095D0, wbc = 0x87D8DC88)
+-024 |writeback_sb_inodes(sb = 0x87D6D800, wb = 0x87E4A9C0, work = 0x87D8DD88)
+-025 |__writeback_inodes_wb(wb = 0x87E4A9C0, work = 0x87D8DD88)
+-026 |wb_writeback(wb = 0x87E4A9C0, work = 0x87D8DD88)
+-027 |wb_do_writeback(wb = 0x87E4A9C0, force_wait = 0)
+-028 |bdi_writeback_workfn(work = 0x87E4A9CC)
+-029 |process_one_work(worker = 0x8B045880, work = 0x87E4A9CC)
+-030 |worker_thread(__worker = 0x8B045880)
+-031 |kthread(_create = 0x87CADD90)
+-032 |ret_from_kernel_thread(asm)
+
+The bug occurs because __tcp_checksum_complete_user() enables BH, assuming
+it is running from softirq context.
+
+Lars' trace involved a NIC without RX checksum support, but other points
+are problematic as well, like the prequeue stuff.
+
+The problem is triggered by a timer that finds the socket owned by the user.
+
+tcp_release_cb() should call tcp_write_timer_handler() or
+tcp_delack_timer_handler() in the appropriate context:
+
+BH disabled and socket lock held, but 'owned' field cleared,
+as if they were running from timer handlers.
+
+Fixes: 6f458dfb4092 ("tcp: improve latencies of timer triggered events")
+Reported-by: Lars Persson <lars.persson@axis.com>
+Tested-by: Lars Persson <lars.persson@axis.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sock.h    |    5 +++++
+ net/core/sock.c       |    5 ++++-
+ net/ipv4/tcp_output.c |   11 +++++++++++
+ 3 files changed, 20 insertions(+), 1 deletion(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1474,6 +1474,11 @@ static inline void sk_wmem_free_skb(stru
+  */
+ #define sock_owned_by_user(sk)        ((sk)->sk_lock.owned)
++static inline void sock_release_ownership(struct sock *sk)
++{
++      sk->sk_lock.owned = 0;
++}
++
+ /*
+  * Macro so as to not evaluate some arguments when
+  * lockdep is not enabled.
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2384,10 +2384,13 @@ void release_sock(struct sock *sk)
+       if (sk->sk_backlog.tail)
+               __release_sock(sk);
++      /* Warning : release_cb() might need to release sk ownership,
++       * ie call sock_release_ownership(sk) before us.
++       */
+       if (sk->sk_prot->release_cb)
+               sk->sk_prot->release_cb(sk);
+-      sk->sk_lock.owned = 0;
++      sock_release_ownership(sk);
+       if (waitqueue_active(&sk->sk_lock.wq))
+               wake_up(&sk->sk_lock.wq);
+       spin_unlock_bh(&sk->sk_lock.slock);
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -765,6 +765,17 @@ void tcp_release_cb(struct sock *sk)
+       if (flags & (1UL << TCP_TSQ_DEFERRED))
+               tcp_tsq_handler(sk);
++      /* Here begins the tricky part :
++       * We are called from release_sock() with :
++       * 1) BH disabled
++       * 2) sk_lock.slock spinlock held
++       * 3) socket owned by us (sk->sk_lock.owned == 1)
++       *
++       * But following code is meant to be called from BH handlers,
++       * so we should keep BH disabled, but early release socket ownership
++       */
++      sock_release_ownership(sk);
++
+       if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
+               tcp_write_timer_handler(sk);
+               __sock_put(sk);
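
The heart of the fix is a calling convention: the deferred handlers expect to run with BH disabled, the lock held, and the socket not owned by the user, so ownership has to be released before they are invoked. A condensed sketch of that shape with invented names (a plain flag stands in for the real socket-lock machinery):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins: one flag plays the role of sk->sk_lock.owned, the
 * other marks work a timer deferred while the user held the socket.
 */
struct fake_sock {
        bool owned_by_user;
        bool delack_deferred;
};

/* Written for timer/BH context: it must never see a user-owned socket. */
static void delack_timer_handler(struct fake_sock *sk)
{
        if (sk->owned_by_user) {
                printf("BUG: handler ran on a user-owned socket\n");
                return;
        }
        printf("deferred delayed-ack work done\n");
}

/* Same shape as the fixed tcp_release_cb(): give up user ownership
 * first, then run the deferred handlers as if from their usual context.
 */
static void release_cb(struct fake_sock *sk)
{
        sk->owned_by_user = false;

        if (sk->delack_deferred) {
                sk->delack_deferred = false;
                delack_timer_handler(sk);
        }
}

int main(void)
{
        struct fake_sock sk = { .owned_by_user = true,
                                .delack_deferred = true };

        release_cb(&sk); /* prints "deferred delayed-ack work done" */
        return 0;
}
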
diff --git a/queue-3.13/tg3-do-not-include-vlan-acceleration-features-in-vlan_features.patch b/queue-3.13/tg3-do-not-include-vlan-acceleration-features-in-vlan_features.patch
new file mode 100644 (file)
index 0000000..dca4629
--- /dev/null
@@ -0,0 +1,44 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Vlad Yasevich <vyasevic@redhat.com>
+Date: Mon, 24 Mar 2014 17:52:12 -0400
+Subject: tg3: Do not include vlan acceleration features in vlan_features
+
+From: Vlad Yasevich <vyasevic@redhat.com>
+
+[ Upstream commit 51dfe7b944998eaeb2b34d314f3a6b16a5fd621b ]
+
+Including hardware acceleration features in vlan_features breaks
+stacked vlans (Q-in-Q) by marking the bottom vlan interface as
+capable of acceleration.  This causes one of the tags to be lost
+and the packets to be sent with a single vlan header.
+
+CC: Nithin Nayak Sujir <nsujir@broadcom.com>
+CC: Michael Chan <mchan@broadcom.com>
+Signed-off-by: Vlad Yasevich <vyasevic@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/tg3.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -17485,8 +17485,6 @@ static int tg3_init_one(struct pci_dev *
+       tg3_init_bufmgr_config(tp);
+-      features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+-
+       /* 5700 B0 chips do not support checksumming correctly due
+        * to hardware bugs.
+        */
+@@ -17518,7 +17516,8 @@ static int tg3_init_one(struct pci_dev *
+                       features |= NETIF_F_TSO_ECN;
+       }
+-      dev->features |= features;
++      dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
++                       NETIF_F_HW_VLAN_CTAG_RX;
+       dev->vlan_features |= features;
+       /*
diff --git a/queue-3.13/tipc-allow-connection-shutdown-callback-to-be-invoked.patch b/queue-3.13/tipc-allow-connection-shutdown-callback-to-be-invoked.patch
new file mode 100644 (file)
index 0000000..1d0f9d7
--- /dev/null
@@ -0,0 +1,124 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Ying Xue <ying.xue@windriver.com>
+Date: Thu, 6 Mar 2014 14:40:16 +0100
+Subject: tipc: allow connection shutdown callback to be invoked
+ in advance
+
+From: Ying Xue <ying.xue@windriver.com>
+
+[ Upstream commit 6d4ebeb4df0176b1973875840a9f7e91394c0685 ]
+
+Currently the connection shutdown callback is called when the
+connection instance is released in tipc_conn_kref_release(), while
+packet reception and packet transmission run in different threads.
+Even if the connection is closed by the receiving thread, its
+shutdown callback may not be called immediately, because the
+connection reference count is still non-zero at that moment. So
+although the connection has been shut down by the receiving thread,
+the sending thread does not know it yet. Before the shutdown
+callback is invoked to tell the sending thread that its connection
+has been closed, the sending thread may still deliver messages via
+tipc_conn_sendmsg(); this is why the following error appears:
+
+"Sending subscription event failed, no memory"
+
+To eliminate it, allow the connection shutdown callback to be
+called before the connection id is removed in tipc_close_conn(),
+so that the sending thread learns in time that its socket has been
+closed and stops sending messages to it. We also remove the
+"Sending XXX failed..." error reporting for the topology and
+config services.
+
+Signed-off-by: Ying Xue <ying.xue@windriver.com>
+Signed-off-by: Erik Hugne <erik.hugne@ericsson.com>
+Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/config.c |    9 ++-------
+ net/tipc/server.c |    8 +++-----
+ net/tipc/subscr.c |    8 ++------
+ 3 files changed, 7 insertions(+), 18 deletions(-)
+
+--- a/net/tipc/config.c
++++ b/net/tipc/config.c
+@@ -376,7 +376,6 @@ static void cfg_conn_msg_event(int conid
+       struct tipc_cfg_msg_hdr *req_hdr;
+       struct tipc_cfg_msg_hdr *rep_hdr;
+       struct sk_buff *rep_buf;
+-      int ret;
+       /* Validate configuration message header (ignore invalid message) */
+       req_hdr = (struct tipc_cfg_msg_hdr *)buf;
+@@ -398,12 +397,8 @@ static void cfg_conn_msg_event(int conid
+               memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
+               rep_hdr->tcm_len = htonl(rep_buf->len);
+               rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
+-
+-              ret = tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
+-                                      rep_buf->len);
+-              if (ret < 0)
+-                      pr_err("Sending cfg reply message failed, no memory\n");
+-
++              tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
++                                rep_buf->len);
+               kfree_skb(rep_buf);
+       }
+ }
+--- a/net/tipc/server.c
++++ b/net/tipc/server.c
+@@ -87,7 +87,6 @@ static void tipc_clean_outqueues(struct
+ static void tipc_conn_kref_release(struct kref *kref)
+ {
+       struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
+-      struct tipc_server *s = con->server;
+       if (con->sock) {
+               tipc_sock_release_local(con->sock);
+@@ -95,10 +94,6 @@ static void tipc_conn_kref_release(struc
+       }
+       tipc_clean_outqueues(con);
+-
+-      if (con->conid)
+-              s->tipc_conn_shutdown(con->conid, con->usr_data);
+-
+       kfree(con);
+ }
+@@ -181,6 +176,9 @@ static void tipc_close_conn(struct tipc_
+       struct tipc_server *s = con->server;
+       if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
++              if (con->conid)
++                      s->tipc_conn_shutdown(con->conid, con->usr_data);
++
+               spin_lock_bh(&s->idr_lock);
+               idr_remove(&s->conn_idr, con->conid);
+               s->idr_in_use--;
+--- a/net/tipc/subscr.c
++++ b/net/tipc/subscr.c
+@@ -96,20 +96,16 @@ static void subscr_send_event(struct tip
+ {
+       struct tipc_subscriber *subscriber = sub->subscriber;
+       struct kvec msg_sect;
+-      int ret;
+       msg_sect.iov_base = (void *)&sub->evt;
+       msg_sect.iov_len = sizeof(struct tipc_event);
+-
+       sub->evt.event = htohl(event, sub->swap);
+       sub->evt.found_lower = htohl(found_lower, sub->swap);
+       sub->evt.found_upper = htohl(found_upper, sub->swap);
+       sub->evt.port.ref = htohl(port_ref, sub->swap);
+       sub->evt.port.node = htohl(node, sub->swap);
+-      ret = tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL,
+-                              msg_sect.iov_base, msg_sect.iov_len);
+-      if (ret < 0)
+-              pr_err("Sending subscription event failed, no memory\n");
++      tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base,
++                        msg_sect.iov_len);
+ }
+ /**
diff --git a/queue-3.13/tipc-don-t-log-disabled-tasklet-handler-errors.patch b/queue-3.13/tipc-don-t-log-disabled-tasklet-handler-errors.patch
new file mode 100644 (file)
index 0000000..31e9210
--- /dev/null
@@ -0,0 +1,32 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Erik Hugne <erik.hugne@ericsson.com>
+Date: Thu, 6 Mar 2014 14:40:21 +0100
+Subject: tipc: don't log disabled tasklet handler errors
+
+From: Erik Hugne <erik.hugne@ericsson.com>
+
+[ Upstream commit 2892505ea170094f982516bb38105eac45f274b1 ]
+
+Failure to schedule a TIPC tasklet with tipc_k_signal because the
+tasklet handler is disabled is not an error. It means TIPC is
+currently in the process of shutting down. We remove the error
+logging in this case.
+
+Signed-off-by: Erik Hugne <erik.hugne@ericsson.com>
+Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/handler.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/tipc/handler.c
++++ b/net/tipc/handler.c
+@@ -58,7 +58,6 @@ unsigned int tipc_k_signal(Handler routi
+       spin_lock_bh(&qitem_lock);
+       if (!handler_enabled) {
+-              pr_err("Signal request ignored by handler\n");
+               spin_unlock_bh(&qitem_lock);
+               return -ENOPROTOOPT;
+       }
diff --git a/queue-3.13/tipc-drop-subscriber-connection-id-invalidation.patch b/queue-3.13/tipc-drop-subscriber-connection-id-invalidation.patch
new file mode 100644 (file)
index 0000000..f94eb63
--- /dev/null
@@ -0,0 +1,54 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Erik Hugne <erik.hugne@ericsson.com>
+Date: Thu, 6 Mar 2014 14:40:19 +0100
+Subject: tipc: drop subscriber connection id invalidation
+
+From: Erik Hugne <erik.hugne@ericsson.com>
+
+[ Upstream commit edcc0511b5ee7235282a688cd604e3ae7f9e1fc9 ]
+
+When a topology server subscriber is disconnected, the associated
+connection id is set to zero. A check vs zero is then done in the
+subscription timeout function to see if the subscriber has been
+shut down. This is unnecessary, because all subscription timers
+will be cancelled when a subscriber terminates. Setting the
+connection id to zero is actually harmful because id zero is the
+identity of the topology server listening socket, and can cause a
+race that leads to this socket being closed instead.
+
+Signed-off-by: Erik Hugne <erik.hugne@ericsson.com>
+Acked-by: Ying Xue <ying.xue@windriver.com>
+Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/subscr.c |   11 -----------
+ 1 file changed, 11 deletions(-)
+
+--- a/net/tipc/subscr.c
++++ b/net/tipc/subscr.c
+@@ -149,14 +149,6 @@ static void subscr_timeout(struct tipc_s
+       /* The spin lock per subscriber is used to protect its members */
+       spin_lock_bh(&subscriber->lock);
+-      /* Validate if the connection related to the subscriber is
+-       * closed (in case subscriber is terminating)
+-       */
+-      if (subscriber->conid == 0) {
+-              spin_unlock_bh(&subscriber->lock);
+-              return;
+-      }
+-
+       /* Validate timeout (in case subscription is being cancelled) */
+       if (sub->timeout == TIPC_WAIT_FOREVER) {
+               spin_unlock_bh(&subscriber->lock);
+@@ -211,9 +203,6 @@ static void subscr_release(struct tipc_s
+       spin_lock_bh(&subscriber->lock);
+-      /* Invalidate subscriber reference */
+-      subscriber->conid = 0;
+-
+       /* Destroy any existing subscriptions for subscriber */
+       list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
+                                subscription_list) {
diff --git a/queue-3.13/tipc-fix-connection-refcount-leak.patch b/queue-3.13/tipc-fix-connection-refcount-leak.patch
new file mode 100644 (file)
index 0000000..d3517d1
--- /dev/null
@@ -0,0 +1,47 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Ying Xue <ying.xue@windriver.com>
+Date: Thu, 6 Mar 2014 14:40:17 +0100
+Subject: tipc: fix connection refcount leak
+
+From: Ying Xue <ying.xue@windriver.com>
+
+[ Upstream commit 4652edb70e8a7eebbe47fa931940f65522c36e8f ]
+
+When tipc_conn_sendmsg() calls tipc_conn_lookup() to query a
+connection instance, its reference count value is increased if
+it's found. But if the connection subsequently turns out to be
+closed, the send work is not queued on the server's send workqueue,
+and the connection reference count is not decreased. This causes a
+reference count leak. To reproduce this problem, an application
+would need to open and close topology server connections at high
+intensity.
+
+We fix this by immediately decrementing the connection reference
+count if a send fails due to the connection being closed.
+
+Signed-off-by: Ying Xue <ying.xue@windriver.com>
+Acked-by: Erik Hugne <erik.hugne@ericsson.com>
+Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/server.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/tipc/server.c
++++ b/net/tipc/server.c
+@@ -427,10 +427,12 @@ int tipc_conn_sendmsg(struct tipc_server
+       list_add_tail(&e->list, &con->outqueue);
+       spin_unlock_bh(&con->outqueue_lock);
+-      if (test_bit(CF_CONNECTED, &con->flags))
++      if (test_bit(CF_CONNECTED, &con->flags)) {
+               if (!queue_work(s->send_wq, &con->swork))
+                       conn_put(con);
+-
++      } else {
++              conn_put(con);
++      }
+       return 0;
+ }
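
The pattern here is the usual "lookup takes a reference, every exit path must drop it". A small self-contained sketch of that rule with made-up names (not the TIPC code itself):

#include <stdbool.h>
#include <stdio.h>

struct conn {
        int  refcount;
        bool connected;
};

static void conn_put(struct conn *con)
{
        if (--con->refcount == 0)
                printf("connection freed\n");
}

/* Lookup hands back a reference that the caller now owns. */
static struct conn *conn_lookup(struct conn *con)
{
        con->refcount++;
        return con;
}

static int conn_sendmsg(struct conn *c)
{
        struct conn *con = conn_lookup(c);

        if (con->connected) {
                /* queued: the send worker would conn_put() when done */
                printf("message queued\n");
        } else {
                /* the leak in the old code: returning here without
                 * this put kept the reference taken by conn_lookup()
                 */
                conn_put(con);
        }
        return 0;
}

int main(void)
{
        struct conn c = { .refcount = 1, .connected = false };

        conn_sendmsg(&c);
        conn_put(&c); /* drop the creator's reference: prints "freed" */
        return 0;
}
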
diff --git a/queue-3.13/tipc-fix-memory-leak-during-module-removal.patch b/queue-3.13/tipc-fix-memory-leak-during-module-removal.patch
new file mode 100644 (file)
index 0000000..5075ddd
--- /dev/null
@@ -0,0 +1,88 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Erik Hugne <erik.hugne@ericsson.com>
+Date: Thu, 6 Mar 2014 14:40:20 +0100
+Subject: tipc: fix memory leak during module removal
+
+From: Erik Hugne <erik.hugne@ericsson.com>
+
+[ Upstream commit 1bb8dce57f4d15233688c68990852a10eb1cd79f ]
+
+When the TIPC module is removed, the tasklet handler is disabled
+before all other subsystems. This will cause lingering publications
+in the name table, because the node_down tasklets responsible for
+cleaning up publications from an unreachable node will never run.
+When the name table is shut down, these publications are detected
+and an error message is logged:
+tipc: nametbl_stop(): orphaned hash chain detected
+This is actually a memory leak, introduced with commit
+993b858e37b3120ee76d9957a901cca22312ffaa ("tipc: correct the order
+of stopping services at rmmod").
+
+Instead of just logging an error and leaking memory, we free
+the orphaned entries during name table shutdown.
+
+Signed-off-by: Erik Hugne <erik.hugne@ericsson.com>
+Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/name_table.c |   37 ++++++++++++++++++++++++++++++++++---
+ 1 file changed, 34 insertions(+), 3 deletions(-)
+
+--- a/net/tipc/name_table.c
++++ b/net/tipc/name_table.c
+@@ -942,20 +942,51 @@ int tipc_nametbl_init(void)
+       return 0;
+ }
++/**
++ * tipc_purge_publications - remove all publications for a given type
++ *
++ * tipc_nametbl_lock must be held when calling this function
++ */
++static void tipc_purge_publications(struct name_seq *seq)
++{
++      struct publication *publ, *safe;
++      struct sub_seq *sseq;
++      struct name_info *info;
++
++      if (!seq->sseqs) {
++              nameseq_delete_empty(seq);
++              return;
++      }
++      sseq = seq->sseqs;
++      info = sseq->info;
++      list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
++              tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
++                                       publ->ref, publ->key);
++      }
++}
++
+ void tipc_nametbl_stop(void)
+ {
+       u32 i;
++      struct name_seq *seq;
++      struct hlist_head *seq_head;
++      struct hlist_node *safe;
+       if (!table.types)
+               return;
+-      /* Verify name table is empty, then release it */
++      /* Verify name table is empty and purge any lingering
++       * publications, then release the name table
++       */
+       write_lock_bh(&tipc_nametbl_lock);
+       for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
+               if (hlist_empty(&table.types[i]))
+                       continue;
+-              pr_err("nametbl_stop(): orphaned hash chain detected\n");
+-              break;
++              seq_head = &table.types[i];
++              hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
++                      tipc_purge_publications(seq);
++              }
++              continue;
+       }
+       kfree(table.types);
+       table.types = NULL;
diff --git a/queue-3.13/tipc-fix-spinlock-recursion-bug-for-failed-subscriptions.patch b/queue-3.13/tipc-fix-spinlock-recursion-bug-for-failed-subscriptions.patch
new file mode 100644 (file)
index 0000000..36c3e7a
--- /dev/null
@@ -0,0 +1,138 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Erik Hugne <erik.hugne@ericsson.com>
+Date: Mon, 24 Mar 2014 16:56:38 +0100
+Subject: tipc: fix spinlock recursion bug for failed subscriptions
+
+From: Erik Hugne <erik.hugne@ericsson.com>
+
+[ Upstream commit a5d0e7c037119484a7006b883618bfa87996cb41 ]
+
+If a topology event subscription fails for any reason, such as out
+of memory, the maximum number being reached, or an invalid request,
+the correct behavior is to terminate the subscriber's connection to
+the topology server. This is currently broken and
+produces the following oops:
+
+[27.953662] tipc: Subscription rejected, illegal request
+[27.955329] BUG: spinlock recursion on CPU#1, kworker/u4:0/6
+[27.957066]  lock: 0xffff88003c67f408, .magic: dead4ead, .owner: kworker/u4:0/6, .owner_cpu: 1
+[27.958054] CPU: 1 PID: 6 Comm: kworker/u4:0 Not tainted 3.14.0-rc6+ #5
+[27.960230] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+[27.960874] Workqueue: tipc_rcv tipc_recv_work [tipc]
+[27.961430]  ffff88003c67f408 ffff88003de27c18 ffffffff815c0207 ffff88003de1c050
+[27.962292]  ffff88003de27c38 ffffffff815beec5 ffff88003c67f408 ffffffff817f0a8a
+[27.963152]  ffff88003de27c58 ffffffff815beeeb ffff88003c67f408 ffffffffa0013520
+[27.964023] Call Trace:
+[27.964292]  [<ffffffff815c0207>] dump_stack+0x45/0x56
+[27.964874]  [<ffffffff815beec5>] spin_dump+0x8c/0x91
+[27.965420]  [<ffffffff815beeeb>] spin_bug+0x21/0x26
+[27.965995]  [<ffffffff81083df6>] do_raw_spin_lock+0x116/0x140
+[27.966631]  [<ffffffff815c6215>] _raw_spin_lock_bh+0x15/0x20
+[27.967256]  [<ffffffffa0008540>] subscr_conn_shutdown_event+0x20/0xa0 [tipc]
+[27.968051]  [<ffffffffa000fde4>] tipc_close_conn+0xa4/0xb0 [tipc]
+[27.968722]  [<ffffffffa00101ba>] tipc_conn_terminate+0x1a/0x30 [tipc]
+[27.969436]  [<ffffffffa00089a2>] subscr_conn_msg_event+0x1f2/0x2f0 [tipc]
+[27.970209]  [<ffffffffa0010000>] tipc_receive_from_sock+0x90/0xf0 [tipc]
+[27.970972]  [<ffffffffa000fa79>] tipc_recv_work+0x29/0x50 [tipc]
+[27.971633]  [<ffffffff8105dbf5>] process_one_work+0x165/0x3e0
+[27.972267]  [<ffffffff8105e869>] worker_thread+0x119/0x3a0
+[27.972896]  [<ffffffff8105e750>] ? manage_workers.isra.25+0x2a0/0x2a0
+[27.973622]  [<ffffffff810648af>] kthread+0xdf/0x100
+[27.974168]  [<ffffffff810647d0>] ? kthread_create_on_node+0x1a0/0x1a0
+[27.974893]  [<ffffffff815ce13c>] ret_from_fork+0x7c/0xb0
+[27.975466]  [<ffffffff810647d0>] ? kthread_create_on_node+0x1a0/0x1a0
+
+The recursion occurs when subscr_terminate tries to grab the
+subscriber lock, which is already taken by subscr_conn_msg_event.
+We fix this by checking if the request to establish a new
+subscription was successful, and if not we initiate termination of
+the subscriber after we have released the subscriber lock.
+
+Signed-off-by: Erik Hugne <erik.hugne@ericsson.com>
+Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/subscr.c |   29 +++++++++++++++--------------
+ 1 file changed, 15 insertions(+), 14 deletions(-)
+
+--- a/net/tipc/subscr.c
++++ b/net/tipc/subscr.c
+@@ -263,9 +263,9 @@ static void subscr_cancel(struct tipc_su
+  *
+  * Called with subscriber lock held.
+  */
+-static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
+-                                           struct tipc_subscriber *subscriber)
+-{
++static int subscr_subscribe(struct tipc_subscr *s,
++                          struct tipc_subscriber *subscriber,
++                          struct tipc_subscription **sub_p) {
+       struct tipc_subscription *sub;
+       int swap;
+@@ -276,23 +276,21 @@ static struct tipc_subscription *subscr_
+       if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
+               s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
+               subscr_cancel(s, subscriber);
+-              return NULL;
++              return 0;
+       }
+       /* Refuse subscription if global limit exceeded */
+       if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
+               pr_warn("Subscription rejected, limit reached (%u)\n",
+                       TIPC_MAX_SUBSCRIPTIONS);
+-              subscr_terminate(subscriber);
+-              return NULL;
++              return -EINVAL;
+       }
+       /* Allocate subscription object */
+       sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
+       if (!sub) {
+               pr_warn("Subscription rejected, no memory\n");
+-              subscr_terminate(subscriber);
+-              return NULL;
++              return -ENOMEM;
+       }
+       /* Initialize subscription object */
+@@ -306,8 +304,7 @@ static struct tipc_subscription *subscr_
+           (sub->seq.lower > sub->seq.upper)) {
+               pr_warn("Subscription rejected, illegal request\n");
+               kfree(sub);
+-              subscr_terminate(subscriber);
+-              return NULL;
++              return -EINVAL;
+       }
+       INIT_LIST_HEAD(&sub->nameseq_list);
+       list_add(&sub->subscription_list, &subscriber->subscription_list);
+@@ -320,8 +317,8 @@ static struct tipc_subscription *subscr_
+                            (Handler)subscr_timeout, (unsigned long)sub);
+               k_start_timer(&sub->timer, sub->timeout);
+       }
+-
+-      return sub;
++      *sub_p = sub;
++      return 0;
+ }
+ /* Handle one termination request for the subscriber */
+@@ -335,10 +332,14 @@ static void subscr_conn_msg_event(int co
+                                 void *usr_data, void *buf, size_t len)
+ {
+       struct tipc_subscriber *subscriber = usr_data;
+-      struct tipc_subscription *sub;
++      struct tipc_subscription *sub = NULL;
+       spin_lock_bh(&subscriber->lock);
+-      sub = subscr_subscribe((struct tipc_subscr *)buf, subscriber);
++      if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) {
++              spin_unlock_bh(&subscriber->lock);
++              subscr_terminate(subscriber);
++              return;
++      }
+       if (sub)
+               tipc_nametbl_subscribe(sub);
+       spin_unlock_bh(&subscriber->lock);
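
The recursion above is the generic "don't call a teardown path that re-takes a lock you already hold" problem; the fix makes the helper report failure so the caller can drop the lock first and only then terminate. A compressed pthread sketch of that pattern (identifiers invented for illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t subscriber_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with subscriber_lock held; it must not take the lock itself,
 * and after the fix it only reports failure instead of tearing down.
 */
static int subscribe(int request_ok)
{
        return request_ok ? 0 : -1;
}

/* Teardown takes the lock, so callers must not hold it when calling. */
static void terminate_subscriber(void)
{
        pthread_mutex_lock(&subscriber_lock);
        printf("subscriber terminated\n");
        pthread_mutex_unlock(&subscriber_lock);
}

static void msg_event(int request_ok)
{
        int err;

        pthread_mutex_lock(&subscriber_lock);
        err = subscribe(request_ok);
        pthread_mutex_unlock(&subscriber_lock);

        /* act on the failure only once the lock has been dropped */
        if (err)
                terminate_subscriber();
}

int main(void)
{
        msg_event(0); /* invalid request: terminated, no self-deadlock */
        return 0;
}
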
diff --git a/queue-3.13/usbnet-include-wait-queue-head-in-device-structure.patch b/queue-3.13/usbnet-include-wait-queue-head-in-device-structure.patch
new file mode 100644 (file)
index 0000000..b2eb56a
--- /dev/null
@@ -0,0 +1,142 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Oliver Neukum <oneukum@suse.de>
+Date: Wed, 26 Mar 2014 14:32:51 +0100
+Subject: usbnet: include wait queue head in device structure
+
+From: Oliver Neukum <oneukum@suse.de>
+
+[ Upstream commit 14a0d635d18d0fb552dcc979d6d25106e6541f2e ]
+
+This fixes a race caused by freeing an object on the stack.
+Quoting Julius:
+> The issue is
+> that it calls usbnet_terminate_urbs() before that, which temporarily
+> installs a waitqueue in dev->wait in order to be able to wait on the
+> tasklet to run and finish up some queues. The waiting itself looks
+> okay, but the access to 'dev->wait' is totally unprotected and can
+> race arbitrarily. I think in this case usbnet_bh() managed to succeed
+> it's dev->wait check just before usbnet_terminate_urbs() sets it back
+> to NULL. The latter then finishes and the waitqueue_t structure on its
+> stack gets overwritten by other functions halfway through the
+> wake_up() call in usbnet_bh().
+
+The fix is to just not allocate the data structure on the stack.
+As dev->wait is abused as a flag, it also takes a runtime PM change
+to fix this bug.
+
+Signed-off-by: Oliver Neukum <oneukum@suse.de>
+Reported-by: Grant Grundler <grundler@google.com>
+Tested-by: Grant Grundler <grundler@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/usbnet.c   |   33 +++++++++++++++++++--------------
+ include/linux/usb/usbnet.h |    2 +-
+ 2 files changed, 20 insertions(+), 15 deletions(-)
+
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -753,14 +753,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs)
+ // precondition: never called in_interrupt
+ static void usbnet_terminate_urbs(struct usbnet *dev)
+ {
+-      DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
+       DECLARE_WAITQUEUE(wait, current);
+       int temp;
+       /* ensure there are no more active urbs */
+-      add_wait_queue(&unlink_wakeup, &wait);
++      add_wait_queue(&dev->wait, &wait);
+       set_current_state(TASK_UNINTERRUPTIBLE);
+-      dev->wait = &unlink_wakeup;
+       temp = unlink_urbs(dev, &dev->txq) +
+               unlink_urbs(dev, &dev->rxq);
+@@ -774,15 +772,14 @@ static void usbnet_terminate_urbs(struct
+                                 "waited for %d urb completions\n", temp);
+       }
+       set_current_state(TASK_RUNNING);
+-      dev->wait = NULL;
+-      remove_wait_queue(&unlink_wakeup, &wait);
++      remove_wait_queue(&dev->wait, &wait);
+ }
+ int usbnet_stop (struct net_device *net)
+ {
+       struct usbnet           *dev = netdev_priv(net);
+       struct driver_info      *info = dev->driver_info;
+-      int                     retval;
++      int                     retval, pm;
+       clear_bit(EVENT_DEV_OPEN, &dev->flags);
+       netif_stop_queue (net);
+@@ -792,6 +789,8 @@ int usbnet_stop (struct net_device *net)
+                  net->stats.rx_packets, net->stats.tx_packets,
+                  net->stats.rx_errors, net->stats.tx_errors);
++      /* to not race resume */
++      pm = usb_autopm_get_interface(dev->intf);
+       /* allow minidriver to stop correctly (wireless devices to turn off
+        * radio etc) */
+       if (info->stop) {
+@@ -818,6 +817,9 @@ int usbnet_stop (struct net_device *net)
+       dev->flags = 0;
+       del_timer_sync (&dev->delay);
+       tasklet_kill (&dev->bh);
++      if (!pm)
++              usb_autopm_put_interface(dev->intf);
++
+       if (info->manage_power &&
+           !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
+               info->manage_power(dev, 0);
+@@ -1438,11 +1440,12 @@ static void usbnet_bh (unsigned long par
+       /* restart RX again after disabling due to high error rate */
+       clear_bit(EVENT_RX_KILL, &dev->flags);
+-      // waiting for all pending urbs to complete?
+-      if (dev->wait) {
+-              if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
+-                      wake_up (dev->wait);
+-              }
++      /* waiting for all pending urbs to complete?
++       * only then can we forgo submitting anew
++       */
++      if (waitqueue_active(&dev->wait)) {
++              if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
++                      wake_up_all(&dev->wait);
+       // or are we maybe short a few urbs?
+       } else if (netif_running (dev->net) &&
+@@ -1581,6 +1584,7 @@ usbnet_probe (struct usb_interface *udev
+       dev->driver_name = name;
+       dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
+                               | NETIF_MSG_PROBE | NETIF_MSG_LINK);
++      init_waitqueue_head(&dev->wait);
+       skb_queue_head_init (&dev->rxq);
+       skb_queue_head_init (&dev->txq);
+       skb_queue_head_init (&dev->done);
+@@ -1792,9 +1796,10 @@ int usbnet_resume (struct usb_interface
+               spin_unlock_irq(&dev->txq.lock);
+               if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+-                      /* handle remote wakeup ASAP */
+-                      if (!dev->wait &&
+-                              netif_device_present(dev->net) &&
++                      /* handle remote wakeup ASAP
++                       * we cannot race against stop
++                       */
++                      if (netif_device_present(dev->net) &&
+                               !timer_pending(&dev->delay) &&
+                               !test_bit(EVENT_RX_HALT, &dev->flags))
+                                       rx_alloc_submit(dev, GFP_NOIO);
+--- a/include/linux/usb/usbnet.h
++++ b/include/linux/usb/usbnet.h
+@@ -30,7 +30,7 @@ struct usbnet {
+       struct driver_info      *driver_info;
+       const char              *driver_name;
+       void                    *driver_priv;
+-      wait_queue_head_t       *wait;
++      wait_queue_head_t       wait;
+       struct mutex            phy_mutex;
+       unsigned char           suspend_count;
+       unsigned char           pkt_cnt, pkt_err;
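
The structural point of the fix is that the wait primitive must live in the long-lived device object, not on a transient stack frame that another context may still wake. A pthread condition-variable sketch of the same shape (illustrative names, not the usbnet code):

#include <pthread.h>
#include <stdio.h>

/* The wait primitive lives inside the long-lived device object, never
 * on a transient stack frame, so any context can safely signal it for
 * the whole lifetime of the device.
 */
struct fake_usbnet {
        pthread_mutex_t lock;
        pthread_cond_t  wait;   /* was a wait_queue_head_t * on the stack */
        int             pending_urbs;
};

static void device_init(struct fake_usbnet *dev)
{
        pthread_mutex_init(&dev->lock, NULL);
        pthread_cond_init(&dev->wait, NULL);
        dev->pending_urbs = 0;
}

static void urb_complete(struct fake_usbnet *dev)
{
        pthread_mutex_lock(&dev->lock);
        if (--dev->pending_urbs == 0)
                pthread_cond_broadcast(&dev->wait); /* like wake_up_all() */
        pthread_mutex_unlock(&dev->lock);
}

static void terminate_urbs(struct fake_usbnet *dev)
{
        pthread_mutex_lock(&dev->lock);
        while (dev->pending_urbs)
                pthread_cond_wait(&dev->wait, &dev->lock);
        pthread_mutex_unlock(&dev->lock);
        printf("all urbs completed\n");
}

int main(void)
{
        struct fake_usbnet dev;

        device_init(&dev);
        dev.pending_urbs = 1;
        urb_complete(&dev);   /* in the kernel this runs from the BH */
        terminate_urbs(&dev); /* nothing pending, returns at once    */
        return 0;
}
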
diff --git a/queue-3.13/vhost-fix-total-length-when-packets-are-too-short.patch b/queue-3.13/vhost-fix-total-length-when-packets-are-too-short.patch
new file mode 100644 (file)
index 0000000..8a3864a
--- /dev/null
@@ -0,0 +1,63 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Thu, 27 Mar 2014 12:00:26 +0200
+Subject: vhost: fix total length when packets are too short
+
+From: "Michael S. Tsirkin" <mst@redhat.com>
+
+[ Upstream commit d8316f3991d207fe32881a9ac20241be8fa2bad0 ]
+
+When mergeable buffers are disabled, and the
+incoming packet is too large for the rx buffer,
+get_rx_bufs returns success.
+
+This was intentional, in order to make recvmsg
+truncate the packet; handle_rx would then
+detect err != sock_len and drop it.
+
+Unfortunately we pass the original sock_len to
+recvmsg - which means we use parts of the iov that
+were not fully validated.
+
+Fix this up by detecting the overrun and dropping
+the packet immediately.
+
+CVE-2014-0077
+
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/net.c |   14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -528,6 +528,12 @@ static int get_rx_bufs(struct vhost_virt
+       *iovcount = seg;
+       if (unlikely(log))
+               *log_num = nlogs;
++
++      /* Detect overrun */
++      if (unlikely(datalen > 0)) {
++              r = UIO_MAXIOV + 1;
++              goto err;
++      }
+       return headcount;
+ err:
+       vhost_discard_vq_desc(vq, headcount);
+@@ -583,6 +589,14 @@ static void handle_rx(struct vhost_net *
+               /* On error, stop handling until the next kick. */
+               if (unlikely(headcount < 0))
+                       break;
++              /* On overrun, truncate and discard */
++              if (unlikely(headcount > UIO_MAXIOV)) {
++                      msg.msg_iovlen = 1;
++                      err = sock->ops->recvmsg(NULL, sock, &msg,
++                                               1, MSG_DONTWAIT | MSG_TRUNC);
++                      pr_debug("Discarded rx packet: len %zd\n", sock_len);
++                      continue;
++              }
+               /* OK, now we need to know about added descriptors. */
+               if (!headcount) {
+                       if (unlikely(vhost_enable_notify(&net->dev, vq))) {
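
The added check turns "buffers too small" into an explicit overrun result instead of silently truncating with the original length. A tiny sketch of that check with invented names (stand-in logic, not the vhost code):

#include <stdio.h>

#define OVERRUN (-1)

/* Pretend to gather receive buffers until packet_len bytes fit.
 * Returns the number of buffers used, or OVERRUN when the available
 * buffers cannot hold the whole packet (the case the patch now drops
 * instead of truncating).
 */
static int gather_rx_bufs(int packet_len, const int *buf_sizes, int nbufs)
{
        int used = 0;

        while (packet_len > 0 && used < nbufs)
                packet_len -= buf_sizes[used++];

        if (packet_len > 0)     /* "datalen > 0" in the patch */
                return OVERRUN;
        return used;
}

int main(void)
{
        const int bufs[] = { 1500, 1500 };

        printf("2000-byte packet -> %d buffers\n",
               gather_rx_bufs(2000, bufs, 2));
        printf("9000-byte packet -> %d (overrun, dropped)\n",
               gather_rx_bufs(9000, bufs, 2));
        return 0;
}
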
diff --git a/queue-3.13/vhost-validate-vhost_get_vq_desc-return-value.patch b/queue-3.13/vhost-validate-vhost_get_vq_desc-return-value.patch
new file mode 100644 (file)
index 0000000..5c4c71b
--- /dev/null
@@ -0,0 +1,44 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Thu, 27 Mar 2014 12:53:37 +0200
+Subject: vhost: validate vhost_get_vq_desc return value
+
+From: "Michael S. Tsirkin" <mst@redhat.com>
+
+[ Upstream commit a39ee449f96a2cd44ce056d8a0a112211a9b1a1f ]
+
+vhost fails to validate a negative error code
+from vhost_get_vq_desc, causing a crash:
+-EFAULT, which is 0xfffffff2, ends up being used
+as the vector size, which exceeds the allocated size.
+
+The code in question was introduced in commit
+8dd014adfea6f173c1ef6378f7e5e7924866c923
+    vhost-net: mergeable buffers support
+
+CVE-2014-0055
+
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/net.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -501,9 +501,13 @@ static int get_rx_bufs(struct vhost_virt
+                       r = -ENOBUFS;
+                       goto err;
+               }
+-              d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
++              r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+                                     ARRAY_SIZE(vq->iov) - seg, &out,
+                                     &in, log, log_num);
++              if (unlikely(r < 0))
++                      goto err;
++
++              d = r;
+               if (d == vq->num) {
+                       r = 0;
+                       goto err;
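
The underlying mistake is using a return value as a count or index before checking whether it encodes a negative error. A minimal sketch of the defensive pattern the patch adds (the helper here is a stand-in, not the vhost API):

#include <stdio.h>

/* Stand-in for vhost_get_vq_desc(): a descriptor index on success,
 * a negative errno on failure (here simply -14, i.e. -EFAULT).
 */
static int get_desc(int fail)
{
        return fail ? -14 : 7;
}

int main(void)
{
        int r = get_desc(0);

        if (r < 0) {             /* the check the patch adds */
                printf("error %d, not a descriptor\n", r);
                return 1;
        }

        /* only now is it safe to treat the value as an index/size */
        unsigned int d = (unsigned int)r;
        printf("descriptor %u\n", d);
        return 0;
}
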
diff --git a/queue-3.13/virtio-net-correct-error-handling-of-virtqueue_kick.patch b/queue-3.13/virtio-net-correct-error-handling-of-virtqueue_kick.patch
new file mode 100644 (file)
index 0000000..958cbf6
--- /dev/null
@@ -0,0 +1,63 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Jason Wang <jasowang@redhat.com>
+Date: Wed, 26 Mar 2014 13:03:00 +0800
+Subject: virtio-net: correct error handling of virtqueue_kick()
+
+From: Jason Wang <jasowang@redhat.com>
+
+[ Upstream commit 681daee2443291419c57cccb0671f5f94a839005 ]
+
+The current error handling of virtqueue_kick() was wrong in two places:
+- The skb was freed immediately when virtqueue_kick() failed during
+  xmit. This may lead to a double free since the skb was not detached
+  from the virtqueue.
+- try_fill_recv() returned false when virtqueue_kick() failed. This
+  leads to unnecessary rescheduling of the refill work.
+
+Actually, it's safe to just ignore the kick failure in those two
+places. So this patch fixes this by partially reverting commit
+67975901183799af8e93ec60e322f9e2a1940b9b.
+
+Fixes 67975901183799af8e93ec60e322f9e2a1940b9b
+(virtio_net: verify if virtqueue_kick() succeeded).
+
+Cc: Heinz Graalfs <graalfs@linux.vnet.ibm.com>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/virtio_net.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -633,8 +633,7 @@ static bool try_fill_recv(struct receive
+       } while (rq->vq->num_free);
+       if (unlikely(rq->num > rq->max))
+               rq->max = rq->num;
+-      if (unlikely(!virtqueue_kick(rq->vq)))
+-              return false;
++      virtqueue_kick(rq->vq);
+       return !oom;
+ }
+@@ -840,7 +839,7 @@ static netdev_tx_t start_xmit(struct sk_
+       err = xmit_skb(sq, skb);
+       /* This should not happen! */
+-      if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
++      if (unlikely(err)) {
+               dev->stats.tx_fifo_errors++;
+               if (net_ratelimit())
+                       dev_warn(&dev->dev,
+@@ -849,6 +848,7 @@ static netdev_tx_t start_xmit(struct sk_
+               kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
++      virtqueue_kick(sq->vq);
+       /* Don't wait up for transmitted skbs to be freed. */
+       skb_orphan(skb);
diff --git a/queue-3.13/vlan-set-correct-source-mac-address-with-tx-vlan-offload-enabled.patch b/queue-3.13/vlan-set-correct-source-mac-address-with-tx-vlan-offload-enabled.patch
new file mode 100644 (file)
index 0000000..a048997
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: =?UTF-8?q?Peter=20Bostr=C3=B6m?= <peter.bostrom@netrounds.com>
+Date: Mon, 10 Mar 2014 16:17:15 +0100
+Subject: vlan: Set correct source MAC address with TX VLAN offload enabled
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Peter=20Bostr=C3=B6m?= <peter.bostrom@netrounds.com>
+
+[ Upstream commit dd38743b4cc2f86be250eaf156cf113ba3dd531a ]
+
+With TX VLAN offload enabled, the source MAC address for frames sent using the
+VLAN interface is currently set to the address of the real interface. This is
+wrong since the VLAN interface may be configured with a different address.
+
+The bug was introduced in commit 2205369a314e12fcec4781cc73ac9c08fc2b47de
+("vlan: Fix header ops passthru when doing TX VLAN offload.").
+
+This patch sets the source address before calling the create function of the
+real interface.
+
+Signed-off-by: Peter Boström <peter.bostrom@netrounds.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/8021q/vlan_dev.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -538,6 +538,9 @@ static int vlan_passthru_hard_header(str
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+       struct net_device *real_dev = vlan->real_dev;
++      if (saddr == NULL)
++              saddr = dev->dev_addr;
++
+       return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+ }
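
A minimal sketch of the fallback the fix introduces, with invented fake_* types
standing in for net_device and dev_hard_header(): when the caller passes no
source address, the VLAN device's own address is used before delegating to the
real device.

#include <stdio.h>

#define ETH_ALEN 6

struct fake_dev { unsigned char dev_addr[ETH_ALEN]; };

/* Stand-in for dev_hard_header() on the real device. */
static int fake_hard_header(const struct fake_dev *real_dev,
                            const unsigned char *saddr)
{
        printf("real dev %02x:..:%02x builds header with source %02x:..:%02x\n",
               real_dev->dev_addr[0], real_dev->dev_addr[5],
               saddr[0], saddr[5]);
        return 0;
}

/* Mirrors the patched vlan_passthru_hard_header(): a NULL saddr now
 * resolves to the VLAN device's own address, not the real device's. */
static int sketch_vlan_header(struct fake_dev *vlan_dev,
                              struct fake_dev *real_dev,
                              const unsigned char *saddr)
{
        if (saddr == NULL)
                saddr = vlan_dev->dev_addr;
        return fake_hard_header(real_dev, saddr);
}

int main(void)
{
        struct fake_dev vlan = { { 0x02, 0, 0, 0, 0, 0x01 } };
        struct fake_dev real = { { 0x02, 0, 0, 0, 0, 0x02 } };

        /* No explicit source: the VLAN address 02:..:01 must be used. */
        return sketch_vlan_header(&vlan, &real, NULL);
}
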
diff --git a/queue-3.13/vlan-set-hard_header_len-according-to-available-acceleration.patch b/queue-3.13/vlan-set-hard_header_len-according-to-available-acceleration.patch
new file mode 100644 (file)
index 0000000..19b3ae0
--- /dev/null
@@ -0,0 +1,52 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Vlad Yasevich <vyasevic@redhat.com>
+Date: Wed, 26 Mar 2014 11:47:56 -0400
+Subject: vlan: Set hard_header_len according to available acceleration
+
+From: Vlad Yasevich <vyasevic@redhat.com>
+
+[ Upstream commit fc0d48b8fb449ca007b2057328abf736cb516168 ]
+
+Currently, if the card supports CTAG acceleration we do not
+account for the VLAN header even if we are configuring an
+802.1ad VLAN.  This is not ideal, since we will do software
+tagging for 802.1ad, which causes a data copy on skb head expansion.
+Configure the length based on the available hardware offload
+capabilities and the VLAN protocol.
+
+CC: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Vlad Yasevich <vyasevic@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/8021q/vlan.c     |    4 +++-
+ net/8021q/vlan_dev.c |    3 ++-
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -307,9 +307,11 @@ static void vlan_sync_address(struct net
+ static void vlan_transfer_features(struct net_device *dev,
+                                  struct net_device *vlandev)
+ {
++      struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
++
+       vlandev->gso_max_size = dev->gso_max_size;
+-      if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
++      if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
+               vlandev->hard_header_len = dev->hard_header_len;
+       else
+               vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -592,7 +592,8 @@ static int vlan_dev_init(struct net_devi
+ #endif
+       dev->needed_headroom = real_dev->needed_headroom;
+-      if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
++      if (vlan_hw_offload_capable(real_dev->features,
++                                  vlan_dev_priv(dev)->vlan_proto)) {
+               dev->header_ops      = &vlan_passthru_header_ops;
+               dev->hard_header_len = real_dev->hard_header_len;
+       } else {
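
The length selection can be illustrated with a small standalone C model;
hw_offload_capable() below is a simplified stand-in for the kernel's
vlan_hw_offload_capable() and assumes a NIC that accelerates only CTAG
(802.1Q) insertion.

#include <stdbool.h>
#include <stdio.h>

#define VLAN_HLEN    4
#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

/* Simplified stand-in for vlan_hw_offload_capable(): pretend the NIC can
 * accelerate only CTAG (802.1Q) insertion, not STAG (802.1ad). */
static bool hw_offload_capable(bool ctag_tx, bool stag_tx, int proto)
{
        return (proto == ETH_P_8021Q && ctag_tx) ||
               (proto == ETH_P_8021AD && stag_tx);
}

int main(void)
{
        int real_hhl = 14;                      /* real device header length */
        int protos[] = { ETH_P_8021Q, ETH_P_8021AD };

        for (int i = 0; i < 2; i++) {
                int hhl = hw_offload_capable(true, false, protos[i])
                          ? real_hhl               /* tag inserted by hw */
                          : real_hhl + VLAN_HLEN;  /* software tagging   */
                printf("proto 0x%04x -> hard_header_len %d\n", protos[i], hhl);
        }
        return 0;
}

With only CTAG acceleration available, the 802.1ad case falls back to software
tagging and therefore reserves the extra VLAN_HLEN bytes, which is the
behaviour the patch makes explicit.
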
diff --git a/queue-3.13/vxlan-fix-nonfunctional-neigh_reduce.patch b/queue-3.13/vxlan-fix-nonfunctional-neigh_reduce.patch
new file mode 100644 (file)
index 0000000..eebcd69
--- /dev/null
@@ -0,0 +1,208 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: David Stevens <dlstevens@us.ibm.com>
+Date: Mon, 24 Mar 2014 10:39:58 -0400
+Subject: vxlan: fix nonfunctional neigh_reduce()
+
+From: David Stevens <dlstevens@us.ibm.com>
+
+[ Upstream commit 4b29dba9c085a4fb79058fb1c45a2f6257ca3dfa ]
+
+The VXLAN neigh_reduce() code has been completely non-functional
+since it was checked in. Specific errors:
+
+1) The original code drops all packets with a multicast destination address,
+       even though neighbor solicitations are sent to the solicited-node
+       address, a multicast address. The code after this check was never run.
+2) The neighbor table lookup used the IPv6 header destination, which is the
+       solicited node address, rather than the target address from the
+       neighbor solicitation. So neighbor lookups would always fail if it
+       got this far. The same applies to L3MISSes.
+3) The code calls ndisc_send_na(), which does a send on the tunnel device.
+       The context for neigh_reduce() is the transmit path, vxlan_xmit(),
+       where the host or a bridge-attached neighbor is trying to transmit
+       a neighbor solicitation. To respond to it, the tunnel endpoint needs
+       to do a *receive* of the appropriate neighbor advertisement. Doing a
+       send would only try to send the advertisement, encapsulated, to the
+       remote destinations in the fdb -- hosts that definitely did not do the
+       corresponding solicitation.
+4) The code uses the tunnel endpoint IPv6 forwarding flag to determine the
+       isrouter flag in the advertisement. This has nothing to do with whether
+       or not the target is a router, and generally won't be set since the
+       tunnel endpoint is bridging, not routing, traffic.
+
+       The patch below creates a proxy neighbor advertisement to respond to
+neighbor solicitations as intended, providing proper IPv6 support for neighbor
+reduction.
+
+Signed-off-by: David L Stevens <dlstevens@us.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/vxlan.c |  127 ++++++++++++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 113 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1232,15 +1232,103 @@ out:
+ }
+ #if IS_ENABLED(CONFIG_IPV6)
++
++static struct sk_buff *vxlan_na_create(struct sk_buff *request,
++      struct neighbour *n, bool isrouter)
++{
++      struct net_device *dev = request->dev;
++      struct sk_buff *reply;
++      struct nd_msg *ns, *na;
++      struct ipv6hdr *pip6;
++      u8 *daddr;
++      int na_olen = 8; /* opt hdr + ETH_ALEN for target */
++      int ns_olen;
++      int i, len;
++
++      if (dev == NULL)
++              return NULL;
++
++      len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
++              sizeof(*na) + na_olen + dev->needed_tailroom;
++      reply = alloc_skb(len, GFP_ATOMIC);
++      if (reply == NULL)
++              return NULL;
++
++      reply->protocol = htons(ETH_P_IPV6);
++      reply->dev = dev;
++      skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
++      skb_push(reply, sizeof(struct ethhdr));
++      skb_set_mac_header(reply, 0);
++
++      ns = (struct nd_msg *)skb_transport_header(request);
++
++      daddr = eth_hdr(request)->h_source;
++      ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
++      for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
++              if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
++                      daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
++                      break;
++              }
++      }
++
++      /* Ethernet header */
++      memcpy(eth_hdr(reply)->h_dest, daddr, ETH_ALEN);
++      memcpy(eth_hdr(reply)->h_source, n->ha, ETH_ALEN);
++      eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
++      reply->protocol = htons(ETH_P_IPV6);
++
++      skb_pull(reply, sizeof(struct ethhdr));
++      skb_set_network_header(reply, 0);
++      skb_put(reply, sizeof(struct ipv6hdr));
++
++      /* IPv6 header */
++
++      pip6 = ipv6_hdr(reply);
++      memset(pip6, 0, sizeof(struct ipv6hdr));
++      pip6->version = 6;
++      pip6->priority = ipv6_hdr(request)->priority;
++      pip6->nexthdr = IPPROTO_ICMPV6;
++      pip6->hop_limit = 255;
++      pip6->daddr = ipv6_hdr(request)->saddr;
++      pip6->saddr = *(struct in6_addr *)n->primary_key;
++
++      skb_pull(reply, sizeof(struct ipv6hdr));
++      skb_set_transport_header(reply, 0);
++
++      na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
++
++      /* Neighbor Advertisement */
++      memset(na, 0, sizeof(*na)+na_olen);
++      na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
++      na->icmph.icmp6_router = isrouter;
++      na->icmph.icmp6_override = 1;
++      na->icmph.icmp6_solicited = 1;
++      na->target = ns->target;
++      memcpy(&na->opt[2], n->ha, ETH_ALEN);
++      na->opt[0] = ND_OPT_TARGET_LL_ADDR;
++      na->opt[1] = na_olen >> 3;
++
++      na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
++              &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
++              csum_partial(na, sizeof(*na)+na_olen, 0));
++
++      pip6->payload_len = htons(sizeof(*na)+na_olen);
++
++      skb_push(reply, sizeof(struct ipv6hdr));
++
++      reply->ip_summed = CHECKSUM_UNNECESSARY;
++
++      return reply;
++}
++
+ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+ {
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+-      struct neighbour *n;
+-      union vxlan_addr ipa;
++      struct nd_msg *msg;
+       const struct ipv6hdr *iphdr;
+       const struct in6_addr *saddr, *daddr;
+-      struct nd_msg *msg;
+-      struct inet6_dev *in6_dev = NULL;
++      struct neighbour *n;
++      struct inet6_dev *in6_dev;
+       in6_dev = __in6_dev_get(dev);
+       if (!in6_dev)
+@@ -1253,19 +1341,20 @@ static int neigh_reduce(struct net_devic
+       saddr = &iphdr->saddr;
+       daddr = &iphdr->daddr;
+-      if (ipv6_addr_loopback(daddr) ||
+-          ipv6_addr_is_multicast(daddr))
+-              goto out;
+-
+       msg = (struct nd_msg *)skb_transport_header(skb);
+       if (msg->icmph.icmp6_code != 0 ||
+           msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
+               goto out;
+-      n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
++      if (ipv6_addr_loopback(daddr) ||
++          ipv6_addr_is_multicast(&msg->target))
++              goto out;
++
++      n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
+       if (n) {
+               struct vxlan_fdb *f;
++              struct sk_buff *reply;
+               if (!(n->nud_state & NUD_CONNECTED)) {
+                       neigh_release(n);
+@@ -1279,13 +1368,23 @@ static int neigh_reduce(struct net_devic
+                       goto out;
+               }
+-              ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
+-                                       !!in6_dev->cnf.forwarding,
+-                                       true, false, false);
++              reply = vxlan_na_create(skb, n,
++                                      !!(f ? f->flags & NTF_ROUTER : 0));
++
+               neigh_release(n);
++
++              if (reply == NULL)
++                      goto out;
++
++              if (netif_rx_ni(reply) == NET_RX_DROP)
++                      dev->stats.rx_dropped++;
++
+       } else if (vxlan->flags & VXLAN_F_L3MISS) {
+-              ipa.sin6.sin6_addr = *daddr;
+-              ipa.sa.sa_family = AF_INET6;
++              union vxlan_addr ipa = {
++                      .sin6.sin6_addr = msg->target,
++                      .sa.sa_family = AF_INET6,
++              };
++
+               vxlan_ip_miss(dev, &ipa);
+       }
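
The option walk in vxlan_na_create() that picks the destination MAC for the
proxy advertisement can be exercised in isolation; the sketch below uses
hand-built option bytes and invented helper names, so it only models the loop,
not real packet parsing.

#include <stdio.h>

#define ETH_ALEN              6
#define ND_OPT_SOURCE_LL_ADDR 1

/* Walk the options that follow a neighbour solicitation and return the
 * source link-layer address option if present, otherwise fall back to
 * the Ethernet source. Mirrors the loop in vxlan_na_create(); the data
 * below is hand-built, not a real packet. */
static const unsigned char *pick_dest_mac(const unsigned char *opts,
                                          int opts_len,
                                          const unsigned char *eth_src)
{
        int i;

        for (i = 0; i < opts_len - 1; i += opts[i + 1] << 3) {
                if (opts[i + 1] == 0)
                        break;                  /* malformed option, stop */
                if (opts[i] == ND_OPT_SOURCE_LL_ADDR)
                        return &opts[i + 2];    /* skip type and length   */
        }
        return eth_src;
}

int main(void)
{
        /* one option: type=1, len=1 (8 bytes), then the 6-byte MAC */
        unsigned char opts[8] = { ND_OPT_SOURCE_LL_ADDR, 1,
                                  0x02, 0, 0, 0, 0, 0xaa };
        unsigned char eth_src[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0xbb };
        const unsigned char *mac = pick_dest_mac(opts, sizeof(opts), eth_src);

        printf("advertisement goes to %02x:..:%02x\n", mac[0], mac[5]);
        return 0;
}
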
diff --git a/queue-3.13/vxlan-fix-potential-null-dereference-in-arp_reduce.patch b/queue-3.13/vxlan-fix-potential-null-dereference-in-arp_reduce.patch
new file mode 100644 (file)
index 0000000..5bbbb72
--- /dev/null
@@ -0,0 +1,33 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: David Stevens <dlstevens@us.ibm.com>
+Date: Tue, 18 Mar 2014 12:32:29 -0400
+Subject: vxlan: fix potential NULL dereference in arp_reduce()
+
+From: David Stevens <dlstevens@us.ibm.com>
+
+[ Upstream commit 7346135dcd3f9b57f30a5512094848c678d7143e ]
+
+This patch fixes a NULL pointer dereference in the event of an
+skb allocation failure in arp_reduce().
+
+Signed-Off-By: David L Stevens <dlstevens@us.ibm.com>
+Acked-by: Cong Wang <cwang@twopensource.com>
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/vxlan.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1208,6 +1208,9 @@ static int arp_reduce(struct net_device
+               neigh_release(n);
++              if (reply == NULL)
++                      goto out;
++
+               skb_reset_mac_header(reply);
+               __skb_pull(reply, skb_network_offset(reply));
+               reply->ip_summed = CHECKSUM_UNNECESSARY;
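
The pattern of the fix -- release the neighbour entry first, then bail out if
the reply skb could not be allocated -- is shown in this small userspace
sketch; fake_arp_create() and the surrounding names are stand-ins, not the
kernel functions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_reply { char data[64]; };

/* Stand-in for the reply-skb allocation in arp_reduce(); returns NULL
 * when asked to model an allocation failure. */
static struct fake_reply *fake_arp_create(int fail)
{
        return fail ? NULL : calloc(1, sizeof(struct fake_reply));
}

/* Mirrors the patched tail of arp_reduce(): the neighbour entry is
 * released either way, then we bail out if the reply could not be
 * allocated instead of dereferencing a NULL pointer. */
static int sketch_arp_reduce(int alloc_fails)
{
        struct fake_reply *reply = fake_arp_create(alloc_fails);

        /* neigh_release(n) would happen here in the real code */

        if (reply == NULL)
                return -1;                       /* the new "goto out" */

        strcpy(reply->data, "proxy ARP reply");  /* safe to touch now  */
        printf("%s\n", reply->data);
        free(reply);
        return 0;
}

int main(void)
{
        sketch_arp_reduce(0);                      /* normal path  */
        return sketch_arp_reduce(1) == -1 ? 0 : 1; /* failure path */
}
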
diff --git a/queue-3.13/xen-netback-disable-rogue-vif-in-kthread-context.patch b/queue-3.13/xen-netback-disable-rogue-vif-in-kthread-context.patch
new file mode 100644 (file)
index 0000000..0b3df23
--- /dev/null
@@ -0,0 +1,128 @@
+From foo@baz Thu Apr 10 22:03:05 PDT 2014
+From: Wei Liu <wei.liu2@citrix.com>
+Date: Tue, 1 Apr 2014 12:46:12 +0100
+Subject: xen-netback: disable rogue vif in kthread context
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wei Liu <wei.liu2@citrix.com>
+
+[ Upstream commit e9d8b2c2968499c1f96563e6522c56958d5a1d0d ]
+
+When netback discovers that a frontend is sending malformed packets,
+it disables the interface which serves that frontend.
+
+However, disabling a network interface involves taking a mutex, which
+cannot be done in softirq context, so we need to defer this work to
+kthread context.
+
+This patch does the following:
+1. introduce a flag to indicate that the interface is disabled.
+2. check that flag in the TX path; don't do any work if it's set.
+3. check that flag in the RX path; turn off the interface if it's set.
+
+The reason to disable it in the RX path is that RX uses a kthread. After
+this change the behavior of netback is still consistent -- it won't do
+any TX work for a rogue frontend, and the interface will eventually be
+turned off.
+
+Also change a "continue" to "break" after xenvif_fatal_tx_err(), as it
+doesn't make sense to continue processing packets if the frontend is rogue.
+
+This is a fix for XSA-90.
+
+Reported-by: Török Edwin <edwin@etorok.net>
+Signed-off-by: Wei Liu <wei.liu2@citrix.com>
+Cc: Ian Campbell <ian.campbell@citrix.com>
+Reviewed-by: David Vrabel <david.vrabel@citrix.com>
+Acked-by: Ian Campbell <ian.campbell@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/common.h    |    5 +++++
+ drivers/net/xen-netback/interface.c |   11 +++++++++++
+ drivers/net/xen-netback/netback.c   |   16 ++++++++++++++--
+ 3 files changed, 30 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -113,6 +113,11 @@ struct xenvif {
+       domid_t          domid;
+       unsigned int     handle;
++      /* Is this interface disabled? True when backend discovers
++       * frontend is rogue.
++       */
++      bool disabled;
++
+       /* Use NAPI for guest TX */
+       struct napi_struct napi;
+       /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -67,6 +67,15 @@ static int xenvif_poll(struct napi_struc
+       struct xenvif *vif = container_of(napi, struct xenvif, napi);
+       int work_done;
++      /* This vif is rogue; we pretend there is nothing to do
++       * for this vif to deschedule it from NAPI. But this interface
++       * will be turned off in thread context later.
++       */
++      if (unlikely(vif->disabled)) {
++              napi_complete(napi);
++              return 0;
++      }
++
+       work_done = xenvif_tx_action(vif, budget);
+       if (work_done < budget) {
+@@ -323,6 +332,8 @@ struct xenvif *xenvif_alloc(struct devic
+       vif->ip_csum = 1;
+       vif->dev = dev;
++      vif->disabled = false;
++
+       vif->credit_bytes = vif->remaining_credit = ~0UL;
+       vif->credit_usec  = 0UL;
+       init_timer(&vif->credit_timeout);
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -752,7 +752,8 @@ static void xenvif_tx_err(struct xenvif
+ static void xenvif_fatal_tx_err(struct xenvif *vif)
+ {
+       netdev_err(vif->dev, "fatal error; disabling device\n");
+-      xenvif_carrier_off(vif);
++      vif->disabled = true;
++      xenvif_kick_thread(vif);
+ }
+ static int xenvif_count_requests(struct xenvif *vif,
+@@ -1479,7 +1480,7 @@ static unsigned xenvif_tx_build_gops(str
+                                  vif->tx.sring->req_prod, vif->tx.req_cons,
+                                  XEN_NETIF_TX_RING_SIZE);
+                       xenvif_fatal_tx_err(vif);
+-                      continue;
++                      break;
+               }
+               work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
+@@ -1873,7 +1874,18 @@ int xenvif_kthread(void *data)
+       while (!kthread_should_stop()) {
+               wait_event_interruptible(vif->wq,
+                                        rx_work_todo(vif) ||
++                                       vif->disabled ||
+                                        kthread_should_stop());
++
++              /* This frontend is found to be rogue, disable it in
++               * kthread context. Currently this is only set when
++               * netback finds out frontend sends malformed packet,
++               * but we cannot disable the interface in softirq
++               * context so we defer it here.
++               */
++              if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
++                      xenvif_carrier_off(vif);
++
+               if (kthread_should_stop())
+                       break;
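
The deferral scheme (set a flag in a context that must not sleep, kick a
thread, let the thread do the mutex-taking work) can be modelled in plain C
with pthreads; everything below is an invented toy that only mirrors the
flag-and-kick structure, not the driver itself.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Toy model of the deferral: the "fatal error" path only sets a flag and
 * wakes the thread, and the thread performs the real carrier-off work.
 * All names are invented for the sketch and do not match the driver. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static bool disabled;
static bool stop;

static void fatal_tx_err(void)          /* the side that must not sleep */
{
        pthread_mutex_lock(&lock);
        disabled = true;                /* vif->disabled = true */
        pthread_cond_signal(&wake);     /* xenvif_kick_thread() */
        pthread_mutex_unlock(&lock);
}

static void *vif_kthread(void *arg)     /* the side that may take mutexes */
{
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!stop) {
                while (!disabled && !stop)
                        pthread_cond_wait(&wake, &lock);
                if (disabled) {
                        printf("carrier off, done in thread context\n");
                        disabled = false;
                }
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t thr;

        pthread_create(&thr, NULL, vif_kthread, NULL);
        fatal_tx_err();                 /* pretend a malformed packet hit */
        sleep(1);

        pthread_mutex_lock(&lock);
        stop = true;
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);
        pthread_join(thr, NULL);
        return 0;
}
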
diff --git a/queue-3.13/xen-netback-fix-issue-caused-by-using-gso_type-wrongly.patch b/queue-3.13/xen-netback-fix-issue-caused-by-using-gso_type-wrongly.patch
new file mode 100644 (file)
index 0000000..ab34e0d
--- /dev/null
@@ -0,0 +1,102 @@
+From foo@baz Thu Apr 10 22:03:04 PDT 2014
+From: Annie Li <annie.li@oracle.com>
+Date: Mon, 10 Mar 2014 22:58:34 +0800
+Subject: Xen-netback: Fix issue caused by using gso_type wrongly
+
+From: Annie Li <annie.li@oracle.com>
+
+[ Upstream commit 5bd076708664313f2bdbbc1cf71093313b7774a1 ]
+
+Currently netback uses gso_type to check whether the skb carries a
+GSO payload, and this is wrong. gso_size is the right field for
+checking GSO existence; gso_type only identifies the GSO type.
+
+Some skbs contain a nonzero gso_type and a zero gso_size; current
+netback would treat these skbs as GSO and create a wrong response
+for them. This also makes ssh to the domU fail from another server.
+
+V2: use the skb_is_gso() function, as Paul Durrant suggested.
+
+Signed-off-by: Annie Li <annie.li@oracle.com>
+Acked-by: Wei Liu <wei.liu2@citrix.com>
+Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/netback.c |   34 +++++++++++++++-------------------
+ 1 file changed, 15 insertions(+), 19 deletions(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -338,7 +338,7 @@ static void xenvif_gop_frag_copy(struct
+       struct gnttab_copy *copy_gop;
+       struct xenvif_rx_meta *meta;
+       unsigned long bytes;
+-      int gso_type;
++      int gso_type = XEN_NETIF_GSO_TYPE_NONE;
+       /* Data must not cross a page boundary. */
+       BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
+@@ -397,12 +397,12 @@ static void xenvif_gop_frag_copy(struct
+               }
+               /* Leave a gap for the GSO descriptor. */
+-              if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+-                      gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+-              else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+-                      gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+-              else
+-                      gso_type = XEN_NETIF_GSO_TYPE_NONE;
++              if (skb_is_gso(skb)) {
++                      if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
++                              gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
++                      else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
++                              gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
++              }
+               if (*head && ((1 << gso_type) & vif->gso_mask))
+                       vif->rx.req_cons++;
+@@ -436,19 +436,15 @@ static int xenvif_gop_skb(struct sk_buff
+       int head = 1;
+       int old_meta_prod;
+       int gso_type;
+-      int gso_size;
+       old_meta_prod = npo->meta_prod;
+-      if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+-              gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+-              gso_size = skb_shinfo(skb)->gso_size;
+-      } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+-              gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+-              gso_size = skb_shinfo(skb)->gso_size;
+-      } else {
+-              gso_type = XEN_NETIF_GSO_TYPE_NONE;
+-              gso_size = 0;
++      gso_type = XEN_NETIF_GSO_TYPE_NONE;
++      if (skb_is_gso(skb)) {
++              if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
++                      gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
++              else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
++                      gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+       }
+       /* Set up a GSO prefix descriptor, if necessary */
+@@ -456,7 +452,7 @@ static int xenvif_gop_skb(struct sk_buff
+               req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+               meta = npo->meta + npo->meta_prod++;
+               meta->gso_type = gso_type;
+-              meta->gso_size = gso_size;
++              meta->gso_size = skb_shinfo(skb)->gso_size;
+               meta->size = 0;
+               meta->id = req->id;
+       }
+@@ -466,7 +462,7 @@ static int xenvif_gop_skb(struct sk_buff
+       if ((1 << gso_type) & vif->gso_mask) {
+               meta->gso_type = gso_type;
+-              meta->gso_size = gso_size;
++              meta->gso_size = skb_shinfo(skb)->gso_size;
+       } else {
+               meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
+               meta->gso_size = 0;
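
The corrected decision order -- gso_size decides whether the skb is GSO at
all, and gso_type is consulted only afterwards -- looks like this in a
standalone sketch with invented fake_* names in place of skb_shinfo() and the
XEN_NETIF constants:

#include <stdbool.h>
#include <stdio.h>

enum { GSO_TYPE_NONE, GSO_TYPE_TCPV4, GSO_TYPE_TCPV6 };

#define FAKE_GSO_TCPV4 0x1
#define FAKE_GSO_TCPV6 0x2

struct fake_shinfo { unsigned short gso_size; unsigned int gso_type; };

/* Like skb_is_gso(): GSO presence is keyed off gso_size, never gso_type. */
static bool fake_skb_is_gso(const struct fake_shinfo *si)
{
        return si->gso_size != 0;
}

/* Mirrors the patched selection in xenvif_gop_skb(): default to NONE and
 * consult gso_type only once gso_size says the skb really is GSO. */
static int classify(const struct fake_shinfo *si)
{
        int gso_type = GSO_TYPE_NONE;

        if (fake_skb_is_gso(si)) {
                if (si->gso_type & FAKE_GSO_TCPV4)
                        gso_type = GSO_TYPE_TCPV4;
                else if (si->gso_type & FAKE_GSO_TCPV6)
                        gso_type = GSO_TYPE_TCPV6;
        }
        return gso_type;
}

int main(void)
{
        /* nonzero gso_type but zero gso_size: must NOT be treated as GSO */
        struct fake_shinfo odd  = { .gso_size = 0,    .gso_type = FAKE_GSO_TCPV4 };
        struct fake_shinfo real = { .gso_size = 1448, .gso_type = FAKE_GSO_TCPV4 };

        printf("odd skb  -> %d (expect %d)\n", classify(&odd),  GSO_TYPE_NONE);
        printf("real skb -> %d (expect %d)\n", classify(&real), GSO_TYPE_TCPV4);
        return 0;
}

An skb with a nonzero gso_type but zero gso_size now classifies as
XEN_NETIF_GSO_TYPE_NONE, so netback no longer emits a bogus GSO response for
it.
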
diff --git a/queue-3.13/xen-netback-remove-pointless-clause-from-if-statement.patch b/queue-3.13/xen-netback-remove-pointless-clause-from-if-statement.patch
new file mode 100644 (file)
index 0000000..71a0811
--- /dev/null
@@ -0,0 +1,39 @@
+From foo@baz Thu Apr 10 22:03:05 PDT 2014
+From: Paul Durrant <Paul.Durrant@citrix.com>
+Date: Fri, 28 Mar 2014 11:39:05 +0000
+Subject: xen-netback: remove pointless clause from if statement
+
+From: Paul Durrant <Paul.Durrant@citrix.com>
+
+[ Upstream commit 0576eddf24df716d8570ef8ca11452a9f98eaab2 ]
+
+This patch removes a test in start_new_rx_buffer() that checks whether
+a copy operation is less than MAX_BUFFER_OFFSET in length, since
+MAX_BUFFER_OFFSET is defined to be PAGE_SIZE and the only caller of
+start_new_rx_buffer() already limits copy operations to PAGE_SIZE or less.
+
+Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
+Cc: Ian Campbell <ian.campbell@citrix.com>
+Cc: Wei Liu <wei.liu2@citrix.com>
+Cc: Sander Eikelenboom <linux@eikelenboom.it>
+Reported-By: Sander Eikelenboom <linux@eikelenboom.it>
+Tested-By: Sander Eikelenboom <linux@eikelenboom.it>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/netback.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -203,8 +203,8 @@ static bool start_new_rx_buffer(int offs
+        * into multiple copies tend to give large frags their
+        * own buffers as before.
+        */
+-      if ((offset + size > MAX_BUFFER_OFFSET) &&
+-          (size <= MAX_BUFFER_OFFSET) && offset && !head)
++      BUG_ON(size > MAX_BUFFER_OFFSET);
++      if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
+               return true;
+       return false;