From 6f0284d1aff700e6b7eee9b98c0f0cab6771d5c7 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Wed, 13 Feb 2013 10:24:04 -0800
Subject: [PATCH] 3.0-stable patches

added patches:
netback-correct-netbk_tx_err-to-handle-wrap-around.patch
xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch
xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch
xen-netback-shutdown-the-ring-if-it-contains-garbage.patch
---
 ...t-netbk_tx_err-to-handle-wrap-around.patch |  29 ++
 queue-3.0/series                              |   4 +
 ...on-failure-in-xen_netbk_tx_check_gop.patch | 135 ++++++++++
 ...on-failure-in-xen_netbk_get_requests.patch |  46 ++++
 ...down-the-ring-if-it-contains-garbage.patch | 250 ++++++++++++++++++
 5 files changed, 464 insertions(+)
 create mode 100644 queue-3.0/netback-correct-netbk_tx_err-to-handle-wrap-around.patch
 create mode 100644 queue-3.0/xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch
 create mode 100644 queue-3.0/xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch
 create mode 100644 queue-3.0/xen-netback-shutdown-the-ring-if-it-contains-garbage.patch

diff --git a/queue-3.0/netback-correct-netbk_tx_err-to-handle-wrap-around.patch b/queue-3.0/netback-correct-netbk_tx_err-to-handle-wrap-around.patch
new file mode 100644
index 00000000000..7fafcdce76f
--- /dev/null
+++ b/queue-3.0/netback-correct-netbk_tx_err-to-handle-wrap-around.patch
@@ -0,0 +1,29 @@
+From 60ff7c97af22a21c483b4df993cc8c249929784c Mon Sep 17 00:00:00 2001
+From: Ian Campbell
+Date: Wed, 6 Feb 2013 23:41:38 +0000
+Subject: netback: correct netbk_tx_err to handle wrap around.
+
+
+From: Ian Campbell
+
+[ Upstream commit b9149729ebdcfce63f853aa54a404c6a8f6ebbf3 ]
+
+Signed-off-by: Ian Campbell
+Acked-by: Jan Beulich
+Signed-off-by: David S. Miller
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/net/xen-netback/netback.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -839,7 +839,7 @@ static void netbk_tx_err(struct xenvif *
+
+ do {
+ make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+- if (cons >= end)
++ if (cons == end)
+ break;
+ txp = RING_GET_REQUEST(&vif->tx, cons++);
+ } while (1);
diff --git a/queue-3.0/series b/queue-3.0/series
index 3a5ffa3233a..8d9477dbaa6 100644
--- a/queue-3.0/series
+++ b/queue-3.0/series
@@ -20,3 +20,7 @@ tcp-fix-msg_sendpage_notlast-logic.patch
 bridge-pull-ip-header-into-skb-data-before-looking-into-ip-header.patch
 tg3-avoid-null-pointer-dereference-in-tg3_interrupt-in-netconsole-mode.patch
 tg3-fix-crc-errors-on-jumbo-frame-receive.patch
+xen-netback-shutdown-the-ring-if-it-contains-garbage.patch
+xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch
+xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch
+netback-correct-netbk_tx_err-to-handle-wrap-around.patch
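A note on the one-liner above: the ring indices involved are free-running
unsigned counters, and "end" is computed as an offset beyond "cons", so
"end" can wrap around to a numerically smaller value. When that happens the
old "cons >= end" test is already true on entry, the loop stops after a
single response, and the remaining slots never get an error response.
Testing for equality is the wrap-safe termination condition. Below is a
standalone sketch of the failure mode, not driver code; uint8_t counters
are used only so the wrap is easy to trigger.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t cons = 254;     /* consumer index just below the wrap point */
    uint8_t end = cons + 4; /* four slots to respond to; wraps to 2 */
    int responses = 0;

    /* The broken test: 254 >= 2 already holds, so a ">="-terminated
     * loop would respond to one slot and abandon the other three. */
    printf("cons >= end on entry: %d\n", cons >= end);

    /* Wrap-safe loop, shaped like the fixed netbk_tx_err(). */
    do {
        responses++; /* stands in for make_tx_response() */
        cons++;
        if (cons == end)
            break;
    } while (1);

    printf("responses sent: %d (expected 4)\n", responses);
    return 0;
}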
diff --git a/queue-3.0/xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch b/queue-3.0/xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch
new file mode 100644
index 00000000000..19f9fceb9b3
--- /dev/null
+++ b/queue-3.0/xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch
@@ -0,0 +1,135 @@
+From b284e80e04b7181353963ed98a6ae5b0eb24672f Mon Sep 17 00:00:00 2001
+From: Matthew Daley
+Date: Wed, 6 Feb 2013 23:41:36 +0000
+Subject: xen/netback: don't leak pages on failure in xen_netbk_tx_check_gop.
+
+
+From: Matthew Daley
+
+[ Upstream commit 7d5145d8eb2b9791533ffe4dc003b129b9696c48 ]
+
+Signed-off-by: Matthew Daley
+Reviewed-by: Konrad Rzeszutek Wilk
+Acked-by: Ian Campbell
+Acked-by: Jan Beulich
+Signed-off-by: David S. Miller
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/net/xen-netback/netback.c | 40 ++++++++++++--------------------------
+ 1 file changed, 14 insertions(+), 26 deletions(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -143,7 +143,8 @@ void xen_netbk_remove_xenvif(struct xenv
+ atomic_dec(&netbk->netfront_count);
+ }
+
+-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
++ u8 status);
+ static void make_tx_response(struct xenvif *vif,
+ struct xen_netif_tx_request *txp,
+ s8 st);
+@@ -968,30 +969,20 @@ static int xen_netbk_tx_check_gop(struct
+ {
+ struct gnttab_copy *gop = *gopp;
+ int pending_idx = *((u16 *)skb->data);
+- struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
+- struct xenvif *vif = pending_tx_info[pending_idx].vif;
+- struct xen_netif_tx_request *txp;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int nr_frags = shinfo->nr_frags;
+ int i, err, start;
+
+ /* Check status of header. */
+ err = gop->status;
+- if (unlikely(err)) {
+- pending_ring_idx_t index;
+- index = pending_index(netbk->pending_prod++);
+- txp = &pending_tx_info[pending_idx].req;
+- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+- netbk->pending_ring[index] = pending_idx;
+- xenvif_put(vif);
+- }
++ if (unlikely(err))
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+ /* Skip first skb fragment if it is on same page as header fragment. */
+ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+
+ for (i = start; i < nr_frags; i++) {
+ int j, newerr;
+- pending_ring_idx_t index;
+
+ pending_idx = (unsigned long)shinfo->frags[i].page;
+
+@@ -1000,16 +991,12 @@ static int xen_netbk_tx_check_gop(struct
+ if (likely(!newerr)) {
+ /* Had a previous error? Invalidate this fragment. */
+ if (unlikely(err))
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ continue;
+ }
+
+ /* Error on this fragment: respond to client with an error. */
+- txp = &netbk->pending_tx_info[pending_idx].req;
+- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+- index = pending_index(netbk->pending_prod++);
+- netbk->pending_ring[index] = pending_idx;
+- xenvif_put(vif);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+ /* Not the first error? Preceding frags already invalidated. */
+ if (err)
+@@ -1017,10 +1004,10 @@ static int xen_netbk_tx_check_gop(struct
+
+ /* First error: invalidate header and preceding fragments. */
+ pending_idx = *((u16 *)skb->data);
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ for (j = start; j < i; j++) {
+ pending_idx = (unsigned long)shinfo->frags[i].page;
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ }
+
+ /* Remember the error: invalidate all subsequent fragments. */
+@@ -1055,7 +1042,7 @@ static void xen_netbk_fill_frags(struct
+
+ /* Take an extra reference to offset xen_netbk_idx_release */
+ get_page(netbk->mmap_pages[pending_idx]);
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ }
+ }
+
+@@ -1440,7 +1427,7 @@ static void xen_netbk_tx_submit(struct x
+ txp->size -= data_len;
+ } else {
+ /* Schedule a response immediately. */
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ }
+
+ if (txp->flags & XEN_NETTXF_csum_blank)
+@@ -1495,7 +1482,8 @@ static void xen_netbk_tx_action(struct x
+
+ }
+
+-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
++ u8 status)
+ {
+ struct xenvif *vif;
+ struct pending_tx_info *pending_tx_info;
+@@ -1509,7 +1497,7 @@ static void xen_netbk_idx_release(struct
+
+ vif = pending_tx_info->vif;
+
+- make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
++ make_tx_response(vif, &pending_tx_info->req, status);
+
+ index = pending_index(netbk->pending_prod++);
+ netbk->pending_ring[index] = pending_idx;
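The value of the conversion above is that xen_netbk_idx_release() becomes
the only path that returns a pending slot and emits a response, taking the
response status as an argument instead of being open-coded at every error
site. The invariant it must preserve: every slot of a packet gets exactly
one response, XEN_NETIF_RSP_ERROR for the slot whose grant copy failed and
XEN_NETIF_RSP_OKAY for slots that merely need invalidating. The following
is a standalone model of that accounting with invented slot counts and
statuses; the fully successful case, where slots are released later on the
normal completion path, is outside this sketch.

#include <stdio.h>

#define NSLOTS 5
enum rsp { RSP_NONE, RSP_OKAY, RSP_ERROR };

static enum rsp responses[NSLOTS];

static void idx_release(int idx, enum rsp status)
{
    /* One response per slot; a second would be a protocol bug. */
    if (responses[idx] != RSP_NONE)
        printf("BUG: slot %d responded to twice\n", idx);
    responses[idx] = status;
}

int main(void)
{
    /* copy_status[0] is the header's grant copy, the rest are
     * fragments; slot 2 fails, as an arbitrary example. */
    int copy_status[NSLOTS] = { 0, 0, -1, 0, 0 };
    int err = copy_status[0];
    int i, j;

    if (err)
        idx_release(0, RSP_ERROR);

    for (i = 1; i < NSLOTS; i++) {
        if (!copy_status[i]) {
            if (err) /* an earlier slot failed: just invalidate */
                idx_release(i, RSP_OKAY);
            continue;
        }
        idx_release(i, RSP_ERROR); /* this slot's copy failed */
        if (err) /* not the first error: earlier slots are done */
            continue;
        idx_release(0, RSP_OKAY); /* first error: flush the header... */
        for (j = 1; j < i; j++)   /* ...and the preceding fragments */
            idx_release(j, RSP_OKAY);
        err = copy_status[i];
    }

    for (i = 0; i < NSLOTS; i++)
        printf("slot %d: %s\n", i,
               responses[i] == RSP_ERROR ? "error" :
               responses[i] == RSP_OKAY ? "okay" : "LEAKED");
    return 0;
}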
diff --git a/queue-3.0/xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch b/queue-3.0/xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch
new file mode 100644
index 00000000000..7764cc23a24
--- /dev/null
+++ b/queue-3.0/xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch
@@ -0,0 +1,46 @@
+From 1cdaab9fdd9884c696256e68a6ca21c07e8dfef9 Mon Sep 17 00:00:00 2001
+From: Ian Campbell
+Date: Wed, 6 Feb 2013 23:41:37 +0000
+Subject: xen/netback: free already allocated memory on failure in xen_netbk_get_requests
+
+
+From: Ian Campbell
+
+[ Upstream commit 4cc7c1cb7b11b6f3515bd9075527576a1eecc4aa ]
+
+Signed-off-by: Ian Campbell
+Signed-off-by: David S. Miller
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/net/xen-netback/netback.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -937,7 +937,7 @@ static struct gnttab_copy *xen_netbk_get
+ pending_idx = netbk->pending_ring[index];
+ page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+ if (!page)
+- return NULL;
++ goto err;
+
+ netbk->mmap_pages[pending_idx] = page;
+
+@@ -961,6 +961,17 @@ static struct gnttab_copy *xen_netbk_get
+ }
+
+ return gop;
++err:
++ /* Unwind, freeing all pages and sending error responses. */
++ while (i-- > start) {
++ xen_netbk_idx_release(netbk, (unsigned long)shinfo->frags[i].page,
++ XEN_NETIF_RSP_ERROR);
++ }
++ /* The head too, if necessary. */
++ if (start)
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
++
++ return NULL;
+ }
+
+ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
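The fix above is the classic goto-unwind idiom: when a later allocation
fails, release everything this function has already committed, newest
first, then report failure so the caller sees no half-built state. Here is
a generic standalone sketch of the same idiom; the names (setup_slots,
release_with_error) are invented for illustration and are not the driver's
API.

#include <stdio.h>
#include <stdlib.h>

struct slot {
    void *page;
};

static void release_with_error(struct slot *s)
{
    /* The driver would also queue an XEN_NETIF_RSP_ERROR response here. */
    free(s->page);
    s->page = NULL;
}

static int setup_slots(struct slot *slots, int start, int n)
{
    int i;

    for (i = start; i < n; i++) {
        slots[i].page = malloc(4096);
        if (!slots[i].page)
            goto err;
    }
    return 0;

err:
    /* Unwind only what this call set up, most recent first. */
    while (i-- > start)
        release_with_error(&slots[i]);
    return -1;
}

int main(void)
{
    struct slot slots[8] = { { NULL } };
    int i;

    if (setup_slots(slots, 0, 8) == 0)
        printf("all slots set up\n");
    else
        printf("allocation failed; already-built slots were unwound\n");

    for (i = 0; i < 8; i++)
        free(slots[i].page);
    return 0;
}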
diff --git a/queue-3.0/xen-netback-shutdown-the-ring-if-it-contains-garbage.patch b/queue-3.0/xen-netback-shutdown-the-ring-if-it-contains-garbage.patch
new file mode 100644
index 00000000000..519338f5190
--- /dev/null
+++ b/queue-3.0/xen-netback-shutdown-the-ring-if-it-contains-garbage.patch
@@ -0,0 +1,250 @@
+From cd670b23c148db94a7f8e3d2424aa2f51f5aac21 Mon Sep 17 00:00:00 2001
+From: Ian Campbell
+Date: Wed, 6 Feb 2013 23:41:35 +0000
+Subject: xen/netback: shutdown the ring if it contains garbage.
+
+
+From: Ian Campbell
+
+[ Upstream commit 48856286b64e4b66ec62b94e504d0b29c1ade664 ]
+
+A buggy or malicious frontend should not be able to confuse netback.
+If we spot anything which is not as it should be then shut down the
+device and don't try to continue with the ring in a potentially
+hostile state. Well behaved and non-hostile frontends will not be
+penalised.
+
+As well as making the existing checks for such errors fatal, also add
+a new check that ensures there isn't an insane number of requests on
+the ring (i.e. more than would fit in the ring). If the ring contains
+garbage then previously it was possible to loop over this insane
+number, getting an error each time and therefore not generating any
+more pending requests, and therefore not exiting the loop in
+xen_netbk_tx_build_gops for an extended period.
+
+Also turn the various netdev_dbg calls which now precipitate a fatal
+error into netdev_err; they are effectively rate limited because the
+device is shut down afterwards.
+
+This fixes at least one known DoS/softlockup of the backend domain.
+
+Signed-off-by: Ian Campbell
+Reviewed-by: Konrad Rzeszutek Wilk
+Acked-by: Jan Beulich
+Signed-off-by: David S. Miller
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/net/xen-netback/common.h | 3 +++
+ drivers/net/xen-netback/interface.c | 23 ++++++++++++++---------
+ drivers/net/xen-netback/netback.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++-----------------
+ 3 files changed, 62 insertions(+), 26 deletions(-)
+
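The new sanity check described above works because req_prod and req_cons
are free-running unsigned counters: the difference "req_prod - req_cons"
is the number of outstanding requests even across wrap-around, so any
value above the ring size proves the shared ring is garbage and the only
safe reaction is to stop using it. A standalone sketch with invented types
and values, not the driver's structures:

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 256u

static int ring_sane(uint32_t req_prod, uint32_t req_cons)
{
    /* Wrap-safe: unsigned subtraction yields the outstanding count. */
    return req_prod - req_cons <= RING_SIZE;
}

int main(void)
{
    /* Healthy: five outstanding requests, producer already wrapped. */
    printf("%d\n", ring_sane(3u, UINT32_MAX - 1));
    /* Garbage: a frontend claiming 2^31 outstanding requests. */
    printf("%d\n", ring_sane(0x80000000u, 0u));
    return 0;
}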
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -152,6 +152,9 @@ void xen_netbk_queue_tx_skb(struct xenvi
+ /* Notify xenvif that ring now has space to send an skb to the frontend */
+ void xenvif_notify_tx_completion(struct xenvif *vif);
+
++/* Prevent the device from generating any further traffic. */
++void xenvif_carrier_off(struct xenvif *vif);
++
+ /* Returns number of ring slots required to send an skb to the frontend */
+ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -342,17 +342,22 @@ err:
+ return err;
+ }
+
+-void xenvif_disconnect(struct xenvif *vif)
++void xenvif_carrier_off(struct xenvif *vif)
+ {
+ struct net_device *dev = vif->dev;
+- if (netif_carrier_ok(dev)) {
+- rtnl_lock();
+- netif_carrier_off(dev); /* discard queued packets */
+- if (netif_running(dev))
+- xenvif_down(vif);
+- rtnl_unlock();
+- xenvif_put(vif);
+- }
++
++ rtnl_lock();
++ netif_carrier_off(dev); /* discard queued packets */
++ if (netif_running(dev))
++ xenvif_down(vif);
++ rtnl_unlock();
++ xenvif_put(vif);
++}
++
++void xenvif_disconnect(struct xenvif *vif)
++{
++ if (netif_carrier_ok(vif->dev))
++ xenvif_carrier_off(vif);
+
+ atomic_dec(&vif->refcnt);
+ wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -847,6 +847,13 @@ static void netbk_tx_err(struct xenvif *
+ xenvif_put(vif);
+ }
+
++static void netbk_fatal_tx_err(struct xenvif *vif)
++{
++ netdev_err(vif->dev, "fatal error; disabling device\n");
++ xenvif_carrier_off(vif);
++ xenvif_put(vif);
++}
++
+ static int netbk_count_requests(struct xenvif *vif,
+ struct xen_netif_tx_request *first,
+ struct xen_netif_tx_request *txp,
+@@ -860,19 +867,22 @@ static int netbk_count_requests(struct x
+
+ do {
+ if (frags >= work_to_do) {
+- netdev_dbg(vif->dev, "Need more frags\n");
++ netdev_err(vif->dev, "Need more frags\n");
++ netbk_fatal_tx_err(vif);
+ return -frags;
+ }
+
+ if (unlikely(frags >= MAX_SKB_FRAGS)) {
+- netdev_dbg(vif->dev, "Too many frags\n");
++ netdev_err(vif->dev, "Too many frags\n");
++ netbk_fatal_tx_err(vif);
+ return -frags;
+ }
+
+ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+ sizeof(*txp));
+ if (txp->size > first->size) {
+- netdev_dbg(vif->dev, "Frags galore\n");
++ netdev_err(vif->dev, "Frag is bigger than frame.\n");
++ netbk_fatal_tx_err(vif);
+ return -frags;
+ }
+
+@@ -880,8 +890,9 @@ static int netbk_count_requests(struct x
+ frags++;
+
+ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+- netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
++ netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
+ txp->offset, txp->size);
++ netbk_fatal_tx_err(vif);
+ return -frags;
+ }
+ } while ((txp++)->flags & XEN_NETTXF_more_data);
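The netbk_count_requests() hunks above are the heart of the change: the
walk over a frontend-controlled more_data chain is now bounded, and
overrunning either the advertised work or MAX_SKB_FRAGS is treated as
fatal rather than as a per-packet error to retry forever. A standalone
sketch of the same bounded-walk idea, with a simplified request layout and
invented values:

#include <stdio.h>

#define MAX_SKB_FRAGS 18

struct tx_req {
    int size;
    int more_data;
};

static int count_requests(const struct tx_req *reqs, int work_to_do)
{
    int frags = 0;

    do {
        if (frags >= work_to_do)    /* chain longer than advertised */
            return -1;              /* caller treats this as fatal */
        if (frags >= MAX_SKB_FRAGS) /* chain longer than any real skb */
            return -1;
        frags++;
    } while (reqs[frags - 1].more_data);

    return frags;
}

int main(void)
{
    /* A well-formed two-slot packet, then a chain that never ends. */
    struct tx_req good[] = { { 1000, 1 }, { 200, 0 } };
    struct tx_req evil[MAX_SKB_FRAGS + 2];
    int i;

    for (i = 0; i < MAX_SKB_FRAGS + 2; i++) {
        evil[i].size = 1;
        evil[i].more_data = 1; /* claims more slots forever */
    }

    printf("good: %d\n", count_requests(good, 8));  /* 2 */
    printf("evil: %d\n", count_requests(evil, 64)); /* -1, capped */
    return 0;
}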
+@@ -1057,7 +1068,8 @@ static int xen_netbk_get_extras(struct x
+
+ do {
+ if (unlikely(work_to_do-- <= 0)) {
+- netdev_dbg(vif->dev, "Missing extra info\n");
++ netdev_err(vif->dev, "Missing extra info\n");
++ netbk_fatal_tx_err(vif);
+ return -EBADR;
+ }
+
+@@ -1066,8 +1078,9 @@ static int xen_netbk_get_extras(struct x
+ if (unlikely(!extra.type ||
+ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ vif->tx.req_cons = ++cons;
+- netdev_dbg(vif->dev,
++ netdev_err(vif->dev,
+ "Invalid extra type: %d\n", extra.type);
++ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+
+@@ -1083,13 +1096,15 @@ static int netbk_set_skb_gso(struct xenv
+ struct xen_netif_extra_info *gso)
+ {
+ if (!gso->u.gso.size) {
+- netdev_dbg(vif->dev, "GSO size must not be zero.\n");
++ netdev_err(vif->dev, "GSO size must not be zero.\n");
++ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+
+ /* Currently only TCPv4 S.O. is supported. */
+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+- netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
++ netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
++ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+
+@@ -1226,9 +1241,25 @@ static unsigned xen_netbk_tx_build_gops(
+
+ /* Get a netif from the list with work to do. */
+ vif = poll_net_schedule_list(netbk);
++ /* This can sometimes happen because the test of
++ * list_empty(net_schedule_list) at the top of the
++ * loop is unlocked. Just go back and have another
++ * look.
++ */
+ if (!vif)
+ continue;
+
++ if (vif->tx.sring->req_prod - vif->tx.req_cons >
++ XEN_NETIF_TX_RING_SIZE) {
++ netdev_err(vif->dev,
++ "Impossible number of requests. "
++ "req_prod %d, req_cons %d, size %ld\n",
++ vif->tx.sring->req_prod, vif->tx.req_cons,
++ XEN_NETIF_TX_RING_SIZE);
++ netbk_fatal_tx_err(vif);
++ continue;
++ }
++
+ RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
+ if (!work_to_do) {
+ xenvif_put(vif);
+@@ -1256,17 +1287,14 @@ static unsigned xen_netbk_tx_build_gops(
+
+ work_to_do = xen_netbk_get_extras(vif, extras,
+ work_to_do);
+ idx = vif->tx.req_cons;
+- if (unlikely(work_to_do < 0)) {
+- netbk_tx_err(vif, &txreq, idx);
++ if (unlikely(work_to_do < 0))
+ continue;
+- }
+ }
+
+ ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+- if (unlikely(ret < 0)) {
+- netbk_tx_err(vif, &txreq, idx - ret);
++ if (unlikely(ret < 0))
+ continue;
+- }
++
+ idx += ret;
+
+ if (unlikely(txreq.size < ETH_HLEN)) {
+@@ -1278,11 +1306,11 @@ static unsigned xen_netbk_tx_build_gops(
+
+ /* No crossing a page as the payload mustn't fragment. */
+ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
+- netdev_dbg(vif->dev,
++ netdev_err(vif->dev,
+ "txreq.offset: %x, size: %u, end: %lu\n",
+ txreq.offset, txreq.size,
+ (txreq.offset&~PAGE_MASK) + txreq.size);
+- netbk_tx_err(vif, &txreq, idx);
++ netbk_fatal_tx_err(vif);
+ continue;
+ }
+
+@@ -1310,8 +1338,8 @@ static unsigned xen_netbk_tx_build_gops(
+
+ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
+ if (netbk_set_skb_gso(vif, skb, gso)) {
++ /* Failure in netbk_set_skb_gso is fatal. */
+ kfree_skb(skb);
+- netbk_tx_err(vif, &txreq, idx);
+ continue;
+ }
+ }
-- 
2.47.3
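A final observation on the hunks above: not every check became fatal. An
undersized packet (txreq.size < ETH_HLEN) still draws an ordinary
per-request error, while violations of the ring protocol itself, such as a
payload that would cross a page boundary, now disable the whole interface.
A standalone sketch of that severity split, with invented names and
values, not driver code:

#include <stdio.h>

#define ETH_HLEN  14
#define PAGE_SIZE 4096

enum verdict { OK, DROP_PACKET, FATAL_SHUTDOWN };

static enum verdict classify(unsigned int offset, unsigned int size)
{
    if (size < ETH_HLEN)
        return DROP_PACKET;    /* harmless: error this packet only */
    if (offset + size > PAGE_SIZE)
        return FATAL_SHUTDOWN; /* hostile or garbage: kill the ring */
    return OK;
}

int main(void)
{
    printf("%d\n", classify(0, 6));      /* 1: runt frame, drop */
    printf("%d\n", classify(4000, 512)); /* 2: crosses page, fatal */
    printf("%d\n", classify(0, 1500));   /* 0: fine */
    return 0;
}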