From: Greg Kroah-Hartman Date: Tue, 12 Feb 2013 18:52:41 +0000 (-0800) Subject: 3.4-stable patches X-Git-Tag: v3.0.64~5 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=ac8d9edd2649aa3bf2865b8a583bf697c7fbd538;p=thirdparty%2Fkernel%2Fstable-queue.git 3.4-stable patches added patches: atm-iphase-rename-fregt_t-ffreg_t.patch bridge-pull-ip-header-into-skb-data-before-looking-into-ip-header.patch ipv6-do-not-create-neighbor-entries-for-local-delivery.patch ipv6-fix-header-length-calculation-in-ip6_append_data.patch ipv6-fix-the-noflags-test-in-addrconf_get_prefix_route.patch isdn-gigaset-fix-zero-size-border-case-in-debug-dump.patch maintainers-stephen-hemminger-email-change.patch netback-correct-netbk_tx_err-to-handle-wrap-around.patch net-calxedaxgmac-throw-away-overrun-frames.patch net-loopback-fix-a-dst-refcounting-issue.patch net-mlx4_core-set-number-of-msix-vectors-under-sriov-mode-to-firmware-defaults.patch net-mlx4_en-fix-bridged-vswitch-configuration-for-non-sriov-mode.patch net-prevent-setting-ttl-0-via-ip_ttl.patch net-sctp-sctp_endpoint_free-zero-out-secret-key-data.patch net-sctp-sctp_setsockopt_auth_key-use-kzfree-instead-of-kfree.patch netxen-fix-off-by-one-bug-in-netxen_release_tx_buffer.patch packet-fix-leakage-of-tx_ring-memory.patch pktgen-correctly-handle-failures-when-adding-a-device.patch r8169-remove-the-obsolete-and-incorrect-amd-workaround.patch sctp-refactor-sctp_outq_teardown-to-insure-proper-re-initalization.patch tcp-fix-for-zero-packets_in_flight-was-too-broad.patch tcp-frto-should-not-set-snd_cwnd-to-0.patch tg3-avoid-null-pointer-dereference-in-tg3_interrupt-in-netconsole-mode.patch tg3-fix-crc-errors-on-jumbo-frame-receive.patch via-rhine-fix-bugs-in-napi-support.patch xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch xen-netback-shutdown-the-ring-if-it-contains-garbage.patch --- diff --git a/queue-3.4/atm-iphase-rename-fregt_t-ffreg_t.patch b/queue-3.4/atm-iphase-rename-fregt_t-ffreg_t.patch new file mode 100644 index 00000000000..0404fa889c7 --- /dev/null +++ b/queue-3.4/atm-iphase-rename-fregt_t-ffreg_t.patch @@ -0,0 +1,198 @@ +From ec95a4a732049c6559b30e2e74dae7096ee1353f Mon Sep 17 00:00:00 2001 +From: Heiko Carstens +Date: Fri, 8 Feb 2013 00:19:11 +0000 +Subject: atm/iphase: rename fregt_t -> ffreg_t + + +From: Heiko Carstens + +[ Upstream commit ab54ee80aa7585f9666ff4dd665441d7ce41f1e8 ] + +We have conflicting type qualifiers for "freg_t" in s390's ptrace.h and the +iphase atm device driver, which causes the compile error below. +Unfortunately the s390 typedef can't be renamed, since it's a user visible api, +nor can I change the include order in s390 code to avoid the conflict. + +So simply rename the iphase typedef to a new name. 
Fixes this compile error: + +In file included from drivers/atm/iphase.c:66:0: +drivers/atm/iphase.h:639:25: error: conflicting type qualifiers for 'freg_t' +In file included from next/arch/s390/include/asm/ptrace.h:9:0, + from next/arch/s390/include/asm/lowcore.h:12, + from next/arch/s390/include/asm/thread_info.h:30, + from include/linux/thread_info.h:54, + from include/linux/preempt.h:9, + from include/linux/spinlock.h:50, + from include/linux/seqlock.h:29, + from include/linux/time.h:5, + from include/linux/stat.h:18, + from include/linux/module.h:10, + from drivers/atm/iphase.c:43: +next/arch/s390/include/uapi/asm/ptrace.h:197:3: note: previous declaration of 'freg_t' was here + +Signed-off-by: Heiko Carstens +Acked-by: chas williams - CONTRACTOR +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/atm/iphase.h | 146 +++++++++++++++++++++++++-------------------------- + 1 file changed, 73 insertions(+), 73 deletions(-) + +--- a/drivers/atm/iphase.h ++++ b/drivers/atm/iphase.h +@@ -636,82 +636,82 @@ struct rx_buf_desc { + #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE + #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE + +-typedef volatile u_int freg_t; ++typedef volatile u_int ffreg_t; + typedef u_int rreg_t; + + typedef struct _ffredn_t { +- freg_t idlehead_high; /* Idle cell header (high) */ +- freg_t idlehead_low; /* Idle cell header (low) */ +- freg_t maxrate; /* Maximum rate */ +- freg_t stparms; /* Traffic Management Parameters */ +- freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */ +- freg_t rm_type; /* */ +- u_int filler5[0x17 - 0x06]; +- freg_t cmd_reg; /* Command register */ +- u_int filler18[0x20 - 0x18]; +- freg_t cbr_base; /* CBR Pointer Base */ +- freg_t vbr_base; /* VBR Pointer Base */ +- freg_t abr_base; /* ABR Pointer Base */ +- freg_t ubr_base; /* UBR Pointer Base */ +- u_int filler24; +- freg_t vbrwq_base; /* VBR Wait Queue Base */ +- freg_t abrwq_base; /* ABR Wait Queue Base */ +- freg_t ubrwq_base; /* UBR Wait Queue Base */ +- freg_t vct_base; /* Main VC Table Base */ +- freg_t vcte_base; /* Extended Main VC Table Base */ +- u_int filler2a[0x2C - 0x2A]; +- freg_t cbr_tab_beg; /* CBR Table Begin */ +- freg_t cbr_tab_end; /* CBR Table End */ +- freg_t cbr_pointer; /* CBR Pointer */ +- u_int filler2f[0x30 - 0x2F]; +- freg_t prq_st_adr; /* Packet Ready Queue Start Address */ +- freg_t prq_ed_adr; /* Packet Ready Queue End Address */ +- freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */ +- freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */ +- freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/ +- freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */ +- freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */ +- freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/ +- u_int filler38[0x40 - 0x38]; +- freg_t queue_base; /* Base address for PRQ and TCQ */ +- freg_t desc_base; /* Base address of descriptor table */ +- u_int filler42[0x45 - 0x42]; +- freg_t mode_reg_0; /* Mode register 0 */ +- freg_t mode_reg_1; /* Mode register 1 */ +- freg_t intr_status_reg;/* Interrupt Status register */ +- freg_t mask_reg; /* Mask Register */ +- freg_t cell_ctr_high1; /* Total cell transfer count (high) */ +- freg_t cell_ctr_lo1; /* Total cell transfer count (low) */ +- freg_t state_reg; /* Status register */ +- u_int filler4c[0x58 - 0x4c]; +- freg_t curr_desc_num; /* Contains the current descriptor num */ +- freg_t next_desc; /* Next descriptor */ +- freg_t next_vc; /* Next VC */ +- u_int 
filler5b[0x5d - 0x5b]; +- freg_t present_slot_cnt;/* Present slot count */ +- u_int filler5e[0x6a - 0x5e]; +- freg_t new_desc_num; /* New descriptor number */ +- freg_t new_vc; /* New VC */ +- freg_t sched_tbl_ptr; /* Schedule table pointer */ +- freg_t vbrwq_wptr; /* VBR wait queue write pointer */ +- freg_t vbrwq_rptr; /* VBR wait queue read pointer */ +- freg_t abrwq_wptr; /* ABR wait queue write pointer */ +- freg_t abrwq_rptr; /* ABR wait queue read pointer */ +- freg_t ubrwq_wptr; /* UBR wait queue write pointer */ +- freg_t ubrwq_rptr; /* UBR wait queue read pointer */ +- freg_t cbr_vc; /* CBR VC */ +- freg_t vbr_sb_vc; /* VBR SB VC */ +- freg_t abr_sb_vc; /* ABR SB VC */ +- freg_t ubr_sb_vc; /* UBR SB VC */ +- freg_t vbr_next_link; /* VBR next link */ +- freg_t abr_next_link; /* ABR next link */ +- freg_t ubr_next_link; /* UBR next link */ +- u_int filler7a[0x7c-0x7a]; +- freg_t out_rate_head; /* Out of rate head */ +- u_int filler7d[0xca-0x7d]; /* pad out to full address space */ +- freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */ +- freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */ +- u_int fillercc[0x100-0xcc]; /* pad out to full address space */ ++ ffreg_t idlehead_high; /* Idle cell header (high) */ ++ ffreg_t idlehead_low; /* Idle cell header (low) */ ++ ffreg_t maxrate; /* Maximum rate */ ++ ffreg_t stparms; /* Traffic Management Parameters */ ++ ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */ ++ ffreg_t rm_type; /* */ ++ u_int filler5[0x17 - 0x06]; ++ ffreg_t cmd_reg; /* Command register */ ++ u_int filler18[0x20 - 0x18]; ++ ffreg_t cbr_base; /* CBR Pointer Base */ ++ ffreg_t vbr_base; /* VBR Pointer Base */ ++ ffreg_t abr_base; /* ABR Pointer Base */ ++ ffreg_t ubr_base; /* UBR Pointer Base */ ++ u_int filler24; ++ ffreg_t vbrwq_base; /* VBR Wait Queue Base */ ++ ffreg_t abrwq_base; /* ABR Wait Queue Base */ ++ ffreg_t ubrwq_base; /* UBR Wait Queue Base */ ++ ffreg_t vct_base; /* Main VC Table Base */ ++ ffreg_t vcte_base; /* Extended Main VC Table Base */ ++ u_int filler2a[0x2C - 0x2A]; ++ ffreg_t cbr_tab_beg; /* CBR Table Begin */ ++ ffreg_t cbr_tab_end; /* CBR Table End */ ++ ffreg_t cbr_pointer; /* CBR Pointer */ ++ u_int filler2f[0x30 - 0x2F]; ++ ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */ ++ ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */ ++ ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */ ++ ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */ ++ ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/ ++ ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */ ++ ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */ ++ ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/ ++ u_int filler38[0x40 - 0x38]; ++ ffreg_t queue_base; /* Base address for PRQ and TCQ */ ++ ffreg_t desc_base; /* Base address of descriptor table */ ++ u_int filler42[0x45 - 0x42]; ++ ffreg_t mode_reg_0; /* Mode register 0 */ ++ ffreg_t mode_reg_1; /* Mode register 1 */ ++ ffreg_t intr_status_reg;/* Interrupt Status register */ ++ ffreg_t mask_reg; /* Mask Register */ ++ ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */ ++ ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */ ++ ffreg_t state_reg; /* Status register */ ++ u_int filler4c[0x58 - 0x4c]; ++ ffreg_t curr_desc_num; /* Contains the current descriptor num */ ++ ffreg_t next_desc; /* Next descriptor */ ++ ffreg_t next_vc; /* Next VC */ ++ u_int filler5b[0x5d - 0x5b]; ++ ffreg_t present_slot_cnt;/* Present slot 
count */ ++ u_int filler5e[0x6a - 0x5e]; ++ ffreg_t new_desc_num; /* New descriptor number */ ++ ffreg_t new_vc; /* New VC */ ++ ffreg_t sched_tbl_ptr; /* Schedule table pointer */ ++ ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */ ++ ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */ ++ ffreg_t abrwq_wptr; /* ABR wait queue write pointer */ ++ ffreg_t abrwq_rptr; /* ABR wait queue read pointer */ ++ ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */ ++ ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */ ++ ffreg_t cbr_vc; /* CBR VC */ ++ ffreg_t vbr_sb_vc; /* VBR SB VC */ ++ ffreg_t abr_sb_vc; /* ABR SB VC */ ++ ffreg_t ubr_sb_vc; /* UBR SB VC */ ++ ffreg_t vbr_next_link; /* VBR next link */ ++ ffreg_t abr_next_link; /* ABR next link */ ++ ffreg_t ubr_next_link; /* UBR next link */ ++ u_int filler7a[0x7c-0x7a]; ++ ffreg_t out_rate_head; /* Out of rate head */ ++ u_int filler7d[0xca-0x7d]; /* pad out to full address space */ ++ ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */ ++ ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */ ++ u_int fillercc[0x100-0xcc]; /* pad out to full address space */ + } ffredn_t; + + typedef struct _rfredn_t { diff --git a/queue-3.4/bridge-pull-ip-header-into-skb-data-before-looking-into-ip-header.patch b/queue-3.4/bridge-pull-ip-header-into-skb-data-before-looking-into-ip-header.patch new file mode 100644 index 00000000000..9902f990384 --- /dev/null +++ b/queue-3.4/bridge-pull-ip-header-into-skb-data-before-looking-into-ip-header.patch @@ -0,0 +1,33 @@ +From 7a3f5bcfe48ccc40fa7abe0bed6bf0bf525edef8 Mon Sep 17 00:00:00 2001 +From: Sarveshwar Bandi +Date: Wed, 10 Oct 2012 01:15:01 +0000 +Subject: bridge: Pull ip header into skb->data before looking into ip header. + + +From: Sarveshwar Bandi + +[ Upstream commit 6caab7b0544e83e6c160b5e80f5a4a7dd69545c7 ] + +If lower layer driver leaves the ip header in the skb fragment, it needs to +be first pulled into skb->data before inspecting ip header length or ip version +number. + +Signed-off-by: Sarveshwar Bandi +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/bridge/br_netfilter.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/net/bridge/br_netfilter.c ++++ b/net/bridge/br_netfilter.c +@@ -254,6 +254,9 @@ static int br_parse_ip_options(struct sk + struct net_device *dev = skb->dev; + u32 len; + ++ if (!pskb_may_pull(skb, sizeof(struct iphdr))) ++ goto inhdr_error; ++ + iph = ip_hdr(skb); + opt = &(IPCB(skb)->opt); + diff --git a/queue-3.4/ipv6-do-not-create-neighbor-entries-for-local-delivery.patch b/queue-3.4/ipv6-do-not-create-neighbor-entries-for-local-delivery.patch new file mode 100644 index 00000000000..04eae99a0d6 --- /dev/null +++ b/queue-3.4/ipv6-do-not-create-neighbor-entries-for-local-delivery.patch @@ -0,0 +1,39 @@ +From fffaba2bc44bf334754d6bf7bc729f39c9962282 Mon Sep 17 00:00:00 2001 +From: Marcelo Ricardo Leitner +Date: Tue, 29 Jan 2013 22:26:08 +0000 +Subject: ipv6: do not create neighbor entries for local delivery + + +From: Marcelo Ricardo Leitner + +[ Upstream commit bd30e947207e2ea0ff2c08f5b4a03025ddce48d3 ] + +They will be created at output, if ever needed. This avoids creating +empty neighbor entries when TPROXYing/Forwarding packets for addresses +that are not even directly reachable. + +Note that IPv4 already handles it this way. No neighbor entries are +created for local input. + +Tested by myself and customer. + +Signed-off-by: Jiri Pirko +Signed-off-by: Marcelo Ricardo Leitner +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/ipv6/route.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -846,7 +846,8 @@ restart: + dst_hold(&rt->dst); + read_unlock_bh(&table->tb6_lock); + +- if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP)) ++ if (!dst_get_neighbour_noref_raw(&rt->dst) && ++ !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL))) + nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); + else if (!(rt->dst.flags & DST_HOST)) + nrt = rt6_alloc_clone(rt, &fl6->daddr); diff --git a/queue-3.4/ipv6-fix-header-length-calculation-in-ip6_append_data.patch b/queue-3.4/ipv6-fix-header-length-calculation-in-ip6_append_data.patch new file mode 100644 index 00000000000..1d35edd9577 --- /dev/null +++ b/queue-3.4/ipv6-fix-header-length-calculation-in-ip6_append_data.patch @@ -0,0 +1,47 @@ +From 77824abf7d2be1bf12499615aee393a8d5384097 Mon Sep 17 00:00:00 2001 +From: Romain KUNTZ +Date: Wed, 16 Jan 2013 12:47:40 +0000 +Subject: ipv6: fix header length calculation in ip6_append_data() + + +From: Romain KUNTZ + +[ Upstream commit 7efdba5bd9a2f3e2059beeb45c9fa55eefe1bced ] + +Commit 299b0767 (ipv6: Fix IPsec slowpath fragmentation problem) +has introduced a error in the header length calculation that +provokes corrupted packets when non-fragmentable extensions +headers (Destination Option or Routing Header Type 2) are used. + +rt->rt6i_nfheader_len is the length of the non-fragmentable +extension header, and it should be substracted to +rt->dst.header_len, and not to exthdrlen, as it was done before +commit 299b0767. + +This patch reverts to the original and correct behavior. It has +been successfully tested with and without IPsec on packets +that include non-fragmentable extensions headers. + +Signed-off-by: Romain Kuntz +Acked-by: Steffen Klassert +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/ipv6/ip6_output.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1287,10 +1287,10 @@ int ip6_append_data(struct sock *sk, int + cork->length = 0; + sk->sk_sndmsg_page = NULL; + sk->sk_sndmsg_off = 0; +- exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len; ++ exthdrlen = (opt ? opt->opt_flen : 0); + length += exthdrlen; + transhdrlen += exthdrlen; +- dst_exthdrlen = rt->dst.header_len; ++ dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len; + } else { + rt = (struct rt6_info *)cork->dst; + fl6 = &inet->cork.fl.u.ip6; diff --git a/queue-3.4/ipv6-fix-the-noflags-test-in-addrconf_get_prefix_route.patch b/queue-3.4/ipv6-fix-the-noflags-test-in-addrconf_get_prefix_route.patch new file mode 100644 index 00000000000..616445b2533 --- /dev/null +++ b/queue-3.4/ipv6-fix-the-noflags-test-in-addrconf_get_prefix_route.patch @@ -0,0 +1,34 @@ +From 40bf7c03b77e0aae0fba9aab7c26df427a1019ba Mon Sep 17 00:00:00 2001 +From: Romain Kuntz +Date: Wed, 9 Jan 2013 15:02:26 +0100 +Subject: ipv6: fix the noflags test in addrconf_get_prefix_route + + +From: Romain Kuntz + +[ Upstream commit 85da53bf1c336bb07ac038fb951403ab0478d2c5 ] + +The tests on the flags in addrconf_get_prefix_route() does no make +much sense: the 'noflags' parameter contains the set of flags that +must not match with the route flags, so the test must be done +against 'noflags', and not against 'flags'. + +Signed-off-by: Romain Kuntz +Acked-by: YOSHIFUJI Hideaki +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/ipv6/addrconf.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -1736,7 +1736,7 @@ static struct rt6_info *addrconf_get_pre + continue; + if ((rt->rt6i_flags & flags) != flags) + continue; +- if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0)) ++ if ((rt->rt6i_flags & noflags) != 0) + continue; + dst_hold(&rt->dst); + break; diff --git a/queue-3.4/isdn-gigaset-fix-zero-size-border-case-in-debug-dump.patch b/queue-3.4/isdn-gigaset-fix-zero-size-border-case-in-debug-dump.patch new file mode 100644 index 00000000000..64ac64619e2 --- /dev/null +++ b/queue-3.4/isdn-gigaset-fix-zero-size-border-case-in-debug-dump.patch @@ -0,0 +1,32 @@ +From 9691b27751ccc3faae7cff9a8c8b28a9139caa80 Mon Sep 17 00:00:00 2001 +From: Tilman Schmidt +Date: Mon, 21 Jan 2013 11:57:21 +0000 +Subject: isdn/gigaset: fix zero size border case in debug dump + + +From: Tilman Schmidt + +[ Upstream commit d721a1752ba544df8d7d36959038b26bc92bdf80 ] + +If subtracting 12 from l leaves zero we'd do a zero size allocation, +leading to an oops later when we try to set the NUL terminator. + +Reported-by: Dan Carpenter +Signed-off-by: Tilman Schmidt +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/isdn/gigaset/capi.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/isdn/gigaset/capi.c ++++ b/drivers/isdn/gigaset/capi.c +@@ -264,6 +264,8 @@ static inline void dump_rawmsg(enum debu + CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l, + CAPIMSG_CONTROL(data)); + l -= 12; ++ if (l <= 0) ++ return; + dbgline = kmalloc(3 * l, GFP_ATOMIC); + if (!dbgline) + return; diff --git a/queue-3.4/maintainers-stephen-hemminger-email-change.patch b/queue-3.4/maintainers-stephen-hemminger-email-change.patch new file mode 100644 index 00000000000..4f8f8a77702 --- /dev/null +++ b/queue-3.4/maintainers-stephen-hemminger-email-change.patch @@ -0,0 +1,50 @@ +From 8f7a1c98a6e07d3a1daac6f261b96b6cf9b6b0f9 Mon Sep 17 00:00:00 2001 +From: Stephen Hemminger +Date: Wed, 16 Jan 2013 09:55:57 -0800 +Subject: MAINTAINERS: Stephen Hemminger email change + + +From: Stephen Hemminger + +[ Upstream commit adbbf69d1a54abf424e91875746a610dcc80017d ] + +I changed my email because the vyatta.com mail server is now +redirected to brocade.com; and the Brocade mail system +is not friendly to Linux desktop users. + +Signed-off-by: Stephen Hemminger +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman +--- + MAINTAINERS | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -2627,7 +2627,7 @@ S: Maintained + F: drivers/net/ethernet/i825xx/eexpress.* + + ETHERNET BRIDGE +-M: Stephen Hemminger ++M: Stephen Hemminger + L: bridge@lists.linux-foundation.org + L: netdev@vger.kernel.org + W: http://www.linuxfoundation.org/en/Net:Bridge +@@ -4312,7 +4312,7 @@ S: Maintained + + MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2) + M: Mirko Lindner +-M: Stephen Hemminger ++M: Stephen Hemminger + L: netdev@vger.kernel.org + S: Maintained + F: drivers/net/ethernet/marvell/sk* +@@ -4563,7 +4563,7 @@ S: Supported + F: drivers/infiniband/hw/nes/ + + NETEM NETWORK EMULATOR +-M: Stephen Hemminger ++M: Stephen Hemminger + L: netem@lists.linux-foundation.org + S: Maintained + F: net/sched/sch_netem.c diff --git a/queue-3.4/net-calxedaxgmac-throw-away-overrun-frames.patch b/queue-3.4/net-calxedaxgmac-throw-away-overrun-frames.patch new file mode 100644 index 00000000000..045835c5c60 --- /dev/null +++ b/queue-3.4/net-calxedaxgmac-throw-away-overrun-frames.patch @@ -0,0 +1,38 @@ +From c535f6260a9b2ed49bdd5151b57eb6a2e9a495a4 Mon Sep 17 00:00:00 2001 +From: Rob Herring +Date: Wed, 16 Jan 2013 13:36:37 +0000 +Subject: net: calxedaxgmac: throw away overrun frames + + +From: Rob Herring + +[ Upstream commit d6fb3be544b46a7611a3373fcaa62b5b0be01888 ] + +The xgmac driver assumes 1 frame per descriptor. If a frame larger than +the descriptor's buffer size is received, the frame will spill over into +the next descriptor. So check for received frames that span more than one +descriptor and discard them. This prevents a crash if we receive erroneous +large packets. + +Signed-off-by: Rob Herring +Cc: netdev@vger.kernel.org +Cc: linux-kernel@vger.kernel.org +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/calxeda/xgmac.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/drivers/net/ethernet/calxeda/xgmac.c ++++ b/drivers/net/ethernet/calxeda/xgmac.c +@@ -547,6 +547,10 @@ static int desc_get_rx_status(struct xgm + return -1; + } + ++ /* All frames should fit into a single buffer */ ++ if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG)) ++ return -1; ++ + /* Check if packet has checksum already */ + if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) && + !(ext_status & RXDESC_IP_PAYLOAD_MASK)) diff --git a/queue-3.4/net-loopback-fix-a-dst-refcounting-issue.patch b/queue-3.4/net-loopback-fix-a-dst-refcounting-issue.patch new file mode 100644 index 00000000000..9839a0bdbec --- /dev/null +++ b/queue-3.4/net-loopback-fix-a-dst-refcounting-issue.patch @@ -0,0 +1,80 @@ +From d812dc02280cea5f7c49cefdc2b1ebfd3ba7e48d Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Fri, 25 Jan 2013 07:44:41 +0000 +Subject: net: loopback: fix a dst refcounting issue + + +From: Eric Dumazet + +[ Upstream commit 794ed393b707f01858f5ebe2ae5eabaf89d00022 ] + +Ben Greear reported crashes in ip_rcv_finish() on a stress +test involving many macvlans. + +We tracked the bug to a dst use after free. ip_rcv_finish() +was calling dst->input() and got garbage for dst->input value. + +It appears the bug is in loopback driver, lacking +a skb_dst_force() before calling netif_rx(). + +As a result, a non refcounted dst, normally protected by a +RCU read_lock section, was escaping this section and could +be freed before the packet being processed. 
+ + [] loopback_xmit+0x64/0x83 + [] dev_hard_start_xmit+0x26c/0x35e + [] dev_queue_xmit+0x2c4/0x37c + [] ? dev_hard_start_xmit+0x35e/0x35e + [] ? eth_header+0x28/0xb6 + [] neigh_resolve_output+0x176/0x1a7 + [] ip_finish_output2+0x297/0x30d + [] ? ip_finish_output2+0x137/0x30d + [] ip_finish_output+0x63/0x68 + [] ip_output+0x61/0x67 + [] dst_output+0x17/0x1b + [] ip_local_out+0x1e/0x23 + [] ip_queue_xmit+0x315/0x353 + [] ? ip_send_unicast_reply+0x2cc/0x2cc + [] tcp_transmit_skb+0x7ca/0x80b + [] tcp_connect+0x53c/0x587 + [] ? getnstimeofday+0x44/0x7d + [] ? ktime_get_real+0x11/0x3e + [] tcp_v4_connect+0x3c2/0x431 + [] __inet_stream_connect+0x84/0x287 + [] ? inet_stream_connect+0x22/0x49 + [] ? _local_bh_enable_ip+0x84/0x9f + [] ? local_bh_enable+0xd/0x11 + [] ? lock_sock_nested+0x6e/0x79 + [] ? inet_stream_connect+0x22/0x49 + [] inet_stream_connect+0x33/0x49 + [] sys_connect+0x75/0x98 + +This bug was introduced in linux-2.6.35, in commit +7fee226ad2397b (net: add a noref bit on skb dst) + +skb_dst_force() is enforced in dev_queue_xmit() for devices having a +qdisc. + +Reported-by: Ben Greear +Signed-off-by: Eric Dumazet +Tested-by: Ben Greear +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/loopback.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/drivers/net/loopback.c ++++ b/drivers/net/loopback.c +@@ -77,6 +77,11 @@ static netdev_tx_t loopback_xmit(struct + + skb_orphan(skb); + ++ /* Before queueing this packet to netif_rx(), ++ * make sure dst is refcounted. ++ */ ++ skb_dst_force(skb); ++ + skb->protocol = eth_type_trans(skb, dev); + + /* it's OK to use per_cpu_ptr() because BHs are off */ diff --git a/queue-3.4/net-mlx4_core-set-number-of-msix-vectors-under-sriov-mode-to-firmware-defaults.patch b/queue-3.4/net-mlx4_core-set-number-of-msix-vectors-under-sriov-mode-to-firmware-defaults.patch new file mode 100644 index 00000000000..b88b75bf00d --- /dev/null +++ b/queue-3.4/net-mlx4_core-set-number-of-msix-vectors-under-sriov-mode-to-firmware-defaults.patch @@ -0,0 +1,58 @@ +From 49c8f8f71dc9d141ed847e5a25caa745d91dc928 Mon Sep 17 00:00:00 2001 +From: Or Gerlitz +Date: Thu, 17 Jan 2013 05:30:43 +0000 +Subject: net/mlx4_core: Set number of msix vectors under SRIOV mode to firmware defaults + + +From: Or Gerlitz + +[ Upstream commit ca4c7b35f75492de7fbf5ee95be07481c348caee ] + +The lines + + if (mlx4_is_mfunc(dev)) { + nreq = 2; + } else { + +which hard code the number of requested msi-x vectors under multi-function +mode to two can be removed completely, since the firmware sets num_eqs and +reserved_eqs appropriately Thus, the code line: + + nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, nreq); + +is by itself sufficient and correct for all cases. Currently, for mfunc +mode num_eqs = 32 and reserved_eqs = 28, hence four vectors will be enabled. + +This triples (one vector is used for the async events and commands EQ) the +horse power provided for processing of incoming packets on netdev RSS scheme, +IO initiators/targets commands processing flows, etc. + +Reviewed-by: Jack Morgenstein +Signed-off-by: Amir Vadai +Signed-off-by: Or Gerlitz +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/mellanox/mlx4/main.c | 11 ++--------- + 1 file changed, 2 insertions(+), 9 deletions(-) + +--- a/drivers/net/ethernet/mellanox/mlx4/main.c ++++ b/drivers/net/ethernet/mellanox/mlx4/main.c +@@ -1526,15 +1526,8 @@ static void mlx4_enable_msi_x(struct mlx + int i; + + if (msi_x) { +- /* In multifunction mode each function gets 2 msi-X vectors +- * one for data path completions anf the other for asynch events +- * or command completions */ +- if (mlx4_is_mfunc(dev)) { +- nreq = 2; +- } else { +- nreq = min_t(int, dev->caps.num_eqs - +- dev->caps.reserved_eqs, nreq); +- } ++ nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, ++ nreq); + + entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); + if (!entries) diff --git a/queue-3.4/net-mlx4_en-fix-bridged-vswitch-configuration-for-non-sriov-mode.patch b/queue-3.4/net-mlx4_en-fix-bridged-vswitch-configuration-for-non-sriov-mode.patch new file mode 100644 index 00000000000..22f5fcfe858 --- /dev/null +++ b/queue-3.4/net-mlx4_en-fix-bridged-vswitch-configuration-for-non-sriov-mode.patch @@ -0,0 +1,51 @@ +From 0bb0232602c5f03fe5a49a5093c74f9144c93b08 Mon Sep 17 00:00:00 2001 +From: Yan Burman +Date: Thu, 17 Jan 2013 05:30:42 +0000 +Subject: net/mlx4_en: Fix bridged vSwitch configuration for non SRIOV mode + + +From: Yan Burman + +[ Upstream commit 213815a1e6ae70b9648483b110bc5081795f99e8 ] + +Commit 5b4c4d36860e "mlx4_en: Allow communication between functions on +same host" introduced a regression under which a bridge acting as vSwitch +whose uplink is an mlx4 Ethernet device become non-operative in native +(non sriov) mode. This happens since broadcast ARP requests sent by VMs +were loopback-ed by the HW and hence the bridge learned VM source MACs +on both the VM and the uplink ports. + +The fix is to place the DMAC in the send WQE only under SRIOV/eSwitch +configuration or when the device is in selftest. + +Reviewed-by: Or Gerlitz +Signed-off-by: Yan Burman +Signed-off-by: Amir Vadai +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/mellanox/mlx4/en_tx.c | 13 +++++++++---- + 1 file changed, 9 insertions(+), 4 deletions(-) + +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c +@@ -683,10 +683,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff + ring->tx_csum++; + } + +- /* Copy dst mac address to wqe */ +- ethh = (struct ethhdr *)skb->data; +- tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest); +- tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2)); ++ if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) { ++ /* Copy dst mac address to wqe. 
This allows loopback in eSwitch, ++ * so that VFs and PF can communicate with each other ++ */ ++ ethh = (struct ethhdr *)skb->data; ++ tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest); ++ tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2)); ++ } ++ + /* Handle LSO (TSO) packets */ + if (lso_header_size) { + /* Mark opcode as LSO */ diff --git a/queue-3.4/net-prevent-setting-ttl-0-via-ip_ttl.patch b/queue-3.4/net-prevent-setting-ttl-0-via-ip_ttl.patch new file mode 100644 index 00000000000..8e249f02154 --- /dev/null +++ b/queue-3.4/net-prevent-setting-ttl-0-via-ip_ttl.patch @@ -0,0 +1,50 @@ +From 8c43c92f0eab26bd99c961aafdf1a183dc94ba7e Mon Sep 17 00:00:00 2001 +From: Cong Wang +Date: Mon, 7 Jan 2013 21:17:00 +0000 +Subject: net: prevent setting ttl=0 via IP_TTL + + +From: Cong Wang + +[ Upstream commit c9be4a5c49cf51cc70a993f004c5bb30067a65ce ] + +A regression is introduced by the following commit: + + commit 4d52cfbef6266092d535237ba5a4b981458ab171 + Author: Eric Dumazet + Date: Tue Jun 2 00:42:16 2009 -0700 + + net: ipv4/ip_sockglue.c cleanups + + Pure cleanups + +but it is not a pure cleanup... + + - if (val != -1 && (val < 1 || val>255)) + + if (val != -1 && (val < 0 || val > 255)) + +Since there is no reason provided to allow ttl=0, change it back. + +Reported-by: nitin padalia +Cc: nitin padalia +Cc: Eric Dumazet +Cc: David S. Miller +Signed-off-by: Cong Wang +Acked-by: Eric Dumazet +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/ipv4/ip_sockglue.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/net/ipv4/ip_sockglue.c ++++ b/net/ipv4/ip_sockglue.c +@@ -589,7 +589,7 @@ static int do_ip_setsockopt(struct sock + case IP_TTL: + if (optlen < 1) + goto e_inval; +- if (val != -1 && (val < 0 || val > 255)) ++ if (val != -1 && (val < 1 || val > 255)) + goto e_inval; + inet->uc_ttl = val; + break; diff --git a/queue-3.4/net-sctp-sctp_endpoint_free-zero-out-secret-key-data.patch b/queue-3.4/net-sctp-sctp_endpoint_free-zero-out-secret-key-data.patch new file mode 100644 index 00000000000..2636d36a371 --- /dev/null +++ b/queue-3.4/net-sctp-sctp_endpoint_free-zero-out-secret-key-data.patch @@ -0,0 +1,43 @@ +From 432e339a07db1ae5e4f95d42843a0714736cd9ae Mon Sep 17 00:00:00 2001 +From: Daniel Borkmann +Date: Fri, 8 Feb 2013 03:04:35 +0000 +Subject: net: sctp: sctp_endpoint_free: zero out secret key data + + +From: Daniel Borkmann + +[ Upstream commit b5c37fe6e24eec194bb29d22fdd55d73bcc709bf ] + +On sctp_endpoint_destroy, previously used sensitive keying material +should be zeroed out before the memory is returned, as we already do +with e.g. auth keys when released. + +Signed-off-by: Daniel Borkmann +Acked-by: Vlad Yasevich +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/sctp/endpointola.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/net/sctp/endpointola.c ++++ b/net/sctp/endpointola.c +@@ -248,6 +248,8 @@ void sctp_endpoint_free(struct sctp_endp + /* Final destructor for endpoint. */ + static void sctp_endpoint_destroy(struct sctp_endpoint *ep) + { ++ int i; ++ + SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); + + /* Free up the HMAC transform. 
*/ +@@ -270,6 +272,9 @@ static void sctp_endpoint_destroy(struct + sctp_inq_free(&ep->base.inqueue); + sctp_bind_addr_free(&ep->base.bind_addr); + ++ for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i) ++ memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE); ++ + /* Remove and free the port */ + if (sctp_sk(ep->base.sk)->bind_hash) + sctp_put_port(ep->base.sk); diff --git a/queue-3.4/net-sctp-sctp_setsockopt_auth_key-use-kzfree-instead-of-kfree.patch b/queue-3.4/net-sctp-sctp_setsockopt_auth_key-use-kzfree-instead-of-kfree.patch new file mode 100644 index 00000000000..29ad1d4363c --- /dev/null +++ b/queue-3.4/net-sctp-sctp_setsockopt_auth_key-use-kzfree-instead-of-kfree.patch @@ -0,0 +1,35 @@ +From 838bcd67faa48ef502338e73d4aaf17db407e60d Mon Sep 17 00:00:00 2001 +From: Daniel Borkmann +Date: Fri, 8 Feb 2013 03:04:34 +0000 +Subject: net: sctp: sctp_setsockopt_auth_key: use kzfree instead of kfree + + +From: Daniel Borkmann + +[ Upstream commit 6ba542a291a5e558603ac51cda9bded347ce7627 ] + +In sctp_setsockopt_auth_key, we create a temporary copy of the user +passed shared auth key for the endpoint or association and after +internal setup, we free it right away. Since it's sensitive data, we +should zero out the key before returning the memory back to the +allocator. Thus, use kzfree instead of kfree, just as we do in +sctp_auth_key_put(). + +Signed-off-by: Daniel Borkmann +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/sctp/socket.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -3375,7 +3375,7 @@ static int sctp_setsockopt_auth_key(stru + + ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); + out: +- kfree(authkey); ++ kzfree(authkey); + return ret; + } + diff --git a/queue-3.4/netback-correct-netbk_tx_err-to-handle-wrap-around.patch b/queue-3.4/netback-correct-netbk_tx_err-to-handle-wrap-around.patch new file mode 100644 index 00000000000..a0f5e7b106c --- /dev/null +++ b/queue-3.4/netback-correct-netbk_tx_err-to-handle-wrap-around.patch @@ -0,0 +1,29 @@ +From cd96f175b8ec528b0952b2134208a2a79dccb28a Mon Sep 17 00:00:00 2001 +From: Ian Campbell +Date: Wed, 6 Feb 2013 23:41:38 +0000 +Subject: netback: correct netbk_tx_err to handle wrap around. + + +From: Ian Campbell + +[ Upstream commit b9149729ebdcfce63f853aa54a404c6a8f6ebbf3 ] + +Signed-off-by: Ian Campbell +Acked-by: Jan Beulich +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/xen-netback/netback.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/xen-netback/netback.c ++++ b/drivers/net/xen-netback/netback.c +@@ -852,7 +852,7 @@ static void netbk_tx_err(struct xenvif * + + do { + make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); +- if (cons >= end) ++ if (cons == end) + break; + txp = RING_GET_REQUEST(&vif->tx, cons++); + } while (1); diff --git a/queue-3.4/netxen-fix-off-by-one-bug-in-netxen_release_tx_buffer.patch b/queue-3.4/netxen-fix-off-by-one-bug-in-netxen_release_tx_buffer.patch new file mode 100644 index 00000000000..6d8a58d78c7 --- /dev/null +++ b/queue-3.4/netxen-fix-off-by-one-bug-in-netxen_release_tx_buffer.patch @@ -0,0 +1,60 @@ +From e3ed07dccc1825e4458e1bf2c09256315df32229 Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Tue, 22 Jan 2013 06:33:05 +0000 +Subject: netxen: fix off by one bug in netxen_release_tx_buffer() + + +From: Eric Dumazet + +[ Upstream commit a05948f296ce103989b28a2606e47d2e287c3c89 ] + +Christoph Paasch found netxen could trigger a BUG in its dismantle +phase, in netxen_release_tx_buffer(), using full size TSO packets. + +cmd_buf->frag_count includes the skb->data part, so the loop must +start at index 1 instead of 0, or else we can make an out +of bound access to cmd_buff->frag_array[MAX_SKB_FRAGS + 2] + +Christoph provided the fixes in netxen_map_tx_skb() function. +In case of a dma mapping error, its better to clear the dma fields +so that we don't try to unmap them again in netxen_release_tx_buffer() + +Reported-by: Christoph Paasch +Signed-off-by: Eric Dumazet +Tested-by: Christoph Paasch +Cc: Sony Chacko +Cc: Rajesh Borundia +Signed-off-by: Christoph Paasch +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 2 +- + drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2 ++ + 2 files changed, 3 insertions(+), 1 deletion(-) + +--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c ++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +@@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct ne + buffrag->length, PCI_DMA_TODEVICE); + buffrag->dma = 0ULL; + } +- for (j = 0; j < cmd_buf->frag_count; j++) { ++ for (j = 1; j < cmd_buf->frag_count; j++) { + buffrag++; + if (buffrag->dma) { + pci_unmap_page(adapter->pdev, buffrag->dma, +--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c ++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +@@ -1956,10 +1956,12 @@ unwind: + while (--i >= 0) { + nf = &pbuf->frag_array[i+1]; + pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); ++ nf->dma = 0ULL; + } + + nf = &pbuf->frag_array[0]; + pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); ++ nf->dma = 0ULL; + + out_err: + return -ENOMEM; diff --git a/queue-3.4/packet-fix-leakage-of-tx_ring-memory.patch b/queue-3.4/packet-fix-leakage-of-tx_ring-memory.patch new file mode 100644 index 00000000000..97b17df5717 --- /dev/null +++ b/queue-3.4/packet-fix-leakage-of-tx_ring-memory.patch @@ -0,0 +1,56 @@ +From 91ecc552edae505868631b1970bc415c368dca48 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Fri, 1 Feb 2013 07:21:41 +0000 +Subject: packet: fix leakage of tx_ring memory + + +From: Phil Sutter + +[ Upstream commit 9665d5d62487e8e7b1f546c00e11107155384b9a ] + +When releasing a packet socket, the routine packet_set_ring() is reused +to free rings instead of allocating them. 
But when calling it for the +first time, it fills req->tp_block_nr with the value of rb->pg_vec_len +which in the second invocation makes it bail out since req->tp_block_nr +is greater zero but req->tp_block_size is zero. + +This patch solves the problem by passing a zeroed auto-variable to +packet_set_ring() upon each invocation from packet_release(). + +As far as I can tell, this issue exists even since 69e3c75 (net: TX_RING +and packet mmap), i.e. the original inclusion of TX ring support into +af_packet, but applies only to sockets with both RX and TX ring +allocated, which is probably why this was unnoticed all the time. + +Signed-off-by: Phil Sutter +Cc: Johann Baudy +Cc: Daniel Borkmann +Acked-by: Daniel Borkmann +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/packet/af_packet.c | 10 ++++++---- + 1 file changed, 6 insertions(+), 4 deletions(-) + +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -2450,13 +2450,15 @@ static int packet_release(struct socket + + packet_flush_mclist(sk); + +- memset(&req_u, 0, sizeof(req_u)); +- +- if (po->rx_ring.pg_vec) ++ if (po->rx_ring.pg_vec) { ++ memset(&req_u, 0, sizeof(req_u)); + packet_set_ring(sk, &req_u, 1, 0); ++ } + +- if (po->tx_ring.pg_vec) ++ if (po->tx_ring.pg_vec) { ++ memset(&req_u, 0, sizeof(req_u)); + packet_set_ring(sk, &req_u, 1, 1); ++ } + + fanout_release(sk); + diff --git a/queue-3.4/pktgen-correctly-handle-failures-when-adding-a-device.patch b/queue-3.4/pktgen-correctly-handle-failures-when-adding-a-device.patch new file mode 100644 index 00000000000..898b71323df --- /dev/null +++ b/queue-3.4/pktgen-correctly-handle-failures-when-adding-a-device.patch @@ -0,0 +1,57 @@ +From 04747a3518f492e052af4c277d96e6a806cc25e3 Mon Sep 17 00:00:00 2001 +From: Cong Wang +Date: Sun, 27 Jan 2013 21:14:08 +0000 +Subject: pktgen: correctly handle failures when adding a device + + +From: Cong Wang + +[ Upstream commit 604dfd6efc9b79bce432f2394791708d8e8f6efc ] + +The return value of pktgen_add_device() is not checked, so +even if we fail to add some device, for example, non-exist one, +we still see "OK:...". This patch fixes it. + +After this patch, I got: + + # echo "add_device non-exist" > /proc/net/pktgen/kpktgend_0 + -bash: echo: write error: No such device + # cat /proc/net/pktgen/kpktgend_0 + Running: + Stopped: + Result: ERROR: can not add device non-exist + # echo "add_device eth0" > /proc/net/pktgen/kpktgend_0 + # cat /proc/net/pktgen/kpktgend_0 + Running: + Stopped: eth0 + Result: OK: add_device=eth0 + +(Candidate for -stable) + +Cc: David S. Miller +Signed-off-by: Cong Wang +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/core/pktgen.c | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -1802,10 +1802,13 @@ static ssize_t pktgen_thread_write(struc + return -EFAULT; + i += len; + mutex_lock(&pktgen_thread_lock); +- pktgen_add_device(t, f); ++ ret = pktgen_add_device(t, f); + mutex_unlock(&pktgen_thread_lock); +- ret = count; +- sprintf(pg_result, "OK: add_device=%s", f); ++ if (!ret) { ++ ret = count; ++ sprintf(pg_result, "OK: add_device=%s", f); ++ } else ++ sprintf(pg_result, "ERROR: can not add device %s", f); + goto out; + } + diff --git a/queue-3.4/r8169-remove-the-obsolete-and-incorrect-amd-workaround.patch b/queue-3.4/r8169-remove-the-obsolete-and-incorrect-amd-workaround.patch new file mode 100644 index 00000000000..3923f7ab2a8 --- /dev/null +++ b/queue-3.4/r8169-remove-the-obsolete-and-incorrect-amd-workaround.patch @@ -0,0 +1,58 @@ +From 6b040618eeb86909d4a7d90ab2f6eb86ab1a0708 Mon Sep 17 00:00:00 2001 +From: Timo Teräs +Date: Mon, 21 Jan 2013 22:30:35 +0000 +Subject: r8169: remove the obsolete and incorrect AMD workaround + + +From: =?UTF-8?q?Timo=20Ter=C3=A4s?= + +[ Upstream commit 5d0feaff230c0abfe4a112e6f09f096ed99e0b2d ] + +This was introduced in commit 6dccd16 "r8169: merge with version +6.001.00 of Realtek's r8169 driver". I did not find the version +6.001.00 online, but in 6.002.00 or any later r8169 from Realtek +this hunk is no longer present. + +Also commit 05af214 "r8169: fix Ethernet Hangup for RTL8110SC +rev d" claims to have fixed this issue otherwise. + +The magic compare mask of 0xfffe000 is dubious as it masks +parts of the Reserved part, and parts of the VLAN tag. But this +does not make much sense as the VLAN tag parts are perfectly +valid there. In matter of fact this seems to be triggered with +any VLAN tagged packet as RxVlanTag bit is matched. I would +suspect 0xfffe0000 was intended to test reserved part only. + +Finally, this hunk is evil as it can cause more packets to be +handled than what was NAPI quota causing net/core/dev.c: +net_rx_action(): WARN_ON_ONCE(work > weight) to trigger, and +mess up the NAPI state causing device to hang. + +As result, any system using VLANs and having high receive +traffic (so that NAPI poll budget limits rtl_rx) would result +in device hang. + +Signed-off-by: Timo Teräs +Acked-by: Francois Romieu +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/realtek/r8169.c | 7 ------- + 1 file changed, 7 deletions(-) + +--- a/drivers/net/ethernet/realtek/r8169.c ++++ b/drivers/net/ethernet/realtek/r8169.c +@@ -5450,13 +5450,6 @@ process_pkt: + tp->rx_stats.bytes += pkt_size; + u64_stats_update_end(&tp->rx_stats.syncp); + } +- +- /* Work around for AMD plateform. 
*/ +- if ((desc->opts2 & cpu_to_le32(0xfffe000)) && +- (tp->mac_version == RTL_GIGA_MAC_VER_05)) { +- desc->opts2 = 0; +- cur_rx++; +- } + } + + count = cur_rx - tp->cur_rx; diff --git a/queue-3.4/sctp-refactor-sctp_outq_teardown-to-insure-proper-re-initalization.patch b/queue-3.4/sctp-refactor-sctp_outq_teardown-to-insure-proper-re-initalization.patch new file mode 100644 index 00000000000..ebe8e9dbbce --- /dev/null +++ b/queue-3.4/sctp-refactor-sctp_outq_teardown-to-insure-proper-re-initalization.patch @@ -0,0 +1,72 @@ +From 1d86635eec79f9766111be7792173d2e6d583744 Mon Sep 17 00:00:00 2001 +From: Neil Horman +Date: Thu, 17 Jan 2013 11:15:08 +0000 +Subject: sctp: refactor sctp_outq_teardown to insure proper re-initalization + + +From: Neil Horman + +[ Upstream commit 2f94aabd9f6c925d77aecb3ff020f1cc12ed8f86 ] + +Jamie Parsons reported a problem recently, in which the re-initalization of an +association (The duplicate init case), resulted in a loss of receive window +space. He tracked down the root cause to sctp_outq_teardown, which discarded +all the data on an outq during a re-initalization of the corresponding +association, but never reset the outq->outstanding_data field to zero. I wrote, +and he tested this fix, which does a proper full re-initalization of the outq, +fixing this problem, and hopefully future proofing us from simmilar issues down +the road. + +Signed-off-by: Neil Horman +Reported-by: Jamie Parsons +Tested-by: Jamie Parsons +CC: Jamie Parsons +CC: Vlad Yasevich +CC: "David S. Miller" +CC: netdev@vger.kernel.org +Acked-by: Vlad Yasevich +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/sctp/outqueue.c | 12 ++++++++---- + 1 file changed, 8 insertions(+), 4 deletions(-) + +--- a/net/sctp/outqueue.c ++++ b/net/sctp/outqueue.c +@@ -223,7 +223,7 @@ void sctp_outq_init(struct sctp_associat + + /* Free the outqueue structure and any related pending chunks. + */ +-void sctp_outq_teardown(struct sctp_outq *q) ++static void __sctp_outq_teardown(struct sctp_outq *q) + { + struct sctp_transport *transport; + struct list_head *lchunk, *temp; +@@ -276,8 +276,6 @@ void sctp_outq_teardown(struct sctp_outq + sctp_chunk_free(chunk); + } + +- q->error = 0; +- + /* Throw away any leftover control chunks. */ + list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { + list_del_init(&chunk->list); +@@ -285,11 +283,17 @@ void sctp_outq_teardown(struct sctp_outq + } + } + ++void sctp_outq_teardown(struct sctp_outq *q) ++{ ++ __sctp_outq_teardown(q); ++ sctp_outq_init(q->asoc, q); ++} ++ + /* Free the outqueue structure and any related pending chunks. */ + void sctp_outq_free(struct sctp_outq *q) + { + /* Throw away leftover chunks. */ +- sctp_outq_teardown(q); ++ __sctp_outq_teardown(q); + + /* If we were kmalloc()'d, free the memory. 
*/ + if (q->malloced) diff --git a/queue-3.4/series b/queue-3.4/series index 5bcc8724918..e4ca7bfd356 100644 --- a/queue-3.4/series +++ b/queue-3.4/series @@ -5,3 +5,31 @@ kernel-resource.c-fix-stack-overflow-in-__reserve_region_with_split.patch bluetooth-fix-handling-of-unexpected-smp-pdus.patch efi-make-efi_enabled-a-function-to-query-efi-facilities.patch samsung-laptop-disable-on-efi-hardware.patch +net-prevent-setting-ttl-0-via-ip_ttl.patch +ipv6-fix-the-noflags-test-in-addrconf_get_prefix_route.patch +maintainers-stephen-hemminger-email-change.patch +ipv6-fix-header-length-calculation-in-ip6_append_data.patch +net-calxedaxgmac-throw-away-overrun-frames.patch +net-mlx4_en-fix-bridged-vswitch-configuration-for-non-sriov-mode.patch +net-mlx4_core-set-number-of-msix-vectors-under-sriov-mode-to-firmware-defaults.patch +isdn-gigaset-fix-zero-size-border-case-in-debug-dump.patch +netxen-fix-off-by-one-bug-in-netxen_release_tx_buffer.patch +r8169-remove-the-obsolete-and-incorrect-amd-workaround.patch +net-loopback-fix-a-dst-refcounting-issue.patch +pktgen-correctly-handle-failures-when-adding-a-device.patch +ipv6-do-not-create-neighbor-entries-for-local-delivery.patch +via-rhine-fix-bugs-in-napi-support.patch +packet-fix-leakage-of-tx_ring-memory.patch +atm-iphase-rename-fregt_t-ffreg_t.patch +sctp-refactor-sctp_outq_teardown-to-insure-proper-re-initalization.patch +net-sctp-sctp_setsockopt_auth_key-use-kzfree-instead-of-kfree.patch +net-sctp-sctp_endpoint_free-zero-out-secret-key-data.patch +xen-netback-shutdown-the-ring-if-it-contains-garbage.patch +xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch +xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch +netback-correct-netbk_tx_err-to-handle-wrap-around.patch +tcp-frto-should-not-set-snd_cwnd-to-0.patch +tcp-fix-for-zero-packets_in_flight-was-too-broad.patch +bridge-pull-ip-header-into-skb-data-before-looking-into-ip-header.patch +tg3-avoid-null-pointer-dereference-in-tg3_interrupt-in-netconsole-mode.patch +tg3-fix-crc-errors-on-jumbo-frame-receive.patch diff --git a/queue-3.4/tcp-fix-for-zero-packets_in_flight-was-too-broad.patch b/queue-3.4/tcp-fix-for-zero-packets_in_flight-was-too-broad.patch new file mode 100644 index 00000000000..5b06f5dc9ee --- /dev/null +++ b/queue-3.4/tcp-fix-for-zero-packets_in_flight-was-too-broad.patch @@ -0,0 +1,52 @@ +From b966c5b50d728a0fe67becadf5544a5239b32098 Mon Sep 17 00:00:00 2001 +From: Ilpo Järvinen +Date: Mon, 4 Feb 2013 02:14:25 +0000 +Subject: tcp: fix for zero packets_in_flight was too broad + + +From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= + +[ Upstream commit 6731d2095bd4aef18027c72ef845ab1087c3ba63 ] + +There are transients during normal FRTO procedure during which +the packets_in_flight can go to zero between write_queue state +updates and firing the resulting segments out. As FRTO processing +occurs during that window the check must be more precise to +not match "spuriously" :-). More specificly, e.g., when +packets_in_flight is zero but FLAG_DATA_ACKED is true the problematic +branch that set cwnd into zero would not be taken and new segments +might be sent out later. + +Signed-off-by: Ilpo Järvinen +Tested-by: Eric Dumazet +Acked-by: Neal Cardwell +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/ipv4/tcp_input.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -3619,8 +3619,7 @@ static int tcp_process_frto(struct sock + ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED))) + tp->undo_marker = 0; + +- if (!before(tp->snd_una, tp->frto_highmark) || +- !tcp_packets_in_flight(tp)) { ++ if (!before(tp->snd_una, tp->frto_highmark)) { + tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag); + return 1; + } +@@ -3640,6 +3639,11 @@ static int tcp_process_frto(struct sock + } + } else { + if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { ++ if (!tcp_packets_in_flight(tp)) { ++ tcp_enter_frto_loss(sk, 2, flag); ++ return true; ++ } ++ + /* Prevent sending of new data. */ + tp->snd_cwnd = min(tp->snd_cwnd, + tcp_packets_in_flight(tp)); diff --git a/queue-3.4/tcp-frto-should-not-set-snd_cwnd-to-0.patch b/queue-3.4/tcp-frto-should-not-set-snd_cwnd-to-0.patch new file mode 100644 index 00000000000..3e440b5f543 --- /dev/null +++ b/queue-3.4/tcp-frto-should-not-set-snd_cwnd-to-0.patch @@ -0,0 +1,43 @@ +From 684377af90fbb1ed24f85359063a3d8c9fed4611 Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Sun, 3 Feb 2013 09:13:05 +0000 +Subject: tcp: frto should not set snd_cwnd to 0 + + +From: Eric Dumazet + +[ Upstream commit 2e5f421211ff76c17130b4597bc06df4eeead24f ] + +Commit 9dc274151a548 (tcp: fix ABC in tcp_slow_start()) +uncovered a bug in FRTO code : +tcp_process_frto() is setting snd_cwnd to 0 if the number +of in flight packets is 0. + +As Neal pointed out, if no packet is in flight we lost our +chance to disambiguate whether a loss timeout was spurious. + +We should assume it was a proper loss. + +Reported-by: Pasi Kärkkäinen +Signed-off-by: Neal Cardwell +Signed-off-by: Eric Dumazet +Cc: Ilpo Järvinen +Cc: Yuchung Cheng +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/ipv4/tcp_input.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -3619,7 +3619,8 @@ static int tcp_process_frto(struct sock + ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED))) + tp->undo_marker = 0; + +- if (!before(tp->snd_una, tp->frto_highmark)) { ++ if (!before(tp->snd_una, tp->frto_highmark) || ++ !tcp_packets_in_flight(tp)) { + tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag); + return 1; + } diff --git a/queue-3.4/tg3-avoid-null-pointer-dereference-in-tg3_interrupt-in-netconsole-mode.patch b/queue-3.4/tg3-avoid-null-pointer-dereference-in-tg3_interrupt-in-netconsole-mode.patch new file mode 100644 index 00000000000..7e02da27419 --- /dev/null +++ b/queue-3.4/tg3-avoid-null-pointer-dereference-in-tg3_interrupt-in-netconsole-mode.patch @@ -0,0 +1,44 @@ +From 59beb7f7a982fd8d257d66ef26759236f1e47669 Mon Sep 17 00:00:00 2001 +From: Nithin Nayak Sujir +Date: Mon, 14 Jan 2013 17:10:59 +0000 +Subject: tg3: Avoid null pointer dereference in tg3_interrupt in netconsole mode + + +From: Nithin Nayak Sujir + +[ Upstream commit 9c13cb8bb477a83b9a3c9e5a5478a4e21294a760 ] + +When netconsole is enabled, logging messages generated during tg3_open +can result in a null pointer dereference for the uninitialized tg3 +status block. Use the irq_sync flag to disable polling in the early +stages. irq_sync is cleared when the driver is enabling interrupts after +all initialization is completed. 
+ +Signed-off-by: Nithin Nayak Sujir +Signed-off-by: Michael Chan +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/broadcom/tg3.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -6574,6 +6574,9 @@ static void tg3_poll_controller(struct n + int i; + struct tg3 *tp = netdev_priv(dev); + ++ if (tg3_irq_sync(tp)) ++ return; ++ + for (i = 0; i < tp->irq_cnt; i++) + tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); + } +@@ -15529,6 +15532,7 @@ static int __devinit tg3_init_one(struct + tp->pm_cap = pm_cap; + tp->rx_mode = TG3_DEF_RX_MODE; + tp->tx_mode = TG3_DEF_TX_MODE; ++ tp->irq_sync = 1; + + if (tg3_debug > 0) + tp->msg_enable = tg3_debug; diff --git a/queue-3.4/tg3-fix-crc-errors-on-jumbo-frame-receive.patch b/queue-3.4/tg3-fix-crc-errors-on-jumbo-frame-receive.patch new file mode 100644 index 00000000000..1e4ef630afe --- /dev/null +++ b/queue-3.4/tg3-fix-crc-errors-on-jumbo-frame-receive.patch @@ -0,0 +1,180 @@ +From 39d8e3d2f4dee728bfb2f9e90a4b0d41577521db Mon Sep 17 00:00:00 2001 +From: Nithin Nayak Sujir +Date: Mon, 14 Jan 2013 17:11:00 +0000 +Subject: tg3: Fix crc errors on jumbo frame receive + + +From: Nithin Nayak Sujir + +[ Upstream commit daf3ec688e057f6060fb9bb0819feac7a8bbf45c ] + +TG3_PHY_AUXCTL_SMDSP_ENABLE/DISABLE macros do a blind write to the phy +auxiliary control register and overwrite the EXT_PKT_LEN (bit 14) resulting +in intermittent crc errors on jumbo frames with some link partners. Change +the code to do a read/modify/write. + +Signed-off-by: Nithin Nayak Sujir +Signed-off-by: Michael Chan +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/broadcom/tg3.c | 60 +++++++++++++++++++++--------------- + 1 file changed, 36 insertions(+), 24 deletions(-) + +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -1136,14 +1136,26 @@ static int tg3_phy_auxctl_write(struct t + return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); + } + +-#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \ +- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ +- MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \ +- MII_TG3_AUXCTL_ACTL_TX_6DB) +- +-#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \ +- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ +- MII_TG3_AUXCTL_ACTL_TX_6DB); ++static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable) ++{ ++ u32 val; ++ int err; ++ ++ err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); ++ ++ if (err) ++ return err; ++ if (enable) ++ ++ val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA; ++ else ++ val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA; ++ ++ err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, ++ val | MII_TG3_AUXCTL_ACTL_TX_6DB); ++ ++ return err; ++} + + static int tg3_bmcr_reset(struct tg3 *tp) + { +@@ -2076,7 +2088,7 @@ static void tg3_phy_apply_otp(struct tg3 + + otp = tp->phy_otp; + +- if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) ++ if (tg3_phy_toggle_auxctl_smdsp(tp, true)) + return; + + phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); +@@ -2101,7 +2113,7 @@ static void tg3_phy_apply_otp(struct tg3 + ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); + +- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); ++ tg3_phy_toggle_auxctl_smdsp(tp, false); + } + + static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) +@@ -2137,9 +2149,9 @@ static void tg3_phy_eee_adjust(struct tg + + if 
(!tp->setlpicnt) { + if (current_link_up == 1 && +- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { ++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) { + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); +- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); ++ tg3_phy_toggle_auxctl_smdsp(tp, false); + } + + val = tr32(TG3_CPMU_EEE_MODE); +@@ -2155,11 +2167,11 @@ static void tg3_phy_eee_enable(struct tg + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + tg3_flag(tp, 57765_CLASS)) && +- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { ++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) { + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); +- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); ++ tg3_phy_toggle_auxctl_smdsp(tp, false); + } + + val = tr32(TG3_CPMU_EEE_MODE); +@@ -2303,7 +2315,7 @@ static int tg3_phy_reset_5703_4_5(struct + tg3_writephy(tp, MII_CTRL1000, + CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); + +- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); ++ err = tg3_phy_toggle_auxctl_smdsp(tp, true); + if (err) + return err; + +@@ -2324,7 +2336,7 @@ static int tg3_phy_reset_5703_4_5(struct + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); + +- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); ++ tg3_phy_toggle_auxctl_smdsp(tp, false); + + tg3_writephy(tp, MII_CTRL1000, phy9_orig); + +@@ -2413,10 +2425,10 @@ static int tg3_phy_reset(struct tg3 *tp) + + out: + if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && +- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { ++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) { + tg3_phydsp_write(tp, 0x201f, 0x2aaa); + tg3_phydsp_write(tp, 0x000a, 0x0323); +- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); ++ tg3_phy_toggle_auxctl_smdsp(tp, false); + } + + if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { +@@ -2425,14 +2437,14 @@ out: + } + + if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { +- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { ++ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { + tg3_phydsp_write(tp, 0x000a, 0x310b); + tg3_phydsp_write(tp, 0x201f, 0x9506); + tg3_phydsp_write(tp, 0x401f, 0x14e2); +- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); ++ tg3_phy_toggle_auxctl_smdsp(tp, false); + } + } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { +- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { ++ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); + if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); +@@ -2441,7 +2453,7 @@ out: + } else + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); + +- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); ++ tg3_phy_toggle_auxctl_smdsp(tp, false); + } + } + +@@ -3858,7 +3870,7 @@ static int tg3_phy_autoneg_cfg(struct tg + tw32(TG3_CPMU_EEE_MODE, + tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); + +- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); ++ err = tg3_phy_toggle_auxctl_smdsp(tp, true); + if (!err) { + u32 err2; + +@@ -3891,7 +3903,7 @@ static int tg3_phy_autoneg_cfg(struct tg + MII_TG3_DSP_CH34TP2_HIBW01); + } + +- err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); ++ err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); + if (!err) + err = err2; + } diff --git a/queue-3.4/via-rhine-fix-bugs-in-napi-support.patch b/queue-3.4/via-rhine-fix-bugs-in-napi-support.patch new file mode 100644 index 00000000000..3a54f1746cb --- /dev/null +++ b/queue-3.4/via-rhine-fix-bugs-in-napi-support.patch @@ -0,0 +1,50 @@ +From 31de82cb7cb2396ffbe0d5a2879b6c381ad4e3ff Mon Sep 17 00:00:00 2001 +From: "David S. 
Miller" +Date: Tue, 29 Jan 2013 22:58:04 -0500 +Subject: via-rhine: Fix bugs in NAPI support. + + +From: "David S. Miller" + +[ Upstream commit 559bcac35facfed49ab4f408e162971612dcfdf3 ] + +1) rhine_tx() should use dev_kfree_skb() not dev_kfree_skb_irq() + +2) rhine_slow_event_task's NAPI triggering logic is racey, it + should just hit the interrupt mask register. This is the + same as commit 7dbb491878a2c51d372a8890fa45a8ff80358af1 + ("r8169: avoid NAPI scheduling delay.") made to fix the same + problem in the r8169 driver. From Francois Romieu. + +Reported-by: Jamie Gloudon +Tested-by: Jamie Gloudon +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/via/via-rhine.c | 8 ++------ + 1 file changed, 2 insertions(+), 6 deletions(-) + +--- a/drivers/net/ethernet/via/via-rhine.c ++++ b/drivers/net/ethernet/via/via-rhine.c +@@ -1802,7 +1802,7 @@ static void rhine_tx(struct net_device * + rp->tx_skbuff[entry]->len, + PCI_DMA_TODEVICE); + } +- dev_kfree_skb_irq(rp->tx_skbuff[entry]); ++ dev_kfree_skb(rp->tx_skbuff[entry]); + rp->tx_skbuff[entry] = NULL; + entry = (++rp->dirty_tx) % TX_RING_SIZE; + } +@@ -2011,11 +2011,7 @@ static void rhine_slow_event_task(struct + if (intr_status & IntrPCIErr) + netif_warn(rp, hw, dev, "PCI error\n"); + +- napi_disable(&rp->napi); +- rhine_irq_disable(rp); +- /* Slow and safe. Consider __napi_schedule as a replacement ? */ +- napi_enable(&rp->napi); +- napi_schedule(&rp->napi); ++ iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); + + out_unlock: + mutex_unlock(&rp->task_lock); diff --git a/queue-3.4/xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch b/queue-3.4/xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch new file mode 100644 index 00000000000..ad336015865 --- /dev/null +++ b/queue-3.4/xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch @@ -0,0 +1,134 @@ +From 4687ceb955f4417066123a8059b35c5085a4e544 Mon Sep 17 00:00:00 2001 +From: Matthew Daley +Date: Wed, 6 Feb 2013 23:41:36 +0000 +Subject: xen/netback: don't leak pages on failure in xen_netbk_tx_check_gop. + + +From: Matthew Daley + +[ Upstream commit 7d5145d8eb2b9791533ffe4dc003b129b9696c48 ] + +Signed-off-by: Matthew Daley +Reviewed-by: Konrad Rzeszutek Wilk +Acked-by: Ian Campbell +Acked-by: Jan Beulich +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/xen-netback/netback.c | 38 +++++++++++++------------------------- + 1 file changed, 13 insertions(+), 25 deletions(-) + +--- a/drivers/net/xen-netback/netback.c ++++ b/drivers/net/xen-netback/netback.c +@@ -146,7 +146,8 @@ void xen_netbk_remove_xenvif(struct xenv + atomic_dec(&netbk->netfront_count); + } + +-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx); ++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, ++ u8 status); + static void make_tx_response(struct xenvif *vif, + struct xen_netif_tx_request *txp, + s8 st); +@@ -979,30 +980,20 @@ static int xen_netbk_tx_check_gop(struct + { + struct gnttab_copy *gop = *gopp; + u16 pending_idx = *((u16 *)skb->data); +- struct pending_tx_info *pending_tx_info = netbk->pending_tx_info; +- struct xenvif *vif = pending_tx_info[pending_idx].vif; +- struct xen_netif_tx_request *txp; + struct skb_shared_info *shinfo = skb_shinfo(skb); + int nr_frags = shinfo->nr_frags; + int i, err, start; + + /* Check status of header. 
*/ + err = gop->status; +- if (unlikely(err)) { +- pending_ring_idx_t index; +- index = pending_index(netbk->pending_prod++); +- txp = &pending_tx_info[pending_idx].req; +- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); +- netbk->pending_ring[index] = pending_idx; +- xenvif_put(vif); +- } ++ if (unlikely(err)) ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); + + /* Skip first skb fragment if it is on same page as header fragment. */ + start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); + + for (i = start; i < nr_frags; i++) { + int j, newerr; +- pending_ring_idx_t index; + + pending_idx = frag_get_pending_idx(&shinfo->frags[i]); + +@@ -1011,16 +1002,12 @@ static int xen_netbk_tx_check_gop(struct + if (likely(!newerr)) { + /* Had a previous error? Invalidate this fragment. */ + if (unlikely(err)) +- xen_netbk_idx_release(netbk, pending_idx); ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + continue; + } + + /* Error on this fragment: respond to client with an error. */ +- txp = &netbk->pending_tx_info[pending_idx].req; +- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); +- index = pending_index(netbk->pending_prod++); +- netbk->pending_ring[index] = pending_idx; +- xenvif_put(vif); ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); + + /* Not the first error? Preceding frags already invalidated. */ + if (err) +@@ -1028,10 +1015,10 @@ static int xen_netbk_tx_check_gop(struct + + /* First error: invalidate header and preceding fragments. */ + pending_idx = *((u16 *)skb->data); +- xen_netbk_idx_release(netbk, pending_idx); ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + for (j = start; j < i; j++) { + pending_idx = frag_get_pending_idx(&shinfo->frags[j]); +- xen_netbk_idx_release(netbk, pending_idx); ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + } + + /* Remember the error: invalidate all subsequent fragments. */ +@@ -1065,7 +1052,7 @@ static void xen_netbk_fill_frags(struct + + /* Take an extra reference to offset xen_netbk_idx_release */ + get_page(netbk->mmap_pages[pending_idx]); +- xen_netbk_idx_release(netbk, pending_idx); ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + } + } + +@@ -1448,7 +1435,7 @@ static void xen_netbk_tx_submit(struct x + txp->size -= data_len; + } else { + /* Schedule a response immediately. 
*/ +- xen_netbk_idx_release(netbk, pending_idx); ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + } + + if (txp->flags & XEN_NETTXF_csum_blank) +@@ -1503,7 +1490,8 @@ static void xen_netbk_tx_action(struct x + + } + +-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) ++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, ++ u8 status) + { + struct xenvif *vif; + struct pending_tx_info *pending_tx_info; +@@ -1517,7 +1505,7 @@ static void xen_netbk_idx_release(struct + + vif = pending_tx_info->vif; + +- make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY); ++ make_tx_response(vif, &pending_tx_info->req, status); + + index = pending_index(netbk->pending_prod++); + netbk->pending_ring[index] = pending_idx; diff --git a/queue-3.4/xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch b/queue-3.4/xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch new file mode 100644 index 00000000000..036f1a8065b --- /dev/null +++ b/queue-3.4/xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch @@ -0,0 +1,46 @@ +From c44e0eea7ef736013a0aac90435fc5aecc171983 Mon Sep 17 00:00:00 2001 +From: Ian Campbell +Date: Wed, 6 Feb 2013 23:41:37 +0000 +Subject: xen/netback: free already allocated memory on failure in xen_netbk_get_requests + + +From: Ian Campbell + +[ Upstream commit 4cc7c1cb7b11b6f3515bd9075527576a1eecc4aa ] + +Signed-off-by: Ian Campbell +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/xen-netback/netback.c | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +--- a/drivers/net/xen-netback/netback.c ++++ b/drivers/net/xen-netback/netback.c +@@ -950,7 +950,7 @@ static struct gnttab_copy *xen_netbk_get + pending_idx = netbk->pending_ring[index]; + page = xen_netbk_alloc_page(netbk, skb, pending_idx); + if (!page) +- return NULL; ++ goto err; + + gop->source.u.ref = txp->gref; + gop->source.domid = vif->domid; +@@ -972,6 +972,17 @@ static struct gnttab_copy *xen_netbk_get + } + + return gop; ++err: ++ /* Unwind, freeing all pages and sending error responses. */ ++ while (i-- > start) { ++ xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]), ++ XEN_NETIF_RSP_ERROR); ++ } ++ /* The head too, if necessary. */ ++ if (start) ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); ++ ++ return NULL; + } + + static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, diff --git a/queue-3.4/xen-netback-shutdown-the-ring-if-it-contains-garbage.patch b/queue-3.4/xen-netback-shutdown-the-ring-if-it-contains-garbage.patch new file mode 100644 index 00000000000..c91c5930f5f --- /dev/null +++ b/queue-3.4/xen-netback-shutdown-the-ring-if-it-contains-garbage.patch @@ -0,0 +1,250 @@ +From b4e4ee56a66ff284fd111b09250a2c83aff55076 Mon Sep 17 00:00:00 2001 +From: Ian Campbell +Date: Wed, 6 Feb 2013 23:41:35 +0000 +Subject: xen/netback: shutdown the ring if it contains garbage. + + +From: Ian Campbell + +[ Upstream commit 48856286b64e4b66ec62b94e504d0b29c1ade664 ] + +A buggy or malicious frontend should not be able to confuse netback. +If we spot anything which is not as it should be then shutdown the +device and don't try to continue with the ring in a potentially +hostile state. Well behaved and non-hostile frontends will not be +penalised. 
+
+As well as making the existing checks for such errors fatal also add a
+new check that ensures that there isn't an insane number of requests
+on the ring (i.e. more than would fit in the ring). If the ring
+contains garbage then previously it was possible to loop over this
+insane number, getting an error each time and therefore not generating
+any more pending requests and therefore not exiting the loop in
+xen_netbk_tx_build_gops for an extended period.
+
+Also turn various netdev_dbg calls which now precipitate a fatal error
+into netdev_err; they are rate limited because the device is shut down
+afterwards.
+
+This fixes at least one known DoS/softlockup of the backend domain.
+
+Signed-off-by: Ian Campbell
+Reviewed-by: Konrad Rzeszutek Wilk
+Acked-by: Jan Beulich
+Signed-off-by: David S. Miller
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/net/xen-netback/common.h | 3 +
+ drivers/net/xen-netback/interface.c | 23 ++++++++-----
+ drivers/net/xen-netback/netback.c | 62 ++++++++++++++++++++++++++----------
+ 3 files changed, 62 insertions(+), 26 deletions(-)
+
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvi
+ /* Notify xenvif that ring now has space to send an skb to the frontend */
+ void xenvif_notify_tx_completion(struct xenvif *vif);
+
++/* Prevent the device from generating any further traffic. */
++void xenvif_carrier_off(struct xenvif *vif);
++
+ /* Returns number of ring slots required to send an skb to the frontend */
+ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -343,17 +343,22 @@ err:
+ return err;
+ }
+
+-void xenvif_disconnect(struct xenvif *vif)
++void xenvif_carrier_off(struct xenvif *vif)
+ {
+ struct net_device *dev = vif->dev;
+- if (netif_carrier_ok(dev)) {
+- rtnl_lock();
+- netif_carrier_off(dev); /* discard queued packets */
+- if (netif_running(dev))
+- xenvif_down(vif);
+- rtnl_unlock();
+- xenvif_put(vif);
+- }
++
++ rtnl_lock();
++ netif_carrier_off(dev); /* discard queued packets */
++ if (netif_running(dev))
++ xenvif_down(vif);
++ rtnl_unlock();
++ xenvif_put(vif);
++}
++
++void xenvif_disconnect(struct xenvif *vif)
++{
++ if (netif_carrier_ok(vif->dev))
++ xenvif_carrier_off(vif);
+
+ atomic_dec(&vif->refcnt);
+ wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -860,6 +860,13 @@ static void netbk_tx_err(struct xenvif *
+ xenvif_put(vif);
+ }
+
++static void netbk_fatal_tx_err(struct xenvif *vif)
++{
++ netdev_err(vif->dev, "fatal error; disabling device\n");
++ xenvif_carrier_off(vif);
++ xenvif_put(vif);
++}
++
+ static int netbk_count_requests(struct xenvif *vif,
+ struct xen_netif_tx_request *first,
+ struct xen_netif_tx_request *txp,
+@@ -873,19 +880,22 @@ static int netbk_count_requests(struct x
+
+ do {
+ if (frags >= work_to_do) {
+- netdev_dbg(vif->dev, "Need more frags\n");
++ netdev_err(vif->dev, "Need more frags\n");
++ netbk_fatal_tx_err(vif);
+ return -frags;
+ }
+
+ if (unlikely(frags >= MAX_SKB_FRAGS)) {
+- netdev_dbg(vif->dev, "Too many frags\n");
++ netdev_err(vif->dev, "Too many frags\n");
++ netbk_fatal_tx_err(vif);
+ return -frags;
+ }
+
+ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+ sizeof(*txp));
+ if (txp->size > first->size) {
+- netdev_dbg(vif->dev, "Frags galore\n");
++ netdev_err(vif->dev, "Frag 
is bigger than frame.\n"); ++ netbk_fatal_tx_err(vif); + return -frags; + } + +@@ -893,8 +903,9 @@ static int netbk_count_requests(struct x + frags++; + + if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { +- netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n", ++ netdev_err(vif->dev, "txp->offset: %x, size: %u\n", + txp->offset, txp->size); ++ netbk_fatal_tx_err(vif); + return -frags; + } + } while ((txp++)->flags & XEN_NETTXF_more_data); +@@ -1067,7 +1078,8 @@ static int xen_netbk_get_extras(struct x + + do { + if (unlikely(work_to_do-- <= 0)) { +- netdev_dbg(vif->dev, "Missing extra info\n"); ++ netdev_err(vif->dev, "Missing extra info\n"); ++ netbk_fatal_tx_err(vif); + return -EBADR; + } + +@@ -1076,8 +1088,9 @@ static int xen_netbk_get_extras(struct x + if (unlikely(!extra.type || + extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { + vif->tx.req_cons = ++cons; +- netdev_dbg(vif->dev, ++ netdev_err(vif->dev, + "Invalid extra type: %d\n", extra.type); ++ netbk_fatal_tx_err(vif); + return -EINVAL; + } + +@@ -1093,13 +1106,15 @@ static int netbk_set_skb_gso(struct xenv + struct xen_netif_extra_info *gso) + { + if (!gso->u.gso.size) { +- netdev_dbg(vif->dev, "GSO size must not be zero.\n"); ++ netdev_err(vif->dev, "GSO size must not be zero.\n"); ++ netbk_fatal_tx_err(vif); + return -EINVAL; + } + + /* Currently only TCPv4 S.O. is supported. */ + if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { +- netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); ++ netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); ++ netbk_fatal_tx_err(vif); + return -EINVAL; + } + +@@ -1236,9 +1251,25 @@ static unsigned xen_netbk_tx_build_gops( + + /* Get a netif from the list with work to do. */ + vif = poll_net_schedule_list(netbk); ++ /* This can sometimes happen because the test of ++ * list_empty(net_schedule_list) at the top of the ++ * loop is unlocked. Just go back and have another ++ * look. ++ */ + if (!vif) + continue; + ++ if (vif->tx.sring->req_prod - vif->tx.req_cons > ++ XEN_NETIF_TX_RING_SIZE) { ++ netdev_err(vif->dev, ++ "Impossible number of requests. " ++ "req_prod %d, req_cons %d, size %ld\n", ++ vif->tx.sring->req_prod, vif->tx.req_cons, ++ XEN_NETIF_TX_RING_SIZE); ++ netbk_fatal_tx_err(vif); ++ continue; ++ } ++ + RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); + if (!work_to_do) { + xenvif_put(vif); +@@ -1266,17 +1297,14 @@ static unsigned xen_netbk_tx_build_gops( + work_to_do = xen_netbk_get_extras(vif, extras, + work_to_do); + idx = vif->tx.req_cons; +- if (unlikely(work_to_do < 0)) { +- netbk_tx_err(vif, &txreq, idx); ++ if (unlikely(work_to_do < 0)) + continue; +- } + } + + ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); +- if (unlikely(ret < 0)) { +- netbk_tx_err(vif, &txreq, idx - ret); ++ if (unlikely(ret < 0)) + continue; +- } ++ + idx += ret; + + if (unlikely(txreq.size < ETH_HLEN)) { +@@ -1288,11 +1316,11 @@ static unsigned xen_netbk_tx_build_gops( + + /* No crossing a page as the payload mustn't fragment. */ + if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { +- netdev_dbg(vif->dev, ++ netdev_err(vif->dev, + "txreq.offset: %x, size: %u, end: %lu\n", + txreq.offset, txreq.size, + (txreq.offset&~PAGE_MASK) + txreq.size); +- netbk_tx_err(vif, &txreq, idx); ++ netbk_fatal_tx_err(vif); + continue; + } + +@@ -1320,8 +1348,8 @@ static unsigned xen_netbk_tx_build_gops( + gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; + + if (netbk_set_skb_gso(vif, skb, gso)) { ++ /* Failure in netbk_set_skb_gso is fatal. 
*/ + kfree_skb(skb); +- netbk_tx_err(vif, &txreq, idx); + continue; + } + }
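
The core of the xen-netback ring-shutdown change above is a single invariant on the shared TX ring: the frontend-owned producer index may never run more than one ring's worth of slots ahead of the backend's consumer index. The fragment below is only an illustrative, standalone sketch of that invariant and is not part of any queued patch; the names (demo_ring, ring_request_count, ring_is_sane, DEMO_RING_SIZE) are made-up stand-ins for the real xen_netif ring structures.

#include <stdbool.h>
#include <stdint.h>

#define DEMO_RING_SIZE 256u	/* hypothetical number of slots in the ring */

struct demo_ring {
	uint32_t req_prod;	/* written by the (untrusted) frontend */
	uint32_t req_cons;	/* advanced by the backend as it consumes */
};

/*
 * Outstanding requests on the ring; unsigned subtraction keeps the
 * result correct across index wrap-around.
 */
static inline uint32_t ring_request_count(const struct demo_ring *r)
{
	return r->req_prod - r->req_cons;
}

/* More outstanding requests than slots means the frontend wrote garbage. */
static inline bool ring_is_sane(const struct demo_ring *r)
{
	return ring_request_count(r) <= DEMO_RING_SIZE;
}

When such a check fails, the patch above logs a single netdev_err() and calls netbk_fatal_tx_err() to take the carrier down, rather than looping over the bogus requests.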