From 6f70340698333f14b1d9c9e913c5de8f66b72c55 Mon Sep 17 00:00:00 2001
From: Dhananjay Phadke <dhananjay@netxen.com>
Date: Wed, 14 Jan 2009 20:50:00 -0800
Subject: netxen: handle dma mapping failures
Acked-by: Karsten Keil <kkeil@novell.com>
Reference: bnc#472416

o Bail out if pci_map_single() fails while replenishing rx ring.
o Drop packet if pci_map_{single,page}() fail in tx.

Signed-off-by: Dhananjay Phadke <dhananjay@netxen.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/netxen/netxen_nic.h      |    1 -
 drivers/net/netxen/netxen_nic_init.c |   68 ++++++++++++++++------------------
 drivers/net/netxen/netxen_nic_main.c |   38 +++++++++++++++++-
 3 files changed, 67 insertions(+), 40 deletions(-)
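
Both fixes apply the standard kernel DMA API rule that the handle returned
by pci_map_single()/pci_map_page() must be checked with
pci_dma_mapping_error() before it is used. A minimal sketch of the
rx-refill side of that pattern, against the 2.6.27-era PCI DMA API; the
helper name netxen_refill_one() and the buf_size parameter are
illustrative, not the driver's actual code:

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Sketch only: allocate and map one rx buffer, bailing out if the DMA
 * mapping fails. netxen_refill_one() is a hypothetical helper, not a
 * function in this driver. */
static int netxen_refill_one(struct pci_dev *pdev, int buf_size,
			     struct sk_buff **pskb, dma_addr_t *pdma)
{
	struct sk_buff *skb = dev_alloc_skb(buf_size);

	if (!skb)
		return -ENOMEM;

	*pdma = pci_map_single(pdev, skb->data, buf_size,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *pdma)) {
		/* The handle is unusable: free the skb and let the
		 * caller stop posting buffers for this pass, as the
		 * patch does with its early break. */
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	*pskb = skb;
	return 0;
}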
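
On the tx side a mapping failure can hit part-way through a multi-fragment
frame, so everything mapped so far has to be unwound before the packet is
dropped; that is what the new netxen_clean_tx_dma_mapping() helper in the
patch does, via pci_unmap_single() for the linear part and pci_unmap_page()
for each page fragment. A sketch of the whole tx mapping path under those
assumptions; netxen_map_tx_skb() is hypothetical, while
netxen_clean_tx_dma_mapping(), struct netxen_cmd_buffer, and frag_array
come from the patch below:

/* Sketch only: map the linear part and each page fragment of a tx skb,
 * unwinding everything mapped so far if any mapping fails.
 * netxen_map_tx_skb() is a hypothetical helper modelled on the patch. */
static int netxen_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			     struct netxen_cmd_buffer *pbuf)
{
	struct netxen_skb_frag *buffrag = &pbuf->frag_array[0];
	dma_addr_t dma;
	int i;

	dma = pci_map_single(pdev, skb->data, skb_headlen(skb),
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, dma))
		return -ENOMEM;	/* nothing mapped yet, just drop */

	buffrag->dma = dma;
	buffrag->length = skb_headlen(skb);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		dma = pci_map_page(pdev, frag->page, frag->page_offset,
				   frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, dma)) {
			/* Unmap the head and page fragments 1..i. */
			netxen_clean_tx_dma_mapping(pdev, pbuf, i + 1);
			return -ENOMEM;
		}
		buffrag[i + 1].dma = dma;
		buffrag[i + 1].length = frag->size;
	}

	return 0;
}

On failure the caller takes the patch's drop_packet path: it counts the
frame in adapter->stats.txdropped, frees the skb with dev_kfree_skb_any(),
and still returns NETDEV_TX_OK so the stack does not retry.
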
Index: linux-2.6.27-kketmp/drivers/net/netxen/netxen_nic.h
===================================================================
--- linux-2.6.27-kketmp.orig/drivers/net/netxen/netxen_nic.h
+++ linux-2.6.27-kketmp/drivers/net/netxen/netxen_nic.h
@@ -860,7 +860,6 @@ struct nx_host_rds_ring {
 	u32 skb_size;
 	struct netxen_rx_buffer *rx_buf_arr;	/* rx buffers for receive */
 	struct list_head free_list;
-	int begin_alloc;
 };
 
 /*
Index: linux-2.6.27-kketmp/drivers/net/netxen/netxen_nic_init.c
===================================================================
--- linux-2.6.27-kketmp.orig/drivers/net/netxen/netxen_nic_init.c
+++ linux-2.6.27-kketmp/drivers/net/netxen/netxen_nic_init.c
@@ -308,7 +308,6 @@ int netxen_alloc_sw_resources(struct net
 		}
 		memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
 		INIT_LIST_HEAD(&rds_ring->free_list);
-		rds_ring->begin_alloc = 0;
 		/*
 		 * Now go through all of them, set reference handles
 		 * and put them in the queues.
@@ -1437,7 +1436,6 @@ void netxen_post_rx_buffers(struct netxe
 	struct rcv_desc *pdesc;
 	struct netxen_rx_buffer *buffer;
 	int count = 0;
-	int index = 0;
 	netxen_ctx_msg msg = 0;
 	dma_addr_t dma;
 	struct list_head *head;
@@ -1445,7 +1443,6 @@ void netxen_post_rx_buffers(struct netxe
 	rds_ring = &recv_ctx->rds_rings[ringid];
 
 	producer = rds_ring->producer;
-	index = rds_ring->begin_alloc;
 	head = &rds_ring->free_list;
 
 	/* We can start writing rx descriptors into the phantom memory. */
@@ -1453,39 +1450,37 @@
 
 		skb = dev_alloc_skb(rds_ring->skb_size);
 		if (unlikely(!skb)) {
-			rds_ring->begin_alloc = index;
 			break;
 		}
 
+		if (!adapter->ahw.cut_through)
+			skb_reserve(skb, 2);
+
+		dma = pci_map_single(pdev, skb->data,
+				rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(pdev, dma)) {
+			dev_kfree_skb_any(skb);
+			break;
+		}
+
+		count++;
 		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
 		list_del(&buffer->list);
 
-		count++;	/* now there should be no failure */
-		pdesc = &rds_ring->desc_head[producer];
-
-		if (!adapter->ahw.cut_through)
-			skb_reserve(skb, 2);
-		/* This will be setup when we receive the
-		 * buffer after it has been filled FSL TBD TBD
-		 * skb->dev = netdev;
-		 */
-		dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
-				PCI_DMA_FROMDEVICE);
-		pdesc->addr_buffer = cpu_to_le64(dma);
 		buffer->skb = skb;
 		buffer->state = NETXEN_BUFFER_BUSY;
 		buffer->dma = dma;
+
 		/* make a rcv descriptor */
+		pdesc = &rds_ring->desc_head[producer];
+		pdesc->addr_buffer = cpu_to_le64(dma);
 		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
 		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
-		DPRINTK(INFO, "done writing descripter\n");
-		producer =
-			get_next_index(producer, rds_ring->max_rx_desc_count);
-		index = get_next_index(index, rds_ring->max_rx_desc_count);
+
+		producer = get_next_index(producer, rds_ring->max_rx_desc_count);
 	}
 	/* if we did allocate buffers, then write the count to Phantom */
 	if (count) {
-		rds_ring->begin_alloc = index;
 		rds_ring->producer = producer;
 		/* Window = 1 */
 		adapter->pci_write_normalize(adapter,
@@ -1524,49 +1519,50 @@ static void netxen_post_rx_buffers_nodb(
 	struct rcv_desc *pdesc;
 	struct netxen_rx_buffer *buffer;
 	int count = 0;
-	int index = 0;
 	struct list_head *head;
+	dma_addr_t dma;
 
 	rds_ring = &recv_ctx->rds_rings[ringid];
 
 	producer = rds_ring->producer;
-	index = rds_ring->begin_alloc;
 	head = &rds_ring->free_list;
 	/* We can start writing rx descriptors into the phantom memory. */
 	while (!list_empty(head)) {
 
 		skb = dev_alloc_skb(rds_ring->skb_size);
 		if (unlikely(!skb)) {
-			rds_ring->begin_alloc = index;
 			break;
 		}
 
+		if (!adapter->ahw.cut_through)
+			skb_reserve(skb, 2);
+
+		dma = pci_map_single(pdev, skb->data,
+				rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(pdev, dma)) {
+			dev_kfree_skb_any(skb);
+			break;
+		}
+
+		count++;
 		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
 		list_del(&buffer->list);
 
-		count++;	/* now there should be no failure */
-		pdesc = &rds_ring->desc_head[producer];
-		if (!adapter->ahw.cut_through)
-			skb_reserve(skb, 2);
 		buffer->skb = skb;
 		buffer->state = NETXEN_BUFFER_BUSY;
-		buffrag->dma = pci_map_single(pdev, skb->data,
-					     rds_ring->dma_size,
-					     PCI_DMA_FROMDEVICE);
+		buffer->dma = dma;
 
 		/* make a rcv descriptor */
+		pdesc = &rds_ring->desc_head[producer];
 		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
 		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
 		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
-		producer =
-			get_next_index(producer, rds_ring->max_rx_desc_count);
-		index = get_next_index(index, rds_ring->max_rx_desc_count);
-		buffer = &rds_ring->rx_buf_arr[index];
+
+		producer = get_next_index(producer, rds_ring->max_rx_desc_count);
	}
 
 	/* if we did allocate buffers, then write the count to Phantom */
 	if (count) {
-		rds_ring->begin_alloc = index;
 		rds_ring->producer = producer;
 		/* Window = 1 */
 		adapter->pci_write_normalize(adapter,
Index: linux-2.6.27-kketmp/drivers/net/netxen/netxen_nic_main.c
===================================================================
--- linux-2.6.27-kketmp.orig/drivers/net/netxen/netxen_nic_main.c
+++ linux-2.6.27-kketmp/drivers/net/netxen/netxen_nic_main.c
@@ -1189,6 +1189,24 @@ static bool netxen_tso_check(struct net_
 	return tso;
 }
 
+static void
+netxen_clean_tx_dma_mapping(struct pci_dev *pdev,
+		struct netxen_cmd_buffer *pbuf, int last)
+{
+	int k;
+	struct netxen_skb_frag *buffrag;
+
+	buffrag = &pbuf->frag_array[0];
+	pci_unmap_single(pdev, buffrag->dma,
+			buffrag->length, PCI_DMA_TODEVICE);
+
+	for (k = 1; k < last; k++) {
+		buffrag = &pbuf->frag_array[k];
+		pci_unmap_page(pdev, buffrag->dma,
+				buffrag->length, PCI_DMA_TODEVICE);
+	}
+}
+
 static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct netxen_adapter *adapter = netdev_priv(netdev);
@@ -1197,6 +1215,8 @@ static int netxen_nic_xmit_frame(struct
 	struct netxen_cmd_buffer *pbuf;
 	struct netxen_skb_frag *buffrag;
 	struct cmd_desc_type0 *hwdesc;
+	struct pci_dev *pdev = adapter->pdev;
+	dma_addr_t temp_dma;
 	int i, k;
 
 	u32 producer, consumer;
@@ -1229,8 +1249,12 @@ static int netxen_nic_xmit_frame(struct
 	pbuf->skb = skb;
 	pbuf->frag_count = frag_count;
 	buffrag = &pbuf->frag_array[0];
-	buffrag->dma = pci_map_single(adapter->pdev, skb->data, first_seg_len,
+	temp_dma = pci_map_single(pdev, skb->data, first_seg_len,
 			PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(pdev, temp_dma))
+		goto drop_packet;
+
+	buffrag->dma = temp_dma;
 	buffrag->length = first_seg_len;
 	netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
 	netxen_set_tx_port(hwdesc, adapter->portnum);
@@ -1242,7 +1266,6 @@ static int netxen_nic_xmit_frame(struct
 		struct skb_frag_struct *frag;
 		int len, temp_len;
 		unsigned long offset;
-		dma_addr_t temp_dma;
 
 		/* move to next desc. if there is a need */
 		if ((i & 0x3) == 0) {
@@ -1258,8 +1281,12 @@ static int netxen_nic_xmit_frame(struct
 		offset = frag->page_offset;
 
 		temp_len = len;
-		temp_dma = pci_map_page(adapter->pdev, frag->page, offset,
+		temp_dma = pci_map_page(pdev, frag->page, offset,
 				len, PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, temp_dma)) {
+			netxen_clean_tx_dma_mapping(pdev, pbuf, i);
+			goto drop_packet;
+		}
 
 		buffrag++;
 		buffrag->dma = temp_dma;
@@ -1334,6 +1361,11 @@ static int netxen_nic_xmit_frame(struct
 	netdev->trans_start = jiffies;
 
 	return NETDEV_TX_OK;
+
+drop_packet:
+	adapter->stats.txdropped++;
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
 
 static int netxen_nic_check_temp(struct netxen_adapter *adapter)