From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: net: packet split receive api
Patch-mainline: No
References: FATE#303834, bnc#484306

Add some packet-split receive hooks.

For one, this allows doing NUMA-node-affine page allocations. Later on, these
hooks will be extended to do emergency reserve allocations for fragments.

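As an illustration only (not part of the change itself; the my_* helpers and
the minimal my_rx_buffer structure are made up for this example), a driver is
expected to use the new hooks roughly like this:

  #include <linux/netdevice.h>
  #include <linux/skbuff.h>

  struct my_rx_buffer {                 /* hypothetical per-descriptor state */
          struct page *page;
  };

  /* Refill one page-split RX buffer: the page now comes from
   * netdev_alloc_page(), i.e. node-local to the device, instead of a plain
   * alloc_page(GFP_ATOMIC). */
  static int my_refill_rx_page(struct net_device *dev, struct my_rx_buffer *bi)
  {
          bi->page = netdev_alloc_page(dev);
          if (!bi->page)
                  return -ENOMEM;
          return 0;
  }

  /* Attach a received fragment to the skb: skb_add_rx_frag() also updates
   * skb->len, skb->data_len and skb->truesize, so the driver no longer
   * adjusts them by hand. */
  static void my_add_rx_frag(struct sk_buff *skb, int i,
                             struct my_rx_buffer *bi, unsigned int len)
  {
          skb_add_rx_frag(skb, i, bi->page, 0, len);
          bi->page = NULL;
  }

  /* On teardown, pages obtained from netdev_alloc_page() are released with
   * netdev_free_page() rather than __free_page(). */
  static void my_free_rx_page(struct net_device *dev, struct my_rx_buffer *bi)
  {
          netdev_free_page(dev, bi->page);
          bi->page = NULL;
  }
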
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Neil Brown <neilb@suse.de>
Acked-by: Suresh Jayaraman <sjayaraman@suse.de>

bnx2.c part fixed by Jiri Bohac <jbohac@suse.cz> (bnc#484306)


---
 drivers/net/bnx2.c             |    8 +++-----
 drivers/net/e1000/e1000_main.c |    8 ++------
 drivers/net/e1000e/netdev.c    |    7 ++-----
 drivers/net/igb/igb_main.c     |    9 ++-------
 drivers/net/ixgbe/ixgbe_main.c |   14 ++++++--------
 drivers/net/sky2.c             |   16 ++++++----------
 include/linux/skbuff.h         |   23 +++++++++++++++++++++++
 net/core/skbuff.c              |   20 ++++++++++++++++++++
 8 files changed, 64 insertions(+), 41 deletions(-)

--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2476,7 +2476,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, stru
 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
 struct rx_bd *rxbd =
 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
- struct page *page = alloc_page(GFP_ATOMIC);
+ struct page *page = netdev_alloc_page(bp->dev);

 if (!page)
 return -ENOMEM;
@@ -2501,7 +2501,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struc
 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
 PCI_DMA_FROMDEVICE);

- __free_page(page);
+ netdev_free_page(bp->dev, page);
 rx_pg->page = NULL;
 }

@@ -2833,23 +2833,20 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2
 if (i == pages - 1)
 frag_len -= 4;

- skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
+ skb_add_rx_frag(skb, i, rx_pg->page, 0, frag_len);
 rx_pg->page = NULL;

 err = bnx2_alloc_rx_page(bp, rxr,
 RX_PG_RING_IDX(pg_prod));
 if (unlikely(err)) {
 rxr->rx_pg_cons = pg_cons;
 rxr->rx_pg_prod = pg_prod;
 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
 pages - i);
 return err;
 }

 frag_size -= frag_len;
- skb->data_len += frag_len;
- skb->truesize += frag_len;
- skb->len += frag_len;

 pg_prod = NEXT_RX_BD(pg_prod);
 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -4349,12 +4349,8 @@ static bool e1000_clean_rx_irq_ps(struct
 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
 PAGE_SIZE, PCI_DMA_FROMDEVICE);
 ps_page_dma->ps_page_dma[j] = 0;
- skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
- length);
+ skb_add_rx_frag(skb, j, ps_page->ps_page[j], 0, length);
 ps_page->ps_page[j] = NULL;
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
 }

 /* strip the ethernet crc, problem is we're using pages now so
@@ -4553,7 +4549,7 @@ static void e1000_alloc_rx_buffers_ps(st
 if (j < adapter->rx_ps_pages) {
 if (likely(!ps_page->ps_page[j])) {
 ps_page->ps_page[j] =
- alloc_page(GFP_ATOMIC);
+ netdev_alloc_page(netdev);
 if (unlikely(!ps_page->ps_page[j])) {
 adapter->alloc_rx_buff_failed++;
 goto no_buffers;
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -258,7 +258,7 @@ static void e1000_alloc_rx_buffers_ps(st
 continue;
 }
 if (!ps_page->page) {
- ps_page->page = alloc_page(GFP_ATOMIC);
+ ps_page->page = netdev_alloc_page(netdev);
 if (!ps_page->page) {
 adapter->alloc_rx_buff_failed++;
 goto no_buffers;
@@ -826,11 +826,8 @@ static bool e1000_clean_rx_irq_ps(struct
 pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
 PCI_DMA_FROMDEVICE);
 ps_page->dma = 0;
- skb_fill_page_desc(skb, j, ps_page->page, 0, length);
+ skb_add_rx_frag(skb, j, ps_page->page, 0, length);
 ps_page->page = NULL;
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
 }

 /* strip the ethernet crc, problem is we're using pages now so
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3885,7 +3885,7 @@ static bool igb_clean_rx_irq_adv(struct
 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
 buffer_info->page_dma = 0;

- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags++,
 buffer_info->page,
 buffer_info->page_offset,
 length);
@@ -3895,11 +3895,6 @@ static bool igb_clean_rx_irq_adv(struct
 buffer_info->page = NULL;
 else
 get_page(buffer_info->page);
-
- skb->len += length;
- skb->data_len += length;
-
- skb->truesize += length;
 }
 send_up:
 i++;
@@ -3993,7 +3988,7 @@ static void igb_alloc_rx_buffers_adv(str

 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
 if (!buffer_info->page) {
- buffer_info->page = alloc_page(GFP_ATOMIC);
+ buffer_info->page = netdev_alloc_page(netdev);
 if (!buffer_info->page) {
 adapter->alloc_rx_buff_failed++;
 goto no_buffers;
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -480,6 +480,7 @@ static void ixgbe_alloc_rx_buffers(struc
 int cleaned_count)
 {
 struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
 union ixgbe_adv_rx_desc *rx_desc;
 struct ixgbe_rx_buffer *bi;
 unsigned int i;
@@ -494,7 +495,7 @@ static void ixgbe_alloc_rx_buffers(struc
 if (!bi->page_dma &&
 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
 if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC);
+ bi->page = netdev_alloc_page(netdev);
 if (!bi->page) {
 adapter->alloc_rx_page_failed++;
 goto no_buffers;
@@ -628,10 +629,10 @@ static bool ixgbe_clean_rx_irq(struct ix
 pci_unmap_page(pdev, rx_buffer_info->page_dma,
 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
 rx_buffer_info->page_dma = 0;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buffer_info->page,
- rx_buffer_info->page_offset,
- upper_len);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer_info->page,
+ rx_buffer_info->page_offset,
+ upper_len);

 if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
 (page_count(rx_buffer_info->page) != 1))
@@ -639,9 +640,6 @@ static bool ixgbe_clean_rx_irq(struct ix
 else
 get_page(rx_buffer_info->page);

- skb->len += upper_len;
- skb->data_len += upper_len;
- skb->truesize += upper_len;
 }

 i++;
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1272,7 +1272,7 @@ static struct sk_buff *sky2_rx_alloc(str
 }

 for (i = 0; i < sky2->rx_nfrags; i++) {
- struct page *page = alloc_page(GFP_ATOMIC);
+ struct page *page = netdev_alloc_page(sky2->netdev);

 if (!page)
 goto free_partial;
@@ -2141,8 +2141,8 @@ static struct sk_buff *receive_copy(stru
 }

 /* Adjust length of skb with fragments to match received data */
-static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
- unsigned int length)
+static void skb_put_frags(struct sky2_port *sky2, struct sk_buff *skb,
+ unsigned int hdr_space, unsigned int length)
 {
 int i, num_frags;
 unsigned int size;
@@ -2159,15 +2159,11 @@ static void skb_put_frags(struct sk_buff

 if (length == 0) {
 /* don't need this page */
- __free_page(frag->page);
+ netdev_free_page(sky2->netdev, frag->page);
 --skb_shinfo(skb)->nr_frags;
 } else {
 size = min(length, (unsigned) PAGE_SIZE);
-
- frag->size = size;
- skb->data_len += size;
- skb->truesize += size;
- skb->len += size;
+ skb_add_rx_frag(skb, i, frag->page, 0, size);
 length -= size;
 }
 }
@@ -2194,7 +2190,7 @@ static struct sk_buff *receive_new(struc
 sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space);

 if (skb_shinfo(skb)->nr_frags)
- skb_put_frags(skb, hdr_space, length);
+ skb_put_frags(sky2, skb, hdr_space, length);
 else
 skb_put(skb, length);
 return skb;
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -829,6 +829,9 @@ static inline void skb_fill_page_desc(st
 skb_shinfo(skb)->nr_frags = i + 1;
 }

+extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
+ int off, int size);
+
 #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
 #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list)
 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
@@ -1243,6 +1246,26 @@ static inline struct sk_buff *netdev_all
 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
 }

+extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
+
+/**
+ * netdev_alloc_page - allocate a page for ps-rx on a specific device
+ * @dev: network device to receive on
+ *
+ * Allocate a new page node local to the specified device.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+static inline struct page *netdev_alloc_page(struct net_device *dev)
+{
+ return __netdev_alloc_page(dev, GFP_ATOMIC);
+}
+
+static inline void netdev_free_page(struct net_device *dev, struct page *page)
+{
+ __free_page(page);
+}
+
 /**
 * skb_clone_writable - is the header of a clone writable
 * @skb: buffer to check
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -259,6 +259,26 @@ struct sk_buff *__netdev_alloc_skb(struc
 return skb;
 }

+struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
+{
+ int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+ struct page *page;
+
+ page = alloc_pages_node(node, gfp_mask, 0);
+ return page;
+}
+EXPORT_SYMBOL(__netdev_alloc_page);
+
+void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+ int size)
+{
+ skb_fill_page_desc(skb, i, page, off, size);
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += size;
+}
+EXPORT_SYMBOL(skb_add_rx_frag);
+
 /**
 * dev_alloc_skb - allocate an skbuff for receiving
 * @length: length to allocate