From 4f40f2cba244e04c0f385c5ce60498b513b335dd Mon Sep 17 00:00:00 2001
From: Eilon Greenstein <eilong@broadcom.com>
Date: Wed, 14 Jan 2009 21:24:17 -0800
Subject: bnx2x: Using system page size for SGE
Acked-by: Karsten Keil <kkeil@novell.com>
Reference: bnc#472500

When the page size is not 4KB, the FW must be programmed to work with
the right SGE boundaries and fragment list length.

To avoid confusion with the BCM_PAGE_SIZE which is set to 4KB for the
FW sake, another alias for the system page size was added to
explicitly indicate that it is meant for the SGE

Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/bnx2x.h      |    3 +++
 drivers/net/bnx2x_main.c |   32 ++++++++++++++++----------------
 2 files changed, 19 insertions(+), 16 deletions(-)

Index: linux-2.6.27-bnx2x_2/drivers/net/bnx2x.h
===================================================================
--- linux-2.6.27-bnx2x_2.orig/drivers/net/bnx2x.h
+++ linux-2.6.27-bnx2x_2/drivers/net/bnx2x.h
@@ -150,6 +150,9 @@ struct sw_rx_page {
 
 #define PAGES_PER_SGE_SHIFT	0
 #define PAGES_PER_SGE		(1 << PAGES_PER_SGE_SHIFT)
+#define SGE_PAGE_SIZE		PAGE_SIZE
+#define SGE_PAGE_SHIFT		PAGE_SHIFT
+#define SGE_PAGE_ALIGN(addr)	PAGE_ALIGN(addr)
 
 #define BCM_RX_ETH_PAYLOAD_ALIGN	64
 
Index: linux-2.6.27-bnx2x_2/drivers/net/bnx2x_main.c
===================================================================
--- linux-2.6.27-bnx2x_2.orig/drivers/net/bnx2x_main.c
+++ linux-2.6.27-bnx2x_2/drivers/net/bnx2x_main.c
@@ -974,7 +974,7 @@ static inline void bnx2x_free_rx_sge(str
 		return;
 
 	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
-		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
 	__free_pages(page, PAGES_PER_SGE_SHIFT);
 
 	sw_buf->page = NULL;
@@ -1002,7 +1002,7 @@ static inline int bnx2x_alloc_rx_sge(str
 	if (unlikely(page == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
+	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
 			       PCI_DMA_FROMDEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		__free_pages(page, PAGES_PER_SGE_SHIFT);
@@ -1098,9 +1098,9 @@ static void bnx2x_update_sge_prod(struct
 				  struct eth_fast_path_rx_cqe *fp_cqe)
 {
 	struct bnx2x *bp = fp->bp;
-	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
+	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
 				     le16_to_cpu(fp_cqe->len_on_bd)) >>
-		      BCM_PAGE_SHIFT;
+		      SGE_PAGE_SHIFT;
 	u16 last_max, last_elem, first_elem;
 	u16 delta = 0;
 	u16 i;
@@ -1205,22 +1205,22 @@ static int bnx2x_fill_frag_skb(struct bn
 			       u16 cqe_idx)
 {
 	struct sw_rx_page *rx_pg, old_rx_pg;
-	struct page *sge;
 	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
 	u32 i, frag_len, frag_size, pages;
 	int err;
 	int j;
 
 	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
-	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
+	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
 
 	/* This is needed in order to enable forwarding support */
 	if (frag_size)
-		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
+		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
 					   max(frag_size, (u32)len_on_bd));
 
 #ifdef BNX2X_STOP_ON_ERROR
-	if (pages > 8*PAGES_PER_SGE) {
+	if (pages >
+	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 			  pages, cqe_idx);
 		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
@@ -1236,9 +1236,8 @@ static int bnx2x_fill_frag_skb(struct bn
 
 		/* FW gives the indices of the SGE as if the ring is an array
 		   (meaning that "next" element will consume 2 indices) */
-		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
+		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
 		rx_pg = &fp->rx_page_ring[sge_idx];
-		sge = rx_pg->page;
 		old_rx_pg = *rx_pg;
 
 		/* If we fail to allocate a substitute page, we simply stop
@@ -1251,7 +1250,7 @@ static int bnx2x_fill_frag_skb(struct bn
 
 		/* Unmap the page as we r going to pass it to the stack */
 		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
-			      BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
 
 		/* Add one frag and update the appropriate fields in the skb */
 		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -4547,7 +4546,7 @@ static void bnx2x_set_client_config(stru
 
 	if (bp->flags & TPA_ENABLE_FLAG) {
 		tstorm_client.max_sges_for_packet =
-			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
+			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
 		tstorm_client.max_sges_for_packet =
 			((tstorm_client.max_sges_for_packet +
 			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
@@ -4730,10 +4729,11 @@ static void bnx2x_init_internal_func(str
 							  bp->e1hov);
 	}
 
-	/* Init CQ ring mapping and aggregation size */
-	max_agg_size = min((u32)(bp->rx_buf_size +
-				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
-			   (u32)0xffff);
+	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
+	max_agg_size =
+		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
+			  SGE_PAGE_SIZE * PAGES_PER_SGE),
+		    (u32)0xffff);
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];