From 4c06034001e20ff9f6e2a1a3dfa155bf3f31440c Mon Sep 17 00:00:00 2001
From: KY Srinivasan <kys@microsoft.com>
Date: Sat, 8 Mar 2014 19:23:13 -0800
Subject: [PATCH 05/25] Drivers: net: hyperv: Enable scatter gather I/O

Clean up the code and enable scatter gather I/O.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/hyperv/netvsc_drv.c | 153 ++++++++++++++++++++++++++++++----------
 1 file changed, 114 insertions(+), 39 deletions(-)

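The new netvsc_get_slots()/count_skb_frag_slots() helpers size the page-buffer
array by counting, for each buffer, how many pages are touched by its
within-page offset plus its length; the transmit path then reserves two extra
slots for the RNDIS header. As a rough illustration only (not part of the
patch), the standalone userspace sketch below redoes that ceiling-division
arithmetic; EXAMPLE_PAGE_SIZE, slots_for_buffer() and the example lengths are
made-up values chosen for the demonstration.

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Slots needed for one buffer of 'len' bytes starting 'offset' bytes into a
 * page; this mirrors the counting done by netvsc_get_slots() and
 * count_skb_frag_slots() in the patch below.
 */
static unsigned long slots_for_buffer(unsigned long offset, unsigned long len)
{
	return DIV_ROUND_UP((offset % EXAMPLE_PAGE_SIZE) + len,
			    EXAMPLE_PAGE_SIZE);
}

int main(void)
{
	/* Hypothetical skb: 1400 bytes of linear data starting 3000 bytes
	 * into its page, plus one 8192-byte fragment at page offset 100.
	 */
	unsigned long data_slots = slots_for_buffer(3000, 1400); /* -> 2 */
	unsigned long frag_slots = slots_for_buffer(100, 8192);  /* -> 3 */

	/* netvsc_start_xmit() adds two more slots for the RNDIS header and
	 * drops the packet if the total exceeds MAX_PAGE_BUFFER_COUNT.
	 */
	printf("page buffer slots needed: %lu\n", data_slots + frag_slots + 2);
	return 0;
}
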
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8e3a0b00099b..72961741be54 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -140,22 +140,124 @@ static void netvsc_xmit_completion(void *context)
 		dev_kfree_skb_any(skb);
 }
 
+static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
+			struct hv_page_buffer *pb)
+{
+	int j = 0;
+
+	/* Deal with compound pages by ignoring unused part
+	 * of the page.
+	 */
+	page += (offset >> PAGE_SHIFT);
+	offset &= ~PAGE_MASK;
+
+	while (len > 0) {
+		unsigned long bytes;
+
+		bytes = PAGE_SIZE - offset;
+		if (bytes > len)
+			bytes = len;
+		pb[j].pfn = page_to_pfn(page);
+		pb[j].offset = offset;
+		pb[j].len = bytes;
+
+		offset += bytes;
+		len -= bytes;
+
+		if (offset == PAGE_SIZE && len) {
+			page++;
+			offset = 0;
+			j++;
+		}
+	}
+
+	return j + 1;
+}
+
+static void init_page_array(void *hdr, u32 len, struct sk_buff *skb,
+			struct hv_page_buffer *pb)
+{
+	u32 slots_used = 0;
+	char *data = skb->data;
+	int frags = skb_shinfo(skb)->nr_frags;
+	int i;
+
+	/* The packet is laid out thus:
+	 * 1. hdr
+	 * 2. skb linear data
+	 * 3. skb fragment data
+	 */
+	if (hdr != NULL)
+		slots_used += fill_pg_buf(virt_to_page(hdr),
+					offset_in_page(hdr),
+					len, &pb[slots_used]);
+
+	slots_used += fill_pg_buf(virt_to_page(data),
+				offset_in_page(data),
+				skb_headlen(skb), &pb[slots_used]);
+
+	for (i = 0; i < frags; i++) {
+		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+
+		slots_used += fill_pg_buf(skb_frag_page(frag),
+					frag->page_offset,
+					skb_frag_size(frag), &pb[slots_used]);
+	}
+}
+
+static int count_skb_frag_slots(struct sk_buff *skb)
+{
+	int i, frags = skb_shinfo(skb)->nr_frags;
+	int pages = 0;
+
+	for (i = 0; i < frags; i++) {
+		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+		unsigned long size = skb_frag_size(frag);
+		unsigned long offset = frag->page_offset;
+
+		/* Skip unused fragments from start of page */
+		offset &= ~PAGE_MASK;
+		pages += PFN_UP(offset + size);
+	}
+	return pages;
+}
+
+static int netvsc_get_slots(struct sk_buff *skb)
+{
+	char *data = skb->data;
+	unsigned int offset = offset_in_page(data);
+	unsigned int len = skb_headlen(skb);
+	int slots;
+	int frag_slots;
+
+	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+	frag_slots = count_skb_frag_slots(skb);
+	return slots + frag_slots;
+}
+
 static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct hv_netvsc_packet *packet;
 	int ret;
-	unsigned int i, num_pages, npg_data;
+	unsigned int num_data_pages;
 	u32 skb_length = skb->len;
 
-	/* Add multipages for skb->data and additional 2 for RNDIS */
-	npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
-		>> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
-	num_pages = skb_shinfo(skb)->nr_frags + npg_data + 2;
+	/* We will at most need two pages to describe the rndis
+	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
+	 * of pages in a single packet.
+	 */
+	num_data_pages = netvsc_get_slots(skb) + 2;
+	if (num_data_pages > MAX_PAGE_BUFFER_COUNT) {
+		netdev_err(net, "Packet too big: %u\n", skb->len);
+		dev_kfree_skb(skb);
+		net->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
 
 	/* Allocate a netvsc packet based on # of frags. */
 	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
-			(num_pages * sizeof(struct hv_page_buffer)) +
+			(num_data_pages * sizeof(struct hv_page_buffer)) +
 			sizeof(struct rndis_message) +
 			NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
 	if (!packet) {
@@ -170,44 +272,17 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	packet->vlan_tci = skb->vlan_tci;
 
 	packet->extension = (void *)(unsigned long)packet +
-			sizeof(struct hv_netvsc_packet) +
-			(num_pages * sizeof(struct hv_page_buffer));
+				sizeof(struct hv_netvsc_packet) +
+				(num_data_pages * sizeof(struct hv_page_buffer));
 
 	/* If the rndis msg goes beyond 1 page, we will add 1 later */
-	packet->page_buf_cnt = num_pages - 1;
+	packet->page_buf_cnt = num_data_pages - 1;
 
 	/* Initialize it from the skb */
 	packet->total_data_buflen = skb->len;
 
 	/* Start filling in the page buffers starting after RNDIS buffer. */
-	packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
-	packet->page_buf[1].offset
-		= (unsigned long)skb->data & (PAGE_SIZE - 1);
-	if (npg_data == 1)
-		packet->page_buf[1].len = skb_headlen(skb);
-	else
-		packet->page_buf[1].len = PAGE_SIZE
-			- packet->page_buf[1].offset;
-
-	for (i = 2; i <= npg_data; i++) {
-		packet->page_buf[i].pfn = virt_to_phys(skb->data
-			+ PAGE_SIZE * (i-1)) >> PAGE_SHIFT;
-		packet->page_buf[i].offset = 0;
-		packet->page_buf[i].len = PAGE_SIZE;
-	}
-	if (npg_data > 1)
-		packet->page_buf[npg_data].len = (((unsigned long)skb->data
-			+ skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1;
-
-	/* Additional fragments are after SKB data */
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-
-		packet->page_buf[i+npg_data+1].pfn =
-			page_to_pfn(skb_frag_page(f));
-		packet->page_buf[i+npg_data+1].offset = f->page_offset;
-		packet->page_buf[i+npg_data+1].len = skb_frag_size(f);
-	}
+	init_page_array(NULL, 0, skb, &packet->page_buf[1]);
 
 	/* Set the completion routine */
 	packet->completion.send.send_completion = netvsc_xmit_completion;
@@ -454,8 +529,8 @@ static int netvsc_probe(struct hv_device *dev,
 	net->netdev_ops = &device_ops;
 
 	/* TODO: Add GSO and Checksum offload */
-	net->hw_features = 0;
-	net->features = NETIF_F_HW_VLAN_CTAG_TX;
+	net->hw_features = NETIF_F_SG;
+	net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG;
 
 	SET_ETHTOOL_OPS(net, &ethtool_ops);
 	SET_NETDEV_DEV(net, &dev->device);
-- 
2.4.3
