1 | Commit-Id: 8c26376112fb4b8dfea42069b602c03d53366052 |
2 | From: Divy Le Ray <divy@chelsio.com> | |
3 | Date: Wed, 8 Oct 2008 17:37:33 -0700 | |
4 | Acked-by: Karsten Keil <kkeil@novell.com> | |
5 | Subject: [PATCH] cxgb3: Allocate multiqueues at init time | |
6 | Reference: bnc#446739 | |
7 | ||
8 | Allocate a queue set per core, up to the maximum of available qsets. | |
9 | Share the queue sets on multi port adapters. | |
10 | Rename MSI-X interrupt vectors ethX-N, N being the queue set number. | |
11 | ||
12 | commit f9ee3882969224aa9f086268020c31819be6ae99 | |
13 | cxgb3 - Limit multiqueue setting to msi-x | |
14 | Allow multiqueue setting in MSI-X mode only | |
15 | ||
16 | Signed-off-by: Divy Le Ray <divy@chelsio.com> | |
17 | Signed-off-by: David S. Miller <davem@davemloft.net> | |
18 | ||
19 | --- | |
20 | drivers/net/cxgb3/common.h | 1 | |
21 | drivers/net/cxgb3/cxgb3_ioctl.h | 2 | |
22 | drivers/net/cxgb3/cxgb3_main.c | 179 +++++++++++++++++++++++++++++++--------- | |
23 | 3 files changed, 145 insertions(+), 37 deletions(-) | |
24 | ||
25 | --- a/drivers/net/cxgb3/common.h | |
26 | +++ b/drivers/net/cxgb3/common.h | |
27 | @@ -358,6 +358,7 @@ struct qset_params { /* SGE queue set p | |
28 | unsigned int jumbo_size; /* # of entries in jumbo free list */ | |
29 | unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */ | |
30 | unsigned int cong_thres; /* FL congestion threshold */ | |
31 | + unsigned int vector; /* Interrupt (line or vector) number */ | |
32 | }; | |
33 | ||
34 | struct sge_params { | |
35 | --- a/drivers/net/cxgb3/cxgb3_ioctl.h | |
36 | +++ b/drivers/net/cxgb3/cxgb3_ioctl.h | |
37 | @@ -92,6 +92,8 @@ struct ch_qset_params { | |
38 | int32_t polling; | |
39 | int32_t lro; | |
40 | int32_t cong_thres; | |
41 | + int32_t vector; | |
42 | + int32_t qnum; | |
43 | }; | |
44 | ||
45 | struct ch_pktsched_params { | |
46 | --- a/drivers/net/cxgb3/cxgb3_main.c | |
47 | +++ b/drivers/net/cxgb3/cxgb3_main.c | |
48 | @@ -275,10 +275,10 @@ static void name_msix_vecs(struct adapte | |
49 | ||
50 | for (i = 0; i < pi->nqsets; i++, msi_idx++) { | |
51 | snprintf(adap->msix_info[msi_idx].desc, n, | |
52 | - "%s (queue %d)", d->name, i); | |
53 | + "%s-%d", d->name, pi->first_qset + i); | |
54 | adap->msix_info[msi_idx].desc[n] = 0; | |
55 | } | |
56 | - } | |
57 | + } | |
58 | } | |
59 | ||
60 | static int request_msix_data_irqs(struct adapter *adap) | |
61 | @@ -307,6 +307,22 @@ static int request_msix_data_irqs(struct | |
62 | return 0; | |
63 | } | |
64 | ||
65 | +static void free_irq_resources(struct adapter *adapter) | |
66 | +{ | |
67 | + if (adapter->flags & USING_MSIX) { | |
68 | + int i, n = 0; | |
69 | + | |
70 | + free_irq(adapter->msix_info[0].vec, adapter); | |
71 | + for_each_port(adapter, i) | |
72 | + n += adap2pinfo(adapter, i)->nqsets; | |
73 | + | |
74 | + for (i = 0; i < n; ++i) | |
75 | + free_irq(adapter->msix_info[i + 1].vec, | |
76 | + &adapter->sge.qs[i]); | |
77 | + } else | |
78 | + free_irq(adapter->pdev->irq, adapter); | |
79 | +} | |
80 | + | |
81 | static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, | |
82 | unsigned long n) | |
83 | { | |
84 | @@ -474,7 +490,10 @@ static int setup_sge_qsets(struct adapte | |
85 | struct port_info *pi = netdev_priv(dev); | |
86 | ||
87 | pi->qs = &adap->sge.qs[pi->first_qset]; | |
88 | - for (j = 0; j < pi->nqsets; ++j, ++qset_idx) { | |
89 | + for (j = pi->first_qset; j < pi->first_qset + pi->nqsets; | |
90 | + ++j, ++qset_idx) { | |
91 | + if (!pi->rx_csum_offload) | |
92 | + adap->params.sge.qset[qset_idx].lro = 0; | |
93 | err = t3_sge_alloc_qset(adap, qset_idx, 1, | |
94 | (adap->flags & USING_MSIX) ? qset_idx + 1 : | |
95 | irq_idx, | |
96 | @@ -782,11 +801,12 @@ static void init_port_mtus(struct adapte | |
97 | t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus); | |
98 | } | |
99 | ||
100 | -static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, | |
101 | +static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, | |
102 | int hi, int port) | |
103 | { | |
104 | struct sk_buff *skb; | |
105 | struct mngt_pktsched_wr *req; | |
106 | + int ret; | |
107 | ||
108 | skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL); | |
109 | req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req)); | |
110 | @@ -797,20 +817,28 @@ static void send_pktsched_cmd(struct ada | |
111 | req->min = lo; | |
112 | req->max = hi; | |
113 | req->binding = port; | |
114 | - t3_mgmt_tx(adap, skb); | |
115 | + ret = t3_mgmt_tx(adap, skb); | |
116 | + | |
117 | + return ret; | |
118 | } | |
119 | ||
120 | -static void bind_qsets(struct adapter *adap) | |
121 | +static int bind_qsets(struct adapter *adap) | |
122 | { | |
123 | - int i, j; | |
124 | + int i, j, err = 0; | |
125 | ||
126 | for_each_port(adap, i) { | |
127 | const struct port_info *pi = adap2pinfo(adap, i); | |
128 | ||
129 | - for (j = 0; j < pi->nqsets; ++j) | |
130 | - send_pktsched_cmd(adap, 1, pi->first_qset + j, -1, | |
131 | - -1, i); | |
132 | + for (j = 0; j < pi->nqsets; ++j) { | |
133 | + int ret = send_pktsched_cmd(adap, 1, | |
134 | + pi->first_qset + j, -1, | |
135 | + -1, i); | |
136 | + if (ret) | |
137 | + err = ret; | |
138 | + } | |
139 | } | |
140 | + | |
141 | + return err; | |
142 | } | |
143 | ||
144 | #define FW_FNAME "t3fw-%d.%d.%d.bin" | |
145 | @@ -989,9 +1017,16 @@ static int cxgb_up(struct adapter *adap) | |
146 | t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff); | |
147 | } | |
148 | ||
149 | - if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX) | |
150 | - bind_qsets(adap); | |
151 | - adap->flags |= QUEUES_BOUND; | |
152 | + if (!(adap->flags & QUEUES_BOUND)) { | |
153 | + err = bind_qsets(adap); | |
154 | + if (err) { | |
155 | + CH_ERR(adap, "failed to bind qsets, err %d\n", err); | |
156 | + t3_intr_disable(adap); | |
157 | + free_irq_resources(adap); | |
158 | + goto out; | |
159 | + } | |
160 | + adap->flags |= QUEUES_BOUND; | |
161 | + } | |
162 | ||
163 | out: | |
164 | return err; | |
165 | @@ -1010,19 +1045,7 @@ static void cxgb_down(struct adapter *ad | |
166 | t3_intr_disable(adapter); | |
167 | spin_unlock_irq(&adapter->work_lock); | |
168 | ||
169 | - if (adapter->flags & USING_MSIX) { | |
170 | - int i, n = 0; | |
171 | - | |
172 | - free_irq(adapter->msix_info[0].vec, adapter); | |
173 | - for_each_port(adapter, i) | |
174 | - n += adap2pinfo(adapter, i)->nqsets; | |
175 | - | |
176 | - for (i = 0; i < n; ++i) | |
177 | - free_irq(adapter->msix_info[i + 1].vec, | |
178 | - &adapter->sge.qs[i]); | |
179 | - } else | |
180 | - free_irq(adapter->pdev->irq, adapter); | |
181 | - | |
182 | + free_irq_resources(adapter); | |
183 | flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */ | |
184 | quiesce_rx(adapter); | |
185 | } | |
186 | @@ -1333,8 +1356,8 @@ static unsigned long collect_sge_port_st | |
187 | int i; | |
188 | unsigned long tot = 0; | |
189 | ||
190 | - for (i = 0; i < p->nqsets; ++i) | |
191 | - tot += adapter->sge.qs[i + p->first_qset].port_stats[idx]; | |
192 | + for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i) | |
193 | + tot += adapter->sge.qs[i].port_stats[idx]; | |
194 | return tot; | |
195 | } | |
196 | ||
197 | @@ -1538,7 +1561,7 @@ static int set_settings(struct net_devic | |
198 | struct link_config *lc = &p->link_config; | |
199 | ||
200 | if (!(lc->supported & SUPPORTED_Autoneg)) | |
201 | - return -EOPNOTSUPP; /* can't change speed/duplex */ | |
202 | + return -EOPNOTSUPP; /* can't change speed/duplex */ | |
203 | ||
204 | if (cmd->autoneg == AUTONEG_DISABLE) { | |
205 | int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex); | |
206 | @@ -1617,8 +1640,10 @@ static int set_rx_csum(struct net_device | |
207 | struct adapter *adap = p->adapter; | |
208 | int i; | |
209 | ||
210 | - for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) | |
211 | + for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { | |
212 | + adap->params.sge.qset[i].lro = 0; | |
213 | adap->sge.qs[i].lro_enabled = 0; | |
214 | + } | |
215 | } | |
216 | return 0; | |
217 | } | |
218 | @@ -1824,6 +1849,8 @@ static int cxgb_extension_ioctl(struct n | |
219 | int i; | |
220 | struct qset_params *q; | |
221 | struct ch_qset_params t; | |
222 | + int q1 = pi->first_qset; | |
223 | + int nqsets = pi->nqsets; | |
224 | ||
225 | if (!capable(CAP_NET_ADMIN)) | |
226 | return -EPERM; | |
227 | @@ -1846,6 +1873,16 @@ static int cxgb_extension_ioctl(struct n | |
228 | || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES, | |
229 | MAX_RSPQ_ENTRIES)) | |
230 | return -EINVAL; | |
231 | + | |
232 | + if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0) | |
233 | + for_each_port(adapter, i) { | |
234 | + pi = adap2pinfo(adapter, i); | |
235 | + if (t.qset_idx >= pi->first_qset && | |
236 | + t.qset_idx < pi->first_qset + pi->nqsets && | |
237 | + !pi->rx_csum_offload) | |
238 | + return -EINVAL; | |
239 | + } | |
240 | + | |
241 | if ((adapter->flags & FULL_INIT_DONE) && | |
242 | (t.rspq_size >= 0 || t.fl_size[0] >= 0 || | |
243 | t.fl_size[1] >= 0 || t.txq_size[0] >= 0 || | |
244 | @@ -1853,6 +1890,20 @@ static int cxgb_extension_ioctl(struct n | |
245 | t.polling >= 0 || t.cong_thres >= 0)) | |
246 | return -EBUSY; | |
247 | ||
248 | + /* Allow setting of any available qset when offload enabled */ | |
249 | + if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { | |
250 | + q1 = 0; | |
251 | + for_each_port(adapter, i) { | |
252 | + pi = adap2pinfo(adapter, i); | |
253 | + nqsets += pi->first_qset + pi->nqsets; | |
254 | + } | |
255 | + } | |
256 | + | |
257 | + if (t.qset_idx < q1) | |
258 | + return -EINVAL; | |
259 | + if (t.qset_idx > q1 + nqsets - 1) | |
260 | + return -EINVAL; | |
261 | + | |
262 | q = &adapter->params.sge.qset[t.qset_idx]; | |
263 | ||
264 | if (t.rspq_size >= 0) | |
265 | @@ -1902,13 +1953,26 @@ static int cxgb_extension_ioctl(struct n | |
266 | case CHELSIO_GET_QSET_PARAMS:{ | |
267 | struct qset_params *q; | |
268 | struct ch_qset_params t; | |
269 | + int q1 = pi->first_qset; | |
270 | + int nqsets = pi->nqsets; | |
271 | + int i; | |
272 | ||
273 | if (copy_from_user(&t, useraddr, sizeof(t))) | |
274 | return -EFAULT; | |
275 | - if (t.qset_idx >= SGE_QSETS) | |
276 | + | |
277 | + /* Display qsets for all ports when offload enabled */ | |
278 | + if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { | |
279 | + q1 = 0; | |
280 | + for_each_port(adapter, i) { | |
281 | + pi = adap2pinfo(adapter, i); | |
282 | + nqsets = pi->first_qset + pi->nqsets; | |
283 | + } | |
284 | + } | |
285 | + | |
286 | + if (t.qset_idx >= nqsets) | |
287 | return -EINVAL; | |
288 | ||
289 | - q = &adapter->params.sge.qset[t.qset_idx]; | |
290 | + q = &adapter->params.sge.qset[q1 + t.qset_idx]; | |
291 | t.rspq_size = q->rspq_size; | |
292 | t.txq_size[0] = q->txq_size[0]; | |
293 | t.txq_size[1] = q->txq_size[1]; | |
294 | @@ -1919,6 +1983,12 @@ static int cxgb_extension_ioctl(struct n | |
295 | t.lro = q->lro; | |
296 | t.intr_lat = q->coalesce_usecs; | |
297 | t.cong_thres = q->cong_thres; | |
298 | + t.qnum = q1; | |
299 | + | |
300 | + if (adapter->flags & USING_MSIX) | |
301 | + t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec; | |
302 | + else | |
303 | + t.vector = adapter->pdev->irq; | |
304 | ||
305 | if (copy_to_user(useraddr, &t, sizeof(t))) | |
306 | return -EFAULT; | |
307 | @@ -2264,8 +2334,8 @@ static void t3_synchronize_rx(struct ada | |
308 | { | |
309 | int i; | |
310 | ||
311 | - for (i = 0; i < p->nqsets; i++) { | |
312 | - struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq; | |
313 | + for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { | |
314 | + struct sge_rspq *q = &adap->sge.qs[i].rspq; | |
315 | ||
316 | spin_lock_irq(&q->lock); | |
317 | spin_unlock_irq(&q->lock); | |
318 | @@ -2572,6 +2642,42 @@ static struct pci_error_handlers t3_err_ | |
319 | .resume = t3_io_resume, | |
320 | }; | |
321 | ||
322 | +/* | |
323 | + * Set the number of qsets based on the number of CPUs and the number of ports, | |
324 | + * not to exceed the number of available qsets, assuming there are enough qsets | |
325 | + * per port in HW. | |
326 | + */ | |
327 | +static void set_nqsets(struct adapter *adap) | |
328 | +{ | |
329 | + int i, j = 0; | |
330 | + int num_cpus = num_online_cpus(); | |
331 | + int hwports = adap->params.nports; | |
332 | + int nqsets = SGE_QSETS; | |
333 | + | |
334 | + if (adap->params.rev > 0 && adap->flags & USING_MSIX) { | |
335 | + if (hwports == 2 && | |
336 | + (hwports * nqsets > SGE_QSETS || | |
337 | + num_cpus >= nqsets / hwports)) | |
338 | + nqsets /= hwports; | |
339 | + if (nqsets > num_cpus) | |
340 | + nqsets = num_cpus; | |
341 | + if (nqsets < 1 || hwports == 4) | |
342 | + nqsets = 1; | |
343 | + } else | |
344 | + nqsets = 1; | |
345 | + | |
346 | + for_each_port(adap, i) { | |
347 | + struct port_info *pi = adap2pinfo(adap, i); | |
348 | + | |
349 | + pi->first_qset = j; | |
350 | + pi->nqsets = nqsets; | |
351 | + j = pi->first_qset + nqsets; | |
352 | + | |
353 | + dev_info(&adap->pdev->dev, | |
354 | + "Port %d using %d queue sets.\n", i, nqsets); | |
355 | + } | |
356 | +} | |
357 | + | |
358 | static int __devinit cxgb_enable_msix(struct adapter *adap) | |
359 | { | |
360 | struct msix_entry entries[SGE_QSETS + 1]; | |
361 | @@ -2729,9 +2835,6 @@ static int __devinit init_one(struct pci | |
362 | pi = netdev_priv(netdev); | |
363 | pi->adapter = adapter; | |
364 | pi->rx_csum_offload = 1; | |
365 | - pi->nqsets = 1; | |
366 | - pi->first_qset = i; | |
367 | - pi->activity = 0; | |
368 | pi->port_id = i; | |
369 | netif_carrier_off(netdev); | |
370 | netdev->irq = pdev->irq; | |
371 | @@ -2808,6 +2911,8 @@ static int __devinit init_one(struct pci | |
372 | else if (msi > 0 && pci_enable_msi(pdev) == 0) | |
373 | adapter->flags |= USING_MSI; | |
374 | ||
375 | + set_nqsets(adapter); | |
376 | + | |
377 | err = sysfs_create_group(&adapter->port[0]->dev.kobj, | |
378 | &cxgb3_attr_group); | |
379 |