]> git.ipfire.org Git - people/pmueller/ipfire-2.x.git/blob - src/patches/suse-2.6.27.25/patches.drivers/open-fcoe-dcb-support
Updated xen patches taken from suse.
[people/pmueller/ipfire-2.x.git] / src / patches / suse-2.6.27.25 / patches.drivers / open-fcoe-dcb-support
1 From: Hannes Reinecke <hare@suse.de>
2 Date: Wed, 17 Sep 2008 16:49:40 +0200
3 Subject: FCoE: Add DCB support
4 References: FATE#303913
5
6 Signed-off-by: Hannes Reinecke <hare@suse.de>
7 ---
8 drivers/net/ixgbe/Makefile | 3
9 drivers/net/ixgbe/ixgbe.h | 26
10 drivers/net/ixgbe/ixgbe_dcb.c | 332 ++++++++++
11 drivers/net/ixgbe/ixgbe_dcb.h | 189 ++++++
12 drivers/net/ixgbe/ixgbe_dcb_82598.c | 398 +++++++++++++
13 drivers/net/ixgbe/ixgbe_dcb_82598.h | 98 +++
14 drivers/net/ixgbe/ixgbe_dcb_nl.c | 611 ++++++++++++++++++++
15 drivers/net/ixgbe/ixgbe_ethtool.c | 37 +
16 drivers/net/ixgbe/ixgbe_main.c | 192 +++++-
17 include/linux/dcbnl.h | 324 ++++++++++
18 include/linux/netdevice.h | 8
19 include/linux/rtnetlink.h | 5
20 include/net/dcbnl.h | 40 +
21 net/Kconfig | 1
22 net/Makefile | 3
23 net/dcb/Kconfig | 12
24 net/dcb/Makefile | 1
25 net/dcb/dcbnl.c | 1091 ++++++++++++++++++++++++++++++++++++
26 18 files changed, 3349 insertions(+), 22 deletions(-)
27 create mode 100644 drivers/net/ixgbe/ixgbe_dcb.c
28 create mode 100644 drivers/net/ixgbe/ixgbe_dcb.h
29 create mode 100644 drivers/net/ixgbe/ixgbe_dcb_82598.c
30 create mode 100644 drivers/net/ixgbe/ixgbe_dcb_82598.h
31 create mode 100644 drivers/net/ixgbe/ixgbe_dcb_nl.c
32 create mode 100644 include/linux/dcbnl.h
33 create mode 100644 include/net/dcbnl.h
34 create mode 100644 net/dcb/Kconfig
35 create mode 100644 net/dcb/Makefile
36 create mode 100644 net/dcb/dcbnl.c
37
38 --- /dev/null
39 +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
40 @@ -0,0 +1,398 @@
41 +/*******************************************************************************
42 +
43 + Intel 10 Gigabit PCI Express Linux driver
44 + Copyright(c) 1999 - 2007 Intel Corporation.
45 +
46 + This program is free software; you can redistribute it and/or modify it
47 + under the terms and conditions of the GNU General Public License,
48 + version 2, as published by the Free Software Foundation.
49 +
50 + This program is distributed in the hope it will be useful, but WITHOUT
51 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
52 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
53 + more details.
54 +
55 + You should have received a copy of the GNU General Public License along with
56 + this program; if not, write to the Free Software Foundation, Inc.,
57 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
58 +
59 + The full GNU General Public License is included in this distribution in
60 + the file called "COPYING".
61 +
62 + Contact Information:
63 + Linux NICS <linux.nics@intel.com>
64 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
65 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
66 +
67 +*******************************************************************************/
68 +
69 +#include "ixgbe.h"
70 +#include "ixgbe_type.h"
71 +#include "ixgbe_dcb.h"
72 +#include "ixgbe_dcb_82598.h"
73 +
74 +/**
75 + * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
76 + * @hw: pointer to hardware structure
77 + * @stats: pointer to statistics structure
78 + * @tc_count: Number of elements in bwg_array.
79 + *
80 + * This function returns the status data for each of the Traffic Classes in use.
81 + */
82 +s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
83 + struct ixgbe_hw_stats *stats,
84 + u8 tc_count)
85 +{
86 + int tc;
87 +
88 + if (tc_count > MAX_TRAFFIC_CLASS)
89 + return DCB_ERR_PARAM;
90 +
91 + /* Statistics pertaining to each traffic class */
92 + for (tc = 0; tc < tc_count; tc++) {
93 + /* Transmitted Packets */
94 + stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
95 + /* Transmitted Bytes */
96 + stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
97 + /* Received Packets */
98 + stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
99 + /* Received Bytes */
100 + stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
101 + }
102 +
103 + return 0;
104 +}
105 +
106 +/**
107 + * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
108 + * @hw: pointer to hardware structure
109 + * @stats: pointer to statistics structure
110 + * @tc_count: Number of elements in bwg_array.
111 + *
112 + * This function returns the CBFC status data for each of the Traffic Classes.
113 + */
114 +s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
115 + struct ixgbe_hw_stats *stats,
116 + u8 tc_count)
117 +{
118 + int tc;
119 +
120 + if (tc_count > MAX_TRAFFIC_CLASS)
121 + return DCB_ERR_PARAM;
122 +
123 + for (tc = 0; tc < tc_count; tc++) {
124 + /* Priority XOFF Transmitted */
125 + stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
126 + /* Priority XOFF Received */
127 + stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
128 + }
129 +
130 + return 0;
131 +}
132 +
133 +/**
134 + * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers
135 + * @hw: pointer to hardware structure
136 + * @dcb_config: pointer to ixgbe_dcb_config structure
137 + *
138 + * Configure packet buffers for DCB mode.
139 + */
140 +s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
141 + struct ixgbe_dcb_config *dcb_config)
142 +{
143 + s32 ret_val = 0;
144 + u32 value = IXGBE_RXPBSIZE_64KB;
145 + u8 i = 0;
146 +
147 + /* Setup Rx packet buffer sizes */
148 + switch (dcb_config->rx_pba_cfg) {
149 + case pba_80_48:
150 + /* Setup the first four at 80KB */
151 + value = IXGBE_RXPBSIZE_80KB;
152 + for (; i < 4; i++)
153 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
154 + /* Setup the last four at 48KB...don't re-init i */
155 + value = IXGBE_RXPBSIZE_48KB;
156 + /* Fall Through */
157 + case pba_equal:
158 + default:
159 + for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
160 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
161 +
162 + /* Setup Tx packet buffer sizes */
163 + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
164 + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
165 + IXGBE_TXPBSIZE_40KB);
166 + }
167 + break;
168 + }
169 +
170 + return ret_val;
171 +}
172 +
173 +/**
174 + * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
175 + * @hw: pointer to hardware structure
176 + * @dcb_config: pointer to ixgbe_dcb_config structure
177 + *
178 + * Configure Rx Data Arbiter and credits for each traffic class.
179 + */
180 +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
181 + struct ixgbe_dcb_config *dcb_config)
182 +{
183 + struct tc_bw_alloc *p;
184 + u32 reg = 0;
185 + u32 credit_refill = 0;
186 + u32 credit_max = 0;
187 + u8 i = 0;
188 +
189 + reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
190 + IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
191 +
192 + reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
193 + /* Enable Arbiter */
194 + reg &= ~IXGBE_RMCS_ARBDIS;
195 + /* Enable Receive Recycle within the BWG */
196 + reg |= IXGBE_RMCS_RRM;
197 + /* Enable Deficit Fixed Priority arbitration*/
198 + reg |= IXGBE_RMCS_DFP;
199 +
200 + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
201 +
202 + /* Configure traffic class credits and priority */
203 + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
204 + p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
205 + credit_refill = p->data_credits_refill;
206 + credit_max = p->data_credits_max;
207 +
208 + reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
209 +
210 + if (p->prio_type == prio_link)
211 + reg |= IXGBE_RT2CR_LSP;
212 +
213 + IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
214 + }
215 +
216 + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
217 + reg |= IXGBE_RDRXCTL_RDMTS_1_2;
218 + reg |= IXGBE_RDRXCTL_MPBEN;
219 + reg |= IXGBE_RDRXCTL_MCEN;
220 + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
221 +
222 + reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
223 + /* Make sure there is enough descriptors before arbitration */
224 + reg &= ~IXGBE_RXCTRL_DMBYPS;
225 + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
226 +
227 + return 0;
228 +}
229 +
230 +/**
231 + * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
232 + * @hw: pointer to hardware structure
233 + * @dcb_config: pointer to ixgbe_dcb_config structure
234 + *
235 + * Configure Tx Descriptor Arbiter and credits for each traffic class.
236 + */
237 +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
238 + struct ixgbe_dcb_config *dcb_config)
239 +{
240 + struct tc_bw_alloc *p;
241 + u32 reg, max_credits;
242 + u8 i;
243 +
244 + reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
245 +
246 + /* Enable arbiter */
247 + reg &= ~IXGBE_DPMCS_ARBDIS;
248 + if (!(dcb_config->round_robin_enable)) {
249 + /* Enable DFP and Recycle mode */
250 + reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
251 + }
252 + reg |= IXGBE_DPMCS_TSOEF;
253 + /* Configure Max TSO packet size 34KB including payload and headers */
254 + reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
255 +
256 + IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
257 +
258 + /* Configure traffic class credits and priority */
259 + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
260 + p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
261 + max_credits = dcb_config->tc_config[i].desc_credits_max;
262 + reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
263 + reg |= p->data_credits_refill;
264 + reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
265 +
266 + if (p->prio_type == prio_group)
267 + reg |= IXGBE_TDTQ2TCCR_GSP;
268 +
269 + if (p->prio_type == prio_link)
270 + reg |= IXGBE_TDTQ2TCCR_LSP;
271 +
272 + IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
273 + }
274 +
275 + return 0;
276 +}
277 +
278 +/**
279 + * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
280 + * @hw: pointer to hardware structure
281 + * @dcb_config: pointer to ixgbe_dcb_config structure
282 + *
283 + * Configure Tx Data Arbiter and credits for each traffic class.
284 + */
285 +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
286 + struct ixgbe_dcb_config *dcb_config)
287 +{
288 + struct tc_bw_alloc *p;
289 + u32 reg;
290 + u8 i;
291 +
292 + reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
293 + /* Enable Data Plane Arbiter */
294 + reg &= ~IXGBE_PDPMCS_ARBDIS;
295 + /* Enable DFP and Transmit Recycle Mode */
296 + reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
297 +
298 + IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
299 +
300 + /* Configure traffic class credits and priority */
301 + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
302 + p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
303 + reg = p->data_credits_refill;
304 + reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT;
305 + reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
306 +
307 + if (p->prio_type == prio_group)
308 + reg |= IXGBE_TDPT2TCCR_GSP;
309 +
310 + if (p->prio_type == prio_link)
311 + reg |= IXGBE_TDPT2TCCR_LSP;
312 +
313 + IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
314 + }
315 +
316 + /* Enable Tx packet buffer division */
317 + reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
318 + reg |= IXGBE_DTXCTL_ENDBUBD;
319 + IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
320 +
321 + return 0;
322 +}
323 +
324 +/**
325 + * ixgbe_dcb_config_pfc_82598 - Config priority flow control
326 + * @hw: pointer to hardware structure
327 + * @dcb_config: pointer to ixgbe_dcb_config structure
328 + *
329 + * Configure Priority Flow Control for each traffic class.
330 + */
331 +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
332 + struct ixgbe_dcb_config *dcb_config)
333 +{
334 + u32 reg, rx_pba_size;
335 + u8 i;
336 +
337 + /* Enable Transmit Priority Flow Control */
338 + reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
339 + reg &= ~IXGBE_RMCS_TFCE_802_3X;
340 + /* correct the reporting of our flow control status */
341 + hw->fc.type = ixgbe_fc_none;
342 + reg |= IXGBE_RMCS_TFCE_PRIORITY;
343 + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
344 +
345 + /* Enable Receive Priority Flow Control */
346 + reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
347 + reg &= ~IXGBE_FCTRL_RFCE;
348 + reg |= IXGBE_FCTRL_RPFCE;
349 + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
350 +
351 + /*
352 + * Configure flow control thresholds and enable priority flow control
353 + * for each traffic class.
354 + */
355 + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
356 + if (dcb_config->rx_pba_cfg == pba_equal) {
357 + rx_pba_size = IXGBE_RXPBSIZE_64KB;
358 + } else {
359 + rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
360 + : IXGBE_RXPBSIZE_48KB;
361 + }
362 +
363 + reg = ((rx_pba_size >> 5) & 0xFFF0);
364 + if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
365 + dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
366 + reg |= IXGBE_FCRTL_XONE;
367 +
368 + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
369 +
370 + reg = ((rx_pba_size >> 2) & 0xFFF0);
371 + if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
372 + dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
373 + reg |= IXGBE_FCRTH_FCEN;
374 +
375 + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
376 + }
377 +
378 + /* Configure pause time */
379 + for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
380 + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
381 +
382 + /* Configure flow control refresh threshold value */
383 + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
384 +
385 + return 0;
386 +}
387 +
388 +/**
389 + * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
390 + * @hw: pointer to hardware structure
391 + *
392 + * Configure queue statistics registers, all queues belonging to same traffic
393 + * class uses a single set of queue statistics counters.
394 + */
395 +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
396 +{
397 + u32 reg = 0;
398 + u8 i = 0;
399 + u8 j = 0;
400 +
401 + /* Receive Queues stats setting - 8 queues per statistics reg */
402 + for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
403 + reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
404 + reg |= ((0x1010101) * j);
405 + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
406 + reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
407 + reg |= ((0x1010101) * j);
408 + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
409 + }
410 + /* Transmit Queues stats setting - 4 queues per statistics reg */
411 + for (i = 0; i < 8; i++) {
412 + reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
413 + reg |= ((0x1010101) * i);
414 + IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
415 + }
416 +
417 + return 0;
418 +}
419 +
420 +/**
421 + * ixgbe_dcb_hw_config_82598 - Config and enable DCB
422 + * @hw: pointer to hardware structure
423 + * @dcb_config: pointer to ixgbe_dcb_config structure
424 + *
425 + * Configure dcb settings and enable dcb mode.
426 + */
427 +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
428 + struct ixgbe_dcb_config *dcb_config)
429 +{
430 + ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config);
431 + ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
432 + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
433 + ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
434 + ixgbe_dcb_config_pfc_82598(hw, dcb_config);
435 + ixgbe_dcb_config_tc_stats_82598(hw);
436 +
437 + return 0;
438 +}
439 --- /dev/null
440 +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
441 @@ -0,0 +1,98 @@
442 +/*******************************************************************************
443 +
444 + Intel 10 Gigabit PCI Express Linux driver
445 + Copyright(c) 1999 - 2007 Intel Corporation.
446 +
447 + This program is free software; you can redistribute it and/or modify it
448 + under the terms and conditions of the GNU General Public License,
449 + version 2, as published by the Free Software Foundation.
450 +
451 + This program is distributed in the hope it will be useful, but WITHOUT
452 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
453 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
454 + more details.
455 +
456 + You should have received a copy of the GNU General Public License along with
457 + this program; if not, write to the Free Software Foundation, Inc.,
458 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
459 +
460 + The full GNU General Public License is included in this distribution in
461 + the file called "COPYING".
462 +
463 + Contact Information:
464 + Linux NICS <linux.nics@intel.com>
465 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
466 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
467 +
468 +*******************************************************************************/
469 +
470 +#ifndef _DCB_82598_CONFIG_H_
471 +#define _DCB_82598_CONFIG_H_
472 +
473 +/* DCB register definitions */
474 +
475 +#define IXGBE_DPMCS_MTSOS_SHIFT 16
476 +#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */
477 +#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */
478 +#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */
479 +#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
480 +
481 +#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */
482 +
483 +#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
484 +#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */
485 +
486 +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */
487 +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */
488 +
489 +#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12
490 +#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9
491 +#define IXGBE_TDTQ2TCCR_GSP 0x40000000
492 +#define IXGBE_TDTQ2TCCR_LSP 0x80000000
493 +
494 +#define IXGBE_TDPT2TCCR_MCL_SHIFT 12
495 +#define IXGBE_TDPT2TCCR_BWG_SHIFT 9
496 +#define IXGBE_TDPT2TCCR_GSP 0x40000000
497 +#define IXGBE_TDPT2TCCR_LSP 0x80000000
498 +
499 +#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */
500 +#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */
501 +#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */
502 +
503 +#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */
504 +
505 +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
506 +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
507 +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
508 +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
509 +
510 +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000
511 +
512 +/* DCB hardware-specific driver APIs */
513 +
514 +/* DCB PFC functions */
515 +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
516 + struct ixgbe_dcb_config *dcb_config);
517 +s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
518 + struct ixgbe_hw_stats *stats,
519 + u8 tc_count);
520 +
521 +/* DCB traffic class stats */
522 +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw);
523 +s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
524 + struct ixgbe_hw_stats *stats,
525 + u8 tc_count);
526 +
527 +/* DCB config arbiters */
528 +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
529 + struct ixgbe_dcb_config *dcb_config);
530 +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
531 + struct ixgbe_dcb_config *dcb_config);
532 +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
533 + struct ixgbe_dcb_config *dcb_config);
534 +
535 +/* DCB hw initialization */
536 +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
537 + struct ixgbe_dcb_config *config);
538 +
539 +#endif /* _DCB_82598_CONFIG_H_ */
540 --- /dev/null
541 +++ b/drivers/net/ixgbe/ixgbe_dcb.c
542 @@ -0,0 +1,332 @@
543 +/*******************************************************************************
544 +
545 + Intel 10 Gigabit PCI Express Linux driver
546 + Copyright(c) 1999 - 2007 Intel Corporation.
547 +
548 + This program is free software; you can redistribute it and/or modify it
549 + under the terms and conditions of the GNU General Public License,
550 + version 2, as published by the Free Software Foundation.
551 +
552 + This program is distributed in the hope it will be useful, but WITHOUT
553 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
554 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
555 + more details.
556 +
557 + You should have received a copy of the GNU General Public License along with
558 + this program; if not, write to the Free Software Foundation, Inc.,
559 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
560 +
561 + The full GNU General Public License is included in this distribution in
562 + the file called "COPYING".
563 +
564 + Contact Information:
565 + Linux NICS <linux.nics@intel.com>
566 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
567 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
568 +
569 +*******************************************************************************/
570 +
571 +
572 +#include "ixgbe.h"
573 +#include "ixgbe_type.h"
574 +#include "ixgbe_dcb.h"
575 +#include "ixgbe_dcb_82598.h"
576 +
577 +/**
578 + * ixgbe_dcb_check_config - Check DCB rules for valid configuration.
579 + * @dcb_config: Pointer to DCB config structure
580 + *
581 + * This function checks DCB rules for DCB settings.
582 + * The following rules are checked:
583 + * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
584 + * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
585 + * Group must total 100.
586 + * 3. A Traffic Class should not be set to both Link Strict Priority
587 + * and Group Strict Priority.
588 + * 4. Link strict Bandwidth Groups can only have link strict traffic classes
589 + * with zero bandwidth.
590 + */
591 +s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *dcb_config)
592 +{
593 + struct tc_bw_alloc *p;
594 + s32 ret_val = 0;
595 + u8 i, j, bw = 0, bw_id;
596 + u8 bw_sum[2][MAX_BW_GROUP];
597 + bool link_strict[2][MAX_BW_GROUP];
598 +
599 + memset(bw_sum, 0, sizeof(bw_sum));
600 + memset(link_strict, 0, sizeof(link_strict));
601 +
602 + /* First Tx, then Rx */
603 + for (i = 0; i < 2; i++) {
604 + /* Check each traffic class for rule violation */
605 + for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
606 + p = &dcb_config->tc_config[j].path[i];
607 +
608 + bw = p->bwg_percent;
609 + bw_id = p->bwg_id;
610 +
611 + if (bw_id >= MAX_BW_GROUP) {
612 + ret_val = DCB_ERR_CONFIG;
613 + goto err_config;
614 + }
615 + if (p->prio_type == prio_link) {
616 + link_strict[i][bw_id] = true;
617 + /* Link strict should have zero bandwidth */
618 + if (bw) {
619 + ret_val = DCB_ERR_LS_BW_NONZERO;
620 + goto err_config;
621 + }
622 + } else if (!bw) {
623 + /*
624 + * Traffic classes without link strict
625 + * should have non-zero bandwidth.
626 + */
627 + ret_val = DCB_ERR_TC_BW_ZERO;
628 + goto err_config;
629 + }
630 + bw_sum[i][bw_id] += bw;
631 + }
632 +
633 + bw = 0;
634 +
635 + /* Check each bandwidth group for rule violation */
636 + for (j = 0; j < MAX_BW_GROUP; j++) {
637 + bw += dcb_config->bw_percentage[i][j];
638 + /*
639 + * Sum of bandwidth percentages of all traffic classes
640 + * within a Bandwidth Group must total 100 except for
641 + * link strict group (zero bandwidth).
642 + */
643 + if (link_strict[i][j]) {
644 + if (bw_sum[i][j]) {
645 + /*
646 + * Link strict group should have zero
647 + * bandwidth.
648 + */
649 + ret_val = DCB_ERR_LS_BWG_NONZERO;
650 + goto err_config;
651 + }
652 + } else if (bw_sum[i][j] != BW_PERCENT &&
653 + bw_sum[i][j] != 0) {
654 + ret_val = DCB_ERR_TC_BW;
655 + goto err_config;
656 + }
657 + }
658 +
659 + if (bw != BW_PERCENT) {
660 + ret_val = DCB_ERR_BW_GROUP;
661 + goto err_config;
662 + }
663 + }
664 +
665 +err_config:
666 + return ret_val;
667 +}
668 +
669 +/**
670 + * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
671 + * @dcb_config: Struct containing DCB settings.
672 + * @direction: Configuring either Tx or Rx.
673 + *
674 + * This function calculates the credits allocated to each traffic class.
675 + * It should be called only after the rules are checked by
676 + * ixgbe_dcb_check_config().
677 + */
678 +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
679 + u8 direction)
680 +{
681 + struct tc_bw_alloc *p;
682 + s32 ret_val = 0;
683 + /* Initialization values default for Tx settings */
684 + u32 credit_refill = 0;
685 + u32 credit_max = 0;
686 + u16 link_percentage = 0;
687 + u8 bw_percent = 0;
688 + u8 i;
689 +
690 + if (dcb_config == NULL) {
691 + ret_val = DCB_ERR_CONFIG;
692 + goto out;
693 + }
694 +
695 + /* Find out the link percentage for each TC first */
696 + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
697 + p = &dcb_config->tc_config[i].path[direction];
698 + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
699 +
700 + link_percentage = p->bwg_percent;
701 + /* Must be careful of integer division for very small nums */
702 + link_percentage = (link_percentage * bw_percent) / 100;
703 + if (p->bwg_percent > 0 && link_percentage == 0)
704 + link_percentage = 1;
705 +
706 + /* Save link_percentage for reference */
707 + p->link_percent = (u8)link_percentage;
708 +
709 + /* Calculate credit refill and save it */
710 + credit_refill = link_percentage * MINIMUM_CREDIT_REFILL;
711 + p->data_credits_refill = (u16)credit_refill;
712 +
713 + /* Calculate maximum credit for the TC */
714 + credit_max = (link_percentage * MAX_CREDIT) / 100;
715 +
716 + /*
717 + * Adjustment based on rule checking, if the percentage
718 + * of a TC is too small, the maximum credit may not be
719 + * enough to send out a jumbo frame in data plane arbitration.
720 + */
721 + if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_JUMBO))
722 + credit_max = MINIMUM_CREDIT_FOR_JUMBO;
723 +
724 + if (direction == DCB_TX_CONFIG) {
725 + /*
726 + * Adjustment based on rule checking, if the
727 + * percentage of a TC is too small, the maximum
728 + * credit may not be enough to send out a TSO
729 + * packet in descriptor plane arbitration.
730 + */
731 + if (credit_max &&
732 + (credit_max < MINIMUM_CREDIT_FOR_TSO))
733 + credit_max = MINIMUM_CREDIT_FOR_TSO;
734 +
735 + dcb_config->tc_config[i].desc_credits_max =
736 + (u16)credit_max;
737 + }
738 +
739 + p->data_credits_max = (u16)credit_max;
740 + }
741 +
742 +out:
743 + return ret_val;
744 +}
745 +
746 +/**
747 + * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
748 + * @hw: pointer to hardware structure
749 + * @stats: pointer to statistics structure
750 + * @tc_count: Number of elements in bwg_array.
751 + *
752 + * This function returns the status data for each of the Traffic Classes in use.
753 + */
754 +s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
755 + u8 tc_count)
756 +{
757 + s32 ret = 0;
758 + if (hw->mac.type == ixgbe_mac_82598EB)
759 + ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
760 + return ret;
761 +}
762 +
763 +/**
764 + * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
765 + * @hw: pointer to hardware structure
766 + * @stats: pointer to statistics structure
767 + * @tc_count: Number of elements in bwg_array.
768 + *
769 + * This function returns the CBFC status data for each of the Traffic Classes.
770 + */
771 +s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
772 + u8 tc_count)
773 +{
774 + s32 ret = 0;
775 + if (hw->mac.type == ixgbe_mac_82598EB)
776 + ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
777 + return ret;
778 +}
779 +
780 +/**
781 + * ixgbe_dcb_config_rx_arbiter - Config Rx arbiter
782 + * @hw: pointer to hardware structure
783 + * @dcb_config: pointer to ixgbe_dcb_config structure
784 + *
785 + * Configure Rx Data Arbiter and credits for each traffic class.
786 + */
787 +s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
788 + struct ixgbe_dcb_config *dcb_config)
789 +{
790 + s32 ret = 0;
791 + if (hw->mac.type == ixgbe_mac_82598EB)
792 + ret = ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
793 + return ret;
794 +}
795 +
796 +/**
797 + * ixgbe_dcb_config_tx_desc_arbiter - Config Tx Desc arbiter
798 + * @hw: pointer to hardware structure
799 + * @dcb_config: pointer to ixgbe_dcb_config structure
800 + *
801 + * Configure Tx Descriptor Arbiter and credits for each traffic class.
802 + */
803 +s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
804 + struct ixgbe_dcb_config *dcb_config)
805 +{
806 + s32 ret = 0;
807 + if (hw->mac.type == ixgbe_mac_82598EB)
808 + ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
809 + return ret;
810 +}
811 +
812 +/**
813 + * ixgbe_dcb_config_tx_data_arbiter - Config Tx data arbiter
814 + * @hw: pointer to hardware structure
815 + * @dcb_config: pointer to ixgbe_dcb_config structure
816 + *
817 + * Configure Tx Data Arbiter and credits for each traffic class.
818 + */
819 +s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
820 + struct ixgbe_dcb_config *dcb_config)
821 +{
822 + s32 ret = 0;
823 + if (hw->mac.type == ixgbe_mac_82598EB)
824 + ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
825 + return ret;
826 +}
827 +
828 +/**
829 + * ixgbe_dcb_config_pfc - Config priority flow control
830 + * @hw: pointer to hardware structure
831 + * @dcb_config: pointer to ixgbe_dcb_config structure
832 + *
833 + * Configure Priority Flow Control for each traffic class.
834 + */
835 +s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
836 + struct ixgbe_dcb_config *dcb_config)
837 +{
838 + s32 ret = 0;
839 + if (hw->mac.type == ixgbe_mac_82598EB)
840 + ret = ixgbe_dcb_config_pfc_82598(hw, dcb_config);
841 + return ret;
842 +}
843 +
844 +/**
845 + * ixgbe_dcb_config_tc_stats - Config traffic class statistics
846 + * @hw: pointer to hardware structure
847 + *
848 + * Configure queue statistics registers, all queues belonging to same traffic
849 + * class uses a single set of queue statistics counters.
850 + */
851 +s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
852 +{
853 + s32 ret = 0;
854 + if (hw->mac.type == ixgbe_mac_82598EB)
855 + ret = ixgbe_dcb_config_tc_stats_82598(hw);
856 + return ret;
857 +}
858 +
859 +/**
860 + * ixgbe_dcb_hw_config - Config and enable DCB
861 + * @hw: pointer to hardware structure
862 + * @dcb_config: pointer to ixgbe_dcb_config structure
863 + *
864 + * Configure dcb settings and enable dcb mode.
865 + */
866 +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
867 + struct ixgbe_dcb_config *dcb_config)
868 +{
869 + s32 ret = 0;
870 + if (hw->mac.type == ixgbe_mac_82598EB)
871 + ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
872 + return ret;
873 +}
874 +
875 --- /dev/null
876 +++ b/drivers/net/ixgbe/ixgbe_dcb.h
877 @@ -0,0 +1,189 @@
878 +/*******************************************************************************
879 +
880 + Intel 10 Gigabit PCI Express Linux driver
881 + Copyright(c) 1999 - 2007 Intel Corporation.
882 +
883 + This program is free software; you can redistribute it and/or modify it
884 + under the terms and conditions of the GNU General Public License,
885 + version 2, as published by the Free Software Foundation.
886 +
887 + This program is distributed in the hope it will be useful, but WITHOUT
888 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
889 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
890 + more details.
891 +
892 + You should have received a copy of the GNU General Public License along with
893 + this program; if not, write to the Free Software Foundation, Inc.,
894 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
895 +
896 + The full GNU General Public License is included in this distribution in
897 + the file called "COPYING".
898 +
899 + Contact Information:
900 + Linux NICS <linux.nics@intel.com>
901 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
902 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
903 +
904 +*******************************************************************************/
905 +
906 +#ifndef _DCB_CONFIG_H_
907 +#define _DCB_CONFIG_H_
908 +
909 +#include "ixgbe_type.h"
910 +
911 +/* DCB data structures */
912 +
913 +#define IXGBE_MAX_PACKET_BUFFERS 8
914 +#define MAX_USER_PRIORITY 8
915 +#define MAX_TRAFFIC_CLASS 8
916 +#define MAX_BW_GROUP 8
917 +#define BW_PERCENT 100
918 +
919 +#define DCB_TX_CONFIG 0
920 +#define DCB_RX_CONFIG 1
921 +
922 +/* DCB error Codes */
923 +#define DCB_SUCCESS 0
924 +#define DCB_ERR_CONFIG -1
925 +#define DCB_ERR_PARAM -2
926 +
927 +/* Transmit and receive Errors */
928 +/* Error in bandwidth group allocation */
929 +#define DCB_ERR_BW_GROUP -3
930 +/* Error in traffic class bandwidth allocation */
931 +#define DCB_ERR_TC_BW -4
932 +/* Traffic class has both link strict and group strict enabled */
933 +#define DCB_ERR_LS_GS -5
934 +/* Link strict traffic class has non zero bandwidth */
935 +#define DCB_ERR_LS_BW_NONZERO -6
936 +/* Link strict bandwidth group has non zero bandwidth */
937 +#define DCB_ERR_LS_BWG_NONZERO -7
938 +/* Traffic class has zero bandwidth */
939 +#define DCB_ERR_TC_BW_ZERO -8
940 +
941 +#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF
942 +
943 +struct dcb_pfc_tc_debug {
944 + u8 tc;
945 + u8 pause_status;
946 + u64 pause_quanta;
947 +};
948 +
949 +enum strict_prio_type {
950 + prio_none = 0,
951 + prio_group,
952 + prio_link
953 +};
954 +
955 +/* Traffic class bandwidth allocation per direction */
956 +struct tc_bw_alloc {
957 + u8 bwg_id; /* Bandwidth Group (BWG) ID */
958 + u8 bwg_percent; /* % of BWG's bandwidth */
959 + u8 link_percent; /* % of link bandwidth */
960 + u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
961 + u16 data_credits_refill; /* Credit refill amount in 64B granularity */
962 + u16 data_credits_max; /* Max credits for a configured packet buffer
963 + * in 64B granularity.*/
964 + enum strict_prio_type prio_type; /* Link or Group Strict Priority */
965 +};
966 +
967 +enum dcb_pfc_type {
968 + pfc_disabled = 0,
969 + pfc_enabled_full,
970 + pfc_enabled_tx,
971 + pfc_enabled_rx
972 +};
973 +
974 +/* Traffic class configuration */
975 +struct tc_configuration {
976 + struct tc_bw_alloc path[2]; /* One each for Tx/Rx */
977 + enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */
978 +
979 + u16 desc_credits_max; /* For Tx Descriptor arbitration */
980 + u8 tc; /* Traffic class (TC) */
981 +};
982 +
983 +enum dcb_rx_pba_cfg {
984 + pba_equal, /* PBA[0-7] each use 64KB FIFO */
985 + pba_80_48 /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
986 +};
987 +
988 +/*
989 + * This structure contains many values encoded as fixed-point
990 + * numbers, meaning that some of bits are dedicated to the
991 + * magnitude and others to the fraction part. In the comments
992 + * this is shown as f=n, where n is the number of fraction bits.
993 + * These fraction bits are always the low-order bits. The size
994 + * of the magnitude is not specified.
995 + */
996 +struct bcn_config {
997 + u32 rp_admin_mode[MAX_TRAFFIC_CLASS]; /* BCN enabled, per TC */
998 + u32 bcna_option[2]; /* BCNA Port + MAC Addr */
999 + u32 rp_w; /* Derivative Weight, f=3 */
1000 + u32 rp_gi; /* Increase Gain, f=12 */
1001 + u32 rp_gd; /* Decrease Gain, f=12 */
1002 + u32 rp_ru; /* Rate Unit */
1003 + u32 rp_alpha; /* Max Decrease Factor, f=12 */
1004 + u32 rp_beta; /* Max Increase Factor, f=12 */
1005 + u32 rp_ri; /* Initial Rate */
1006 + u32 rp_td; /* Drift Interval Timer */
1007 + u32 rp_rd; /* Drift Increase */
1008 + u32 rp_tmax; /* Severe Congestion Backoff Timer Range */
1009 + u32 rp_rmin; /* Severe Congestion Restart Rate */
1010 + u32 rp_wrtt; /* RTT Moving Average Weight */
1011 +};
1012 +
1013 +struct ixgbe_dcb_config {
1014 + struct bcn_config bcn;
1015 +
1016 + struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
1017 + u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
1018 +
1019 + bool round_robin_enable;
1020 +
1021 + enum dcb_rx_pba_cfg rx_pba_cfg;
1022 +
1023 + u32 dcb_cfg_version; /* Not used...OS-specific? */
1024 + u32 link_speed; /* For bandwidth allocation validation purpose */
1025 +};
1026 +
1027 +/* DCB driver APIs */
1028 +
1029 +/* DCB rule checking function.*/
1030 +s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *config);
1031 +
1032 +/* DCB credits calculation */
1033 +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *config,
1034 + u8 direction);
1035 +
1036 +/* DCB PFC functions */
1037 +s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
1038 + struct ixgbe_dcb_config *dcb_config);
1039 +s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
1040 + u8 tc_count);
1041 +
1042 +/* DCB traffic class stats */
1043 +s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
1044 +s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
1045 + u8 tc_count);
1046 +
1047 +/* DCB config arbiters */
1048 +s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
1049 + struct ixgbe_dcb_config *dcb_config);
1050 +s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
1051 + struct ixgbe_dcb_config *dcb_config);
1052 +s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
1053 + struct ixgbe_dcb_config *dcb_config);
1054 +
1055 +/* DCB hw initialization */
1056 +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, struct ixgbe_dcb_config *config);
1057 +
1058 +/* DCB definitions for credit calculation */
1059 +#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
1060 +#define MINIMUM_CREDIT_REFILL 5 /* 5*64B = 320B */
1061 +#define MINIMUM_CREDIT_FOR_JUMBO 145 /* 145= UpperBound((9*1024+54)/64B) for 9KB jumbo frame */
1062 +#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */
1063 +#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
1064 +#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB / 64B */
1065 +
1066 +#endif /* _DCB_CONFIG_H_ */
1067 --- /dev/null
1068 +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
1069 @@ -0,0 +1,611 @@
1070 +/*******************************************************************************
1071 +
1072 + Intel 10 Gigabit PCI Express Linux driver
1073 + Copyright(c) 1999 - 2008 Intel Corporation.
1074 +
1075 + This program is free software; you can redistribute it and/or modify it
1076 + under the terms and conditions of the GNU General Public License,
1077 + version 2, as published by the Free Software Foundation.
1078 +
1079 + This program is distributed in the hope it will be useful, but WITHOUT
1080 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1081 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1082 + more details.
1083 +
1084 + You should have received a copy of the GNU General Public License along with
1085 + this program; if not, write to the Free Software Foundation, Inc.,
1086 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
1087 +
1088 + The full GNU General Public License is included in this distribution in
1089 + the file called "COPYING".
1090 +
1091 + Contact Information:
1092 + Linux NICS <linux.nics@intel.com>
1093 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
1094 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
1095 +
1096 +*******************************************************************************/
1097 +
1098 +#include "ixgbe.h"
1099 +#include <linux/dcbnl.h>
1100 +
1101 +/* Callbacks for DCB netlink in the kernel */
1102 +#define BIT_DCB_MODE 0x01
1103 +#define BIT_PFC 0x02
1104 +#define BIT_PG_RX 0x04
1105 +#define BIT_PG_TX 0x08
1106 +#define BIT_BCN 0x10
1107 +
1108 +int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
1109 + struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
1110 +{
1111 + struct tc_configuration *src_tc_cfg = NULL;
1112 + struct tc_configuration *dst_tc_cfg = NULL;
1113 + int i;
1114 +
1115 + if (!src_dcb_cfg || !dst_dcb_cfg)
1116 + return -EINVAL;
1117 +
1118 + for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
1119 + src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
1120 + dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
1121 +
1122 + dst_tc_cfg->path[DCB_TX_CONFIG].prio_type =
1123 + src_tc_cfg->path[DCB_TX_CONFIG].prio_type;
1124 +
1125 + dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id =
1126 + src_tc_cfg->path[DCB_TX_CONFIG].bwg_id;
1127 +
1128 + dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent =
1129 + src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent;
1130 +
1131 + dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap =
1132 + src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap;
1133 +
1134 + dst_tc_cfg->path[DCB_RX_CONFIG].prio_type =
1135 + src_tc_cfg->path[DCB_RX_CONFIG].prio_type;
1136 +
1137 + dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id =
1138 + src_tc_cfg->path[DCB_RX_CONFIG].bwg_id;
1139 +
1140 + dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent =
1141 + src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent;
1142 +
1143 + dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap =
1144 + src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap;
1145 + }
1146 +
1147 + for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
1148 + dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG]
1149 + [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
1150 + [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
1151 + dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG]
1152 + [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
1153 + [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
1154 + }
1155 +
1156 + for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
1157 + dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc =
1158 + src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
1159 + }
1160 +
1161 + for (i = DCB_BCN_ATTR_RP_0; i < DCB_BCN_ATTR_RP_ALL; i++) {
1162 + dst_dcb_cfg->bcn.rp_admin_mode[i - DCB_BCN_ATTR_RP_0] =
1163 + src_dcb_cfg->bcn.rp_admin_mode[i - DCB_BCN_ATTR_RP_0];
1164 + }
1165 + dst_dcb_cfg->bcn.rp_alpha = src_dcb_cfg->bcn.rp_alpha;
1166 + dst_dcb_cfg->bcn.rp_beta = src_dcb_cfg->bcn.rp_beta;
1167 + dst_dcb_cfg->bcn.rp_gd = src_dcb_cfg->bcn.rp_gd;
1168 + dst_dcb_cfg->bcn.rp_gi = src_dcb_cfg->bcn.rp_gi;
1169 + dst_dcb_cfg->bcn.rp_tmax = src_dcb_cfg->bcn.rp_tmax;
1170 + dst_dcb_cfg->bcn.rp_td = src_dcb_cfg->bcn.rp_td;
1171 + dst_dcb_cfg->bcn.rp_rmin = src_dcb_cfg->bcn.rp_rmin;
1172 + dst_dcb_cfg->bcn.rp_w = src_dcb_cfg->bcn.rp_w;
1173 + dst_dcb_cfg->bcn.rp_rd = src_dcb_cfg->bcn.rp_rd;
1174 + dst_dcb_cfg->bcn.rp_ru = src_dcb_cfg->bcn.rp_ru;
1175 + dst_dcb_cfg->bcn.rp_wrtt = src_dcb_cfg->bcn.rp_wrtt;
1176 + dst_dcb_cfg->bcn.rp_ri = src_dcb_cfg->bcn.rp_ri;
1177 +
1178 + return 0;
1179 +}
1180 +
1181 +static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
1182 +{
1183 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1184 +
1185 + DPRINTK(DRV, INFO, "Get DCB Admin Mode.\n");
1186 +
1187 + return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
1188 +}
1189 +
1190 +static u16 ixgbe_dcb_select_queue(struct net_device *dev, struct sk_buff *skb)
1191 +{
1192 + /* All traffic should default to class 0 */
1193 + return 0;
1194 +}
1195 +
1196 +static void ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
1197 +{
1198 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1199 +
1200 + DPRINTK(DRV, INFO, "Set DCB Admin Mode.\n");
1201 +
1202 + if (state > 0) {
1203 + /* Turn on DCB */
1204 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1205 + return;
1206 + } else {
1207 + if (netdev->flags & IFF_UP)
1208 + netdev->stop(netdev);
1209 + ixgbe_reset_interrupt_capability(adapter);
1210 + ixgbe_napi_del_all(adapter);
1211 + kfree(adapter->tx_ring);
1212 + kfree(adapter->rx_ring);
1213 + adapter->tx_ring = NULL;
1214 + adapter->rx_ring = NULL;
1215 + netdev->select_queue = &ixgbe_dcb_select_queue;
1216 +
1217 + adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
1218 + adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
1219 + ixgbe_init_interrupt_scheme(adapter);
1220 + ixgbe_napi_add_all(adapter);
1221 + if (netdev->flags & IFF_UP)
1222 + netdev->open(netdev);
1223 + }
1224 + } else {
1225 + /* Turn off DCB */
1226 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1227 + if (netdev->flags & IFF_UP)
1228 + netdev->stop(netdev);
1229 + ixgbe_reset_interrupt_capability(adapter);
1230 + ixgbe_napi_del_all(adapter);
1231 + kfree(adapter->tx_ring);
1232 + kfree(adapter->rx_ring);
1233 + adapter->tx_ring = NULL;
1234 + adapter->rx_ring = NULL;
1235 + netdev->select_queue = NULL;
1236 +
1237 + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
1238 + adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
1239 + ixgbe_init_interrupt_scheme(adapter);
1240 + ixgbe_napi_add_all(adapter);
1241 + if (netdev->flags & IFF_UP)
1242 + netdev->open(netdev);
1243 + } else {
1244 + return;
1245 + }
1246 + }
1247 +}
1248 +
1249 +static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
1250 + u8 *perm_addr)
1251 +{
1252 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1253 + int i;
1254 +
1255 + for (i = 0; i < netdev->addr_len; i++)
1256 + perm_addr[i] = adapter->hw.mac.perm_addr[i];
1257 +}
1258 +
1259 +static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
1260 + u8 prio, u8 bwg_id, u8 bw_pct,
1261 + u8 up_map)
1262 +{
1263 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1264 +
1265 + if (prio != DCB_ATTR_VALUE_UNDEFINED)
1266 + adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
1267 + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
1268 + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id;
1269 + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
1270 + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent =
1271 + bw_pct;
1272 + if (up_map != DCB_ATTR_VALUE_UNDEFINED)
1273 + adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
1274 + up_map;
1275 +
1276 + if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
1277 + adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
1278 + (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
1279 + adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
1280 + (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
1281 + adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
1282 + (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
1283 + adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
1284 + adapter->dcb_set_bitmap |= BIT_PG_TX;
1285 +}
1286 +
1287 +static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
1288 + u8 bw_pct)
1289 +{
1290 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1291 +
1292 + adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
1293 +
1294 + if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
1295 + adapter->dcb_cfg.bw_percentage[0][bwg_id])
1296 + adapter->dcb_set_bitmap |= BIT_PG_RX;
1297 +}
1298 +
1299 +static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
1300 + u8 prio, u8 bwg_id, u8 bw_pct,
1301 + u8 up_map)
1302 +{
1303 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1304 +
1305 + if (prio != DCB_ATTR_VALUE_UNDEFINED)
1306 + adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
1307 + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
1308 + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id;
1309 + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
1310 + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent =
1311 + bw_pct;
1312 + if (up_map != DCB_ATTR_VALUE_UNDEFINED)
1313 + adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
1314 + up_map;
1315 +
1316 + if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
1317 + adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
1318 + (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
1319 + adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
1320 + (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
1321 + adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
1322 + (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
1323 + adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
1324 + adapter->dcb_set_bitmap |= BIT_PG_RX;
1325 +}
1326 +
1327 +static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
1328 + u8 bw_pct)
1329 +{
1330 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1331 +
1332 + adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
1333 +
1334 + if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
1335 + adapter->dcb_cfg.bw_percentage[1][bwg_id])
1336 + adapter->dcb_set_bitmap |= BIT_PG_RX;
1337 +}
1338 +
1339 +static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
1340 + u8 *prio, u8 *bwg_id, u8 *bw_pct,
1341 + u8 *up_map)
1342 +{
1343 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1344 +
1345 + *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
1346 + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
1347 + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent;
1348 + *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;
1349 +}
1350 +
1351 +static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
1352 + u8 *bw_pct)
1353 +{
1354 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1355 +
1356 + *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
1357 +}
1358 +
1359 +static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
1360 + u8 *prio, u8 *bwg_id, u8 *bw_pct,
1361 + u8 *up_map)
1362 +{
1363 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1364 +
1365 + *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
1366 + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
1367 + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent;
1368 + *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap;
1369 +}
1370 +
1371 +static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
1372 + u8 *bw_pct)
1373 +{
1374 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1375 +
1376 + *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
1377 +}
1378 +
1379 +static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
1380 + u8 setting)
1381 +{
1382 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1383 +
1384 + adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
1385 + if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
1386 + adapter->dcb_cfg.tc_config[priority].dcb_pfc)
1387 + adapter->dcb_set_bitmap |= BIT_PFC;
1388 +}
1389 +
1390 +static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
1391 + u8 *setting)
1392 +{
1393 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1394 +
1395 + *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
1396 +}
1397 +
1398 +static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
1399 +{
1400 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1401 + int ret;
1402 +
1403 + adapter->dcb_set_bitmap &= ~BIT_BCN; /* no set for BCN */
1404 + if (!adapter->dcb_set_bitmap)
1405 + return 1;
1406 +
1407 + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1408 + msleep(1);
1409 +
1410 + ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
1411 + adapter->ring_feature[RING_F_DCB].indices);
1412 + if (ret) {
1413 + clear_bit(__IXGBE_RESETTING, &adapter->state);
1414 + return ret;
1415 + }
1416 +
1417 + ixgbe_down(adapter);
1418 + ixgbe_up(adapter);
1419 + adapter->dcb_set_bitmap = 0x00;
1420 + clear_bit(__IXGBE_RESETTING, &adapter->state);
1421 + return ret;
1422 +}
1423 +
1424 +static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
1425 +{
1426 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1427 + u8 rval = 0;
1428 +
1429 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1430 + switch (capid) {
1431 + case DCB_CAP_ATTR_PG:
1432 + *cap = true;
1433 + break;
1434 + case DCB_CAP_ATTR_PFC:
1435 + *cap = true;
1436 + break;
1437 + case DCB_CAP_ATTR_UP2TC:
1438 + *cap = false;
1439 + break;
1440 + case DCB_CAP_ATTR_PG_TCS:
1441 + *cap = 0x80;
1442 + break;
1443 + case DCB_CAP_ATTR_PFC_TCS:
1444 + *cap = 0x80;
1445 + break;
1446 + case DCB_CAP_ATTR_GSP:
1447 + *cap = true;
1448 + break;
1449 + case DCB_CAP_ATTR_BCN:
1450 + *cap = false;
1451 + break;
1452 + default:
1453 + rval = -EINVAL;
1454 + break;
1455 + }
1456 + } else {
1457 + rval = -EINVAL;
1458 + }
1459 +
1460 + return rval;
1461 +}
1462 +
1463 +static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
1464 +{
1465 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1466 + u8 rval = 0;
1467 +
1468 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1469 + switch (tcid) {
1470 + case DCB_NUMTCS_ATTR_PG:
1471 + *num = MAX_TRAFFIC_CLASS;
1472 + break;
1473 + case DCB_NUMTCS_ATTR_PFC:
1474 + *num = MAX_TRAFFIC_CLASS;
1475 + break;
1476 + default:
1477 + rval = -EINVAL;
1478 + break;
1479 + }
1480 + } else {
1481 + rval = -EINVAL;
1482 + }
1483 +
1484 + return rval;
1485 +}
1486 +
1487 +static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
1488 +{
1489 + return -EINVAL;
1490 +}
1491 +
1492 +static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev)
1493 +{
1494 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1495 +
1496 + return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
1497 +}
1498 +
1499 +static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
1500 +{
1501 + return;
1502 +}
1503 +
1504 +static void ixgbe_dcbnl_getbcnrp(struct net_device *netdev, int priority,
1505 + u8 *setting)
1506 +{
1507 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1508 +
1509 + *setting = adapter->dcb_cfg.bcn.rp_admin_mode[priority];
1510 +}
1511 +
1512 +
1513 +static void ixgbe_dcbnl_getbcncfg(struct net_device *netdev, int enum_index,
1514 + u32 *setting)
1515 +{
1516 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1517 +
1518 + switch (enum_index) {
1519 + case DCB_BCN_ATTR_ALPHA:
1520 + *setting = adapter->dcb_cfg.bcn.rp_alpha;
1521 + break;
1522 + case DCB_BCN_ATTR_BETA:
1523 + *setting = adapter->dcb_cfg.bcn.rp_beta;
1524 + break;
1525 + case DCB_BCN_ATTR_GD:
1526 + *setting = adapter->dcb_cfg.bcn.rp_gd;
1527 + break;
1528 + case DCB_BCN_ATTR_GI:
1529 + *setting = adapter->dcb_cfg.bcn.rp_gi;
1530 + break;
1531 + case DCB_BCN_ATTR_TMAX:
1532 + *setting = adapter->dcb_cfg.bcn.rp_tmax;
1533 + break;
1534 + case DCB_BCN_ATTR_TD:
1535 + *setting = adapter->dcb_cfg.bcn.rp_td;
1536 + break;
1537 + case DCB_BCN_ATTR_RMIN:
1538 + *setting = adapter->dcb_cfg.bcn.rp_rmin;
1539 + break;
1540 + case DCB_BCN_ATTR_W:
1541 + *setting = adapter->dcb_cfg.bcn.rp_w;
1542 + break;
1543 + case DCB_BCN_ATTR_RD:
1544 + *setting = adapter->dcb_cfg.bcn.rp_rd;
1545 + break;
1546 + case DCB_BCN_ATTR_RU:
1547 + *setting = adapter->dcb_cfg.bcn.rp_ru;
1548 + break;
1549 + case DCB_BCN_ATTR_WRTT:
1550 + *setting = adapter->dcb_cfg.bcn.rp_wrtt;
1551 + break;
1552 + case DCB_BCN_ATTR_RI:
1553 + *setting = adapter->dcb_cfg.bcn.rp_ri;
1554 + break;
1555 + default:
1556 + *setting = -1;
1557 + }
1558 +}
1559 +
1560 +static void ixgbe_dcbnl_setbcnrp(struct net_device *netdev, int priority,
1561 + u8 setting)
1562 +{
1563 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1564 +
1565 + adapter->temp_dcb_cfg.bcn.rp_admin_mode[priority] = setting;
1566 +
1567 + if (adapter->temp_dcb_cfg.bcn.rp_admin_mode[priority] !=
1568 + adapter->dcb_cfg.bcn.rp_admin_mode[priority])
1569 + adapter->dcb_set_bitmap |= BIT_BCN;
1570 +}
1571 +
1572 +static void ixgbe_dcbnl_setbcncfg(struct net_device *netdev, int enum_index,
1573 + u32 setting)
1574 +{
1575 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
1576 +
1577 + switch (enum_index) {
1578 + case DCB_BCN_ATTR_ALPHA:
1579 + adapter->temp_dcb_cfg.bcn.rp_alpha = setting;
1580 + if (adapter->temp_dcb_cfg.bcn.rp_alpha !=
1581 + adapter->dcb_cfg.bcn.rp_alpha)
1582 + adapter->dcb_set_bitmap |= BIT_BCN;
1583 + break;
1584 + case DCB_BCN_ATTR_BETA:
1585 + adapter->temp_dcb_cfg.bcn.rp_beta = setting;
1586 + if (adapter->temp_dcb_cfg.bcn.rp_beta !=
1587 + adapter->dcb_cfg.bcn.rp_beta)
1588 + adapter->dcb_set_bitmap |= BIT_BCN;
1589 + break;
1590 + case DCB_BCN_ATTR_GD:
1591 + adapter->temp_dcb_cfg.bcn.rp_gd = setting;
1592 + if (adapter->temp_dcb_cfg.bcn.rp_gd !=
1593 + adapter->dcb_cfg.bcn.rp_gd)
1594 + adapter->dcb_set_bitmap |= BIT_BCN;
1595 + break;
1596 + case DCB_BCN_ATTR_GI:
1597 + adapter->temp_dcb_cfg.bcn.rp_gi = setting;
1598 + if (adapter->temp_dcb_cfg.bcn.rp_gi !=
1599 + adapter->dcb_cfg.bcn.rp_gi)
1600 + adapter->dcb_set_bitmap |= BIT_BCN;
1601 + break;
1602 + case DCB_BCN_ATTR_TMAX:
1603 + adapter->temp_dcb_cfg.bcn.rp_tmax = setting;
1604 + if (adapter->temp_dcb_cfg.bcn.rp_tmax !=
1605 + adapter->dcb_cfg.bcn.rp_tmax)
1606 + adapter->dcb_set_bitmap |= BIT_BCN;
1607 + break;
1608 + case DCB_BCN_ATTR_TD:
1609 + adapter->temp_dcb_cfg.bcn.rp_td = setting;
1610 + if (adapter->temp_dcb_cfg.bcn.rp_td !=
1611 + adapter->dcb_cfg.bcn.rp_td)
1612 + adapter->dcb_set_bitmap |= BIT_BCN;
1613 + break;
1614 + case DCB_BCN_ATTR_RMIN:
1615 + adapter->temp_dcb_cfg.bcn.rp_rmin = setting;
1616 + if (adapter->temp_dcb_cfg.bcn.rp_rmin !=
1617 + adapter->dcb_cfg.bcn.rp_rmin)
1618 + adapter->dcb_set_bitmap |= BIT_BCN;
1619 + break;
1620 + case DCB_BCN_ATTR_W:
1621 + adapter->temp_dcb_cfg.bcn.rp_w = setting;
1622 + if (adapter->temp_dcb_cfg.bcn.rp_w !=
1623 + adapter->dcb_cfg.bcn.rp_w)
1624 + adapter->dcb_set_bitmap |= BIT_BCN;
1625 + break;
1626 + case DCB_BCN_ATTR_RD:
1627 + adapter->temp_dcb_cfg.bcn.rp_rd = setting;
1628 + if (adapter->temp_dcb_cfg.bcn.rp_rd !=
1629 + adapter->dcb_cfg.bcn.rp_rd)
1630 + adapter->dcb_set_bitmap |= BIT_BCN;
1631 + break;
1632 + case DCB_BCN_ATTR_RU:
1633 + adapter->temp_dcb_cfg.bcn.rp_ru = setting;
1634 + if (adapter->temp_dcb_cfg.bcn.rp_ru !=
1635 + adapter->dcb_cfg.bcn.rp_ru)
1636 + adapter->dcb_set_bitmap |= BIT_BCN;
1637 + break;
1638 + case DCB_BCN_ATTR_WRTT:
1639 + adapter->temp_dcb_cfg.bcn.rp_wrtt = setting;
1640 + if (adapter->temp_dcb_cfg.bcn.rp_wrtt !=
1641 + adapter->dcb_cfg.bcn.rp_wrtt)
1642 + adapter->dcb_set_bitmap |= BIT_BCN;
1643 + break;
1644 + case DCB_BCN_ATTR_RI:
1645 + adapter->temp_dcb_cfg.bcn.rp_ri = setting;
1646 + if (adapter->temp_dcb_cfg.bcn.rp_ri !=
1647 + adapter->dcb_cfg.bcn.rp_ri)
1648 + adapter->dcb_set_bitmap |= BIT_BCN;
1649 + break;
1650 + default:
1651 + break;
1652 + }
1653 +}
1654 +
1655 +struct dcbnl_rtnl_ops dcbnl_ops = {
1656 + .getstate = ixgbe_dcbnl_get_state,
1657 + .setstate = ixgbe_dcbnl_set_state,
1658 + .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
1659 + .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx,
1660 + .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx,
1661 + .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx,
1662 + .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx,
1663 + .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx,
1664 + .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx,
1665 + .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx,
1666 + .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx,
1667 + .setpfccfg = ixgbe_dcbnl_set_pfc_cfg,
1668 + .getpfccfg = ixgbe_dcbnl_get_pfc_cfg,
1669 + .setall = ixgbe_dcbnl_set_all,
1670 + .getcap = ixgbe_dcbnl_getcap,
1671 + .getnumtcs = ixgbe_dcbnl_getnumtcs,
1672 + .setnumtcs = ixgbe_dcbnl_setnumtcs,
1673 + .getpfcstate = ixgbe_dcbnl_getpfcstate,
1674 + .setpfcstate = ixgbe_dcbnl_setpfcstate,
1675 + .getbcncfg = ixgbe_dcbnl_getbcncfg,
1676 + .getbcnrp = ixgbe_dcbnl_getbcnrp,
1677 + .setbcncfg = ixgbe_dcbnl_setbcncfg,
1678 + .setbcnrp = ixgbe_dcbnl_setbcnrp
1679 +};
1680 +
1681 --- a/drivers/net/ixgbe/ixgbe_ethtool.c
1682 +++ b/drivers/net/ixgbe/ixgbe_ethtool.c
1683 @@ -99,9 +99,18 @@ static struct ixgbe_stats ixgbe_gstrings
1684 ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
1685 ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
1686 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
1687 -#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
1688 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
1689 -#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
1690 +#define IXGBE_PB_STATS_LEN ( \
1691 + (((struct ixgbe_adapter *)netdev->priv)->flags & \
1692 + IXGBE_FLAG_DCB_ENABLED) ? \
1693 + (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
1694 + sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
1695 + sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
1696 + sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
1697 + / sizeof(u64) : 0)
1698 +#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
1699 + IXGBE_PB_STATS_LEN + \
1700 + IXGBE_QUEUE_STATS_LEN)
1701
1702 static int ixgbe_get_settings(struct net_device *netdev,
1703 struct ethtool_cmd *ecmd)
1704 @@ -809,6 +818,16 @@ static void ixgbe_get_ethtool_stats(stru
1705 data[i + k] = queue_stat[k];
1706 i += k;
1707 }
1708 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1709 + for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
1710 + data[i++] = adapter->stats.pxontxc[j];
1711 + data[i++] = adapter->stats.pxofftxc[j];
1712 + }
1713 + for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
1714 + data[i++] = adapter->stats.pxonrxc[j];
1715 + data[i++] = adapter->stats.pxoffrxc[j];
1716 + }
1717 + }
1718 }
1719
1720 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1721 @@ -837,6 +856,20 @@ static void ixgbe_get_strings(struct net
1722 sprintf(p, "rx_queue_%u_bytes", i);
1723 p += ETH_GSTRING_LEN;
1724 }
1725 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1726 + for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
1727 + sprintf(p, "tx_pb_%u_pxon", i);
1728 + p += ETH_GSTRING_LEN;
1729 + sprintf(p, "tx_pb_%u_pxoff", i);
1730 + p += ETH_GSTRING_LEN;
1731 + }
1732 + for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
1733 + sprintf(p, "rx_pb_%u_pxon", i);
1734 + p += ETH_GSTRING_LEN;
1735 + sprintf(p, "rx_pb_%u_pxoff", i);
1736 + p += ETH_GSTRING_LEN;
1737 + }
1738 + }
1739 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1740 break;
1741 }
1742 --- a/drivers/net/ixgbe/ixgbe.h
1743 +++ b/drivers/net/ixgbe/ixgbe.h
1744 @@ -40,6 +40,7 @@
1745
1746 #include "ixgbe_type.h"
1747 #include "ixgbe_common.h"
1748 +#include "ixgbe_dcb.h"
1749
1750 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
1751 #include <linux/dca.h>
1752 @@ -89,6 +90,7 @@
1753 #define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
1754 #define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
1755 #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
1756 +#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
1757 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16
1758
1759 /* wrapper around a pointer to a socket buffer,
1760 @@ -136,7 +138,7 @@ struct ixgbe_ring {
1761
1762 u16 reg_idx; /* holds the special value that gets the hardware register
1763 * offset associated with this ring, which is different
1764 - * for DCE and RSS modes */
1765 + * for DCB and RSS modes */
1766
1767 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
1768 /* cpu for tx queue */
1769 @@ -156,8 +158,10 @@ struct ixgbe_ring {
1770 u16 rx_buf_len;
1771 };
1772
1773 +#define RING_F_DCB 0
1774 #define RING_F_VMDQ 1
1775 #define RING_F_RSS 2
1776 +#define IXGBE_MAX_DCB_INDICES 8
1777 #define IXGBE_MAX_RSS_INDICES 16
1778 #define IXGBE_MAX_VMDQ_INDICES 16
1779 struct ixgbe_ring_feature {
1780 @@ -168,6 +172,10 @@ struct ixgbe_ring_feature {
1781 #define MAX_RX_QUEUES 64
1782 #define MAX_TX_QUEUES 32
1783
1784 +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
1785 + ? 8 : 1)
1786 +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
1787 +
1788 /* MAX_MSIX_Q_VECTORS of these are allocated,
1789 * but we only use one per queue-specific vector.
1790 */
1791 @@ -219,6 +227,9 @@ struct ixgbe_adapter {
1792 struct work_struct reset_task;
1793 struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
1794 char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
1795 + struct ixgbe_dcb_config dcb_cfg;
1796 + struct ixgbe_dcb_config temp_dcb_cfg;
1797 + u8 dcb_set_bitmap;
1798
1799 /* Interrupt Throttle Rate */
1800 u32 itr_setting;
1801 @@ -273,6 +284,7 @@ struct ixgbe_adapter {
1802 #define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
1803 #define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
1804 #define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
1805 +#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 24)
1806
1807 /* default to trying for four seconds */
1808 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
1809 @@ -318,6 +330,13 @@ enum ixgbe_boards {
1810 };
1811
1812 extern struct ixgbe_info ixgbe_82598_info;
1813 +#ifdef CONFIG_DCBNL
1814 +extern struct dcbnl_rtnl_ops dcbnl_ops;
1815 +extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
1816 + struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max);
1817 +#endif
1818 +
1819 +
1820
1821 extern char ixgbe_driver_name[];
1822 extern const char ixgbe_driver_version[];
1823 @@ -332,5 +351,8 @@ extern int ixgbe_setup_tx_resources(stru
1824 extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
1825 extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
1826 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
1827 -
1828 +extern void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter);
1829 +extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
1830 +void ixgbe_napi_add_all(struct ixgbe_adapter *adapter);
1831 +void ixgbe_napi_del_all(struct ixgbe_adapter *adapter);
1832 #endif /* _IXGBE_H_ */
1833 --- a/drivers/net/ixgbe/ixgbe_main.c
1834 +++ b/drivers/net/ixgbe/ixgbe_main.c
1835 @@ -403,7 +403,7 @@ static void ixgbe_receive_skb(struct ixg
1836 #ifdef CONFIG_IXGBE_LRO
1837 if (adapter->netdev->features & NETIF_F_LRO &&
1838 skb->ip_summed == CHECKSUM_UNNECESSARY) {
1839 - if (adapter->vlgrp && is_vlan)
1840 + if (adapter->vlgrp && is_vlan && (tag != 0))
1841 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
1842 adapter->vlgrp, tag,
1843 rx_desc);
1844 @@ -413,12 +413,12 @@ static void ixgbe_receive_skb(struct ixg
1845 } else {
1846 #endif
1847 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
1848 - if (adapter->vlgrp && is_vlan)
1849 + if (adapter->vlgrp && is_vlan && (tag != 0))
1850 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
1851 else
1852 netif_receive_skb(skb);
1853 } else {
1854 - if (adapter->vlgrp && is_vlan)
1855 + if (adapter->vlgrp && is_vlan && (tag != 0))
1856 vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
1857 else
1858 netif_rx(skb);
1859 @@ -1656,10 +1656,12 @@ static void ixgbe_configure_rx(struct ix
1860 * effects of setting this bit are only that SRRCTL must be
1861 * fully programmed [0..15]
1862 */
1863 - rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1864 - rdrxctl |= IXGBE_RDRXCTL_MVMEN;
1865 - IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1866 -
1867 + if (adapter->flags &
1868 + (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) {
1869 + rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1870 + rdrxctl |= IXGBE_RDRXCTL_MVMEN;
1871 + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1872 + }
1873
1874 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
1875 /* Fill out redirection table */
1876 @@ -1718,6 +1720,16 @@ static void ixgbe_vlan_rx_register(struc
1877 ixgbe_irq_disable(adapter);
1878 adapter->vlgrp = grp;
1879
1880 + /*
1881 + * For a DCB driver, always enable VLAN tag stripping so we can
1882 + * still receive traffic from a DCB-enabled host even if we're
1883 + * not in DCB mode.
1884 + */
1885 + ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
1886 + ctrl |= IXGBE_VLNCTRL_VME;
1887 + ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1888 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1889 +
1890 if (grp) {
1891 /* enable VLAN tag insert/strip */
1892 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
1893 @@ -1882,6 +1894,42 @@ static void ixgbe_napi_disable_all(struc
1894 }
1895 }
1896
1897 +/*
1898 + * ixgbe_configure_dcb - Configure DCB hardware
1899 + * @adapter: ixgbe adapter struct
1900 + *
1901 + * This is called by the driver on open to configure the DCB hardware.
1902 + * This is also called by the gennetlink interface when reconfiguring
1903 + * the DCB state.
1904 + */
1905 +static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
1906 +{
1907 + struct ixgbe_hw *hw = &adapter->hw;
1908 + u32 txdctl, vlnctrl;
1909 + int i, j;
1910 +
1911 + ixgbe_dcb_check_config(&adapter->dcb_cfg);
1912 + ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
1913 + ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
1914 +
1915 + /* reconfigure the hardware */
1916 + ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
1917 +
1918 + for (i = 0; i < adapter->num_tx_queues; i++) {
1919 + j = adapter->tx_ring[i].reg_idx;
1920 + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
1921 + /* PThresh workaround for Tx hang with DFP enabled. */
1922 + txdctl |= 32;
1923 + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
1924 + }
1925 + /* Enable VLAN tag insert/strip */
1926 + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1927 + vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
1928 + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1929 + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1930 + hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
1931 +}
1932 +
1933 static void ixgbe_configure(struct ixgbe_adapter *adapter)
1934 {
1935 struct net_device *netdev = adapter->netdev;
1936 @@ -1890,6 +1938,12 @@ static void ixgbe_configure(struct ixgbe
1937 ixgbe_set_rx_mode(netdev);
1938
1939 ixgbe_restore_vlan(adapter);
1940 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1941 + netif_set_gso_max_size(netdev, 32768);
1942 + ixgbe_configure_dcb(adapter);
1943 + } else {
1944 + netif_set_gso_max_size(netdev, 65536);
1945 + }
1946
1947 ixgbe_configure_tx(adapter);
1948 ixgbe_configure_rx(adapter);
1949 @@ -2236,6 +2290,11 @@ static void ixgbe_reset_task(struct work
1950 struct ixgbe_adapter *adapter;
1951 adapter = container_of(work, struct ixgbe_adapter, reset_task);
1952
1953 + /* If we're already down or resetting, just bail */
1954 + if (test_bit(__IXGBE_DOWN, &adapter->state) ||
1955 + test_bit(__IXGBE_RESETTING, &adapter->state))
1956 + return;
1957 +
1958 adapter->tx_timeout_count++;
1959
1960 ixgbe_reinit_locked(adapter);
1961 @@ -2245,15 +2304,31 @@ static void ixgbe_set_num_queues(struct
1962 {
1963 int nrq = 1, ntq = 1;
1964 int feature_mask = 0, rss_i, rss_m;
1965 + int dcb_i, dcb_m;
1966
1967 /* Number of supported queues */
1968 switch (adapter->hw.mac.type) {
1969 case ixgbe_mac_82598EB:
1970 + dcb_i = adapter->ring_feature[RING_F_DCB].indices;
1971 + dcb_m = 0;
1972 rss_i = adapter->ring_feature[RING_F_RSS].indices;
1973 rss_m = 0;
1974 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
1975 + feature_mask |= IXGBE_FLAG_DCB_ENABLED;
1976
1977 switch (adapter->flags & feature_mask) {
1978 + case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
1979 + dcb_m = 0x7 << 3;
1980 + rss_i = min(8, rss_i);
1981 + rss_m = 0x7;
1982 + nrq = dcb_i * rss_i;
1983 + ntq = min(MAX_TX_QUEUES, dcb_i * rss_i);
1984 + break;
1985 + case (IXGBE_FLAG_DCB_ENABLED):
1986 + dcb_m = 0x7 << 3;
1987 + nrq = dcb_i;
1988 + ntq = dcb_i;
1989 + break;
1990 case (IXGBE_FLAG_RSS_ENABLED):
1991 rss_m = 0xF;
1992 nrq = rss_i;
1993 @@ -2261,6 +2336,8 @@ static void ixgbe_set_num_queues(struct
1994 break;
1995 case 0:
1996 default:
1997 + dcb_i = 0;
1998 + dcb_m = 0;
1999 rss_i = 0;
2000 rss_m = 0;
2001 nrq = 1;
2002 @@ -2268,6 +2345,12 @@ static void ixgbe_set_num_queues(struct
2003 break;
2004 }
2005
2006 + /* Sanity check, we should never have zero queues */
2007 + nrq = (nrq ?:1);
2008 + ntq = (ntq ?:1);
2009 +
2010 + adapter->ring_feature[RING_F_DCB].indices = dcb_i;
2011 + adapter->ring_feature[RING_F_DCB].mask = dcb_m;
2012 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2013 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2014 break;
2015 @@ -2319,6 +2402,7 @@ static void ixgbe_acquire_msix_vectors(s
2016 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2017 kfree(adapter->msix_entries);
2018 adapter->msix_entries = NULL;
2019 + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2020 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2021 ixgbe_set_num_queues(adapter);
2022 } else {
2023 @@ -2338,15 +2422,42 @@ static void __devinit ixgbe_cache_ring_r
2024 {
2025 int feature_mask = 0, rss_i;
2026 int i, txr_idx, rxr_idx;
2027 + int dcb_i;
2028
2029 /* Number of supported queues */
2030 switch (adapter->hw.mac.type) {
2031 case ixgbe_mac_82598EB:
2032 + dcb_i = adapter->ring_feature[RING_F_DCB].indices;
2033 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2034 txr_idx = 0;
2035 rxr_idx = 0;
2036 + feature_mask |= IXGBE_FLAG_DCB_ENABLED;
2037 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2038 switch (adapter->flags & feature_mask) {
2039 + case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
2040 + for (i = 0; i < dcb_i; i++) {
2041 + int j;
2042 + /* Rx first */
2043 + for (j = 0; j < adapter->num_rx_queues; j++) {
2044 + adapter->rx_ring[rxr_idx].reg_idx =
2045 + i << 3 | j;
2046 + rxr_idx++;
2047 + }
2048 + /* Tx now */
2049 + for (j = 0; j < adapter->num_tx_queues; j++) {
2050 + adapter->tx_ring[txr_idx].reg_idx =
2051 + i << 2 | (j >> 1);
2052 + if (j & 1)
2053 + txr_idx++;
2054 + }
2055 + }
2056 + case (IXGBE_FLAG_DCB_ENABLED):
2057 + /* the number of queues is assumed to be symmetric */
2058 + for (i = 0; i < dcb_i; i++) {
2059 + adapter->rx_ring[i].reg_idx = i << 3;
2060 + adapter->tx_ring[i].reg_idx = i << 2;
2061 + }
2062 + break;
2063 case (IXGBE_FLAG_RSS_ENABLED):
2064 for (i = 0; i < adapter->num_rx_queues; i++)
2065 adapter->rx_ring[i].reg_idx = i;
2066 @@ -2371,7 +2482,7 @@ static void __devinit ixgbe_cache_ring_r
2067 * number of queues at compile-time. The polling_netdev array is
2068 * intended for Multiqueue, but should work fine with a single queue.
2069 **/
2070 -static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
2071 +static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
2072 {
2073 int i;
2074
2075 @@ -2412,8 +2523,7 @@ err_tx_ring_allocation:
2076 * Attempt to configure the interrupts using the best available
2077 * capabilities of the hardware and the kernel.
2078 **/
2079 -static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2080 - *adapter)
2081 +static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
2082 {
2083 int err = 0;
2084 int vector, v_budget;
2085 @@ -2441,6 +2551,7 @@ static int __devinit ixgbe_set_interrupt
2086 adapter->msix_entries = kcalloc(v_budget,
2087 sizeof(struct msix_entry), GFP_KERNEL);
2088 if (!adapter->msix_entries) {
2089 + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2090 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2091 ixgbe_set_num_queues(adapter);
2092 kfree(adapter->tx_ring);
2093 @@ -2481,7 +2592,7 @@ out:
2094 return err;
2095 }
2096
2097 -static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2098 +void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2099 {
2100 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2101 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2102 @@ -2505,7 +2616,7 @@ static void ixgbe_reset_interrupt_capabi
2103 * - Hardware queue count (num_*_queues)
2104 * - defined by miscellaneous hardware support/features (RSS, etc.)
2105 **/
2106 -static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2107 +int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2108 {
2109 int err;
2110
2111 @@ -2553,6 +2664,8 @@ static int __devinit ixgbe_sw_init(struc
2112 struct ixgbe_hw *hw = &adapter->hw;
2113 struct pci_dev *pdev = adapter->pdev;
2114 unsigned int rss;
2115 + int j;
2116 + struct tc_configuration *tc;
2117
2118 /* PCI config space info */
2119
2120 @@ -2566,6 +2679,26 @@ static int __devinit ixgbe_sw_init(struc
2121 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
2122 adapter->ring_feature[RING_F_RSS].indices = rss;
2123 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
2124 + adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
2125 +
2126 + /* Configure DCB traffic classes */
2127 + for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
2128 + tc = &adapter->dcb_cfg.tc_config[j];
2129 + tc->path[DCB_TX_CONFIG].bwg_id = 0;
2130 + tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
2131 + tc->path[DCB_RX_CONFIG].bwg_id = 0;
2132 + tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
2133 + tc->dcb_pfc = pfc_disabled;
2134 + }
2135 + adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
2136 + adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
2137 + adapter->dcb_cfg.rx_pba_cfg = pba_equal;
2138 + adapter->dcb_cfg.round_robin_enable = false;
2139 + adapter->dcb_set_bitmap = 0x00;
2140 +#ifdef CONFIG_DCBNL
2141 + ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
2142 + adapter->ring_feature[RING_F_DCB].indices);
2143 +#endif
2144
2145 /* default flow control settings */
2146 hw->fc.original_type = ixgbe_fc_none;
2147 @@ -2945,7 +3078,7 @@ static int ixgbe_close(struct net_device
2148 * @adapter: private struct
2149 * helper function to napi_add each possible q_vector->napi
2150 */
2151 -static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
2152 +void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
2153 {
2154 int q_idx, q_vectors;
2155 int (*poll)(struct napi_struct *, int);
2156 @@ -2966,7 +3099,7 @@ static void ixgbe_napi_add_all(struct ix
2157 }
2158 }
2159
2160 -static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
2161 +void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
2162 {
2163 int q_idx;
2164 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2165 @@ -3087,6 +3220,18 @@ void ixgbe_update_stats(struct ixgbe_ada
2166 adapter->stats.mpc[i] += mpc;
2167 total_mpc += adapter->stats.mpc[i];
2168 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2169 + adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
2170 + adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
2171 + adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
2172 + adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
2173 + adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
2174 + IXGBE_PXONRXC(i));
2175 + adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
2176 + IXGBE_PXONTXC(i));
2177 + adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
2178 + IXGBE_PXOFFRXC(i));
2179 + adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
2180 + IXGBE_PXOFFTXC(i));
2181 }
2182 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
2183 /* work around hardware counting issue */
2184 @@ -3584,6 +3729,14 @@ static int ixgbe_xmit_frame(struct sk_bu
2185
2186 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
2187 tx_flags |= vlan_tx_tag_get(skb);
2188 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2189 + tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
2190 + tx_flags |= (skb->queue_mapping << 13);
2191 + }
2192 + tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
2193 + tx_flags |= IXGBE_TX_FLAGS_VLAN;
2194 + } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2195 + tx_flags |= (skb->queue_mapping << 13);
2196 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
2197 tx_flags |= IXGBE_TX_FLAGS_VLAN;
2198 }
2199 @@ -3852,6 +4005,13 @@ static int __devinit ixgbe_probe(struct
2200 netdev->vlan_features |= NETIF_F_IP_CSUM;
2201 netdev->vlan_features |= NETIF_F_SG;
2202
2203 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
2204 + adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2205 +
2206 +#ifdef CONFIG_DCBNL
2207 + netdev->dcbnl_ops = &dcbnl_ops;
2208 +#endif
2209 +
2210 if (pci_using_dac)
2211 netdev->features |= NETIF_F_HIGHDMA;
2212
2213 @@ -4108,7 +4268,6 @@ static struct pci_driver ixgbe_driver =
2214 **/
2215 static int __init ixgbe_init_module(void)
2216 {
2217 - int ret;
2218 printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
2219 ixgbe_driver_string, ixgbe_driver_version);
2220
2221 @@ -4118,8 +4277,7 @@ static int __init ixgbe_init_module(void
2222 dca_register_notify(&dca_notifier);
2223
2224 #endif
2225 - ret = pci_register_driver(&ixgbe_driver);
2226 - return ret;
2227 + return pci_register_driver(&ixgbe_driver);
2228 }
2229
2230 module_init(ixgbe_init_module);
2231 --- a/drivers/net/ixgbe/Makefile
2232 +++ b/drivers/net/ixgbe/Makefile
2233 @@ -33,4 +33,5 @@
2234 obj-$(CONFIG_IXGBE) += ixgbe.o
2235
2236 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
2237 - ixgbe_82598.o ixgbe_phy.o
2238 + ixgbe_82598.o ixgbe_phy.o ixgbe_dcb.o ixgbe_dcb_82598.o \
2239 + ixgbe_dcb_nl.o
2240 --- /dev/null
2241 +++ b/include/linux/dcbnl.h
2242 @@ -0,0 +1,324 @@
2243 +#ifndef __LINUX_DCBNL_H__
2244 +#define __LINUX_DCBNL_H__
2245 +/*
2246 + * Data Center Bridging (DCB) netlink header
2247 + *
2248 + * Copyright 2008, Lucy Liu <lucy.liu@intel.com>
2249 + */
2250 +
2251 +#define DCB_PROTO_VERSION 1
2252 +
2253 +struct dcbmsg {
2254 + unsigned char dcb_family;
2255 + __u8 cmd;
2256 + __u16 dcb_pad;
2257 +};
2258 +
2259 +/**
2260 + * enum dcbnl_commands - supported DCB commands
2261 + *
2262 + * @DCB_CMD_UNDEFINED: unspecified command to catch errors
2263 + * @DCB_CMD_GSTATE: request the state of DCB in the device
2264 + * @DCB_CMD_SSTATE: set the state of DCB in the device
2265 + * @DCB_CMD_PGTX_GCFG: request the priority group configuration for Tx
2266 + * @DCB_CMD_PGTX_SCFG: set the priority group configuration for Tx
2267 + * @DCB_CMD_PGRX_GCFG: request the priority group configuration for Rx
2268 + * @DCB_CMD_PGRX_SCFG: set the priority group configuration for Rx
2269 + * @DCB_CMD_PFC_GCFG: request the priority flow control configuration
2270 + * @DCB_CMD_PFC_SCFG: set the priority flow control configuration
2271 + * @DCB_CMD_SET_ALL: apply all changes to the underlying device
2272 + * @DCB_CMD_GPERM_HWADDR: get the permanent MAC address of the underlying
2273 + * device. Only useful when using bonding.
2274 + * @DCB_CMD_GCAP: request the DCB capabilities of the device
2275 + * @DCB_CMD_GNUMTCS: get the number of traffic classes currently supported
2276 + * @DCB_CMD_SNUMTCS: set the number of traffic classes
2277 + * @DCB_CMD_GBCN: set backward congestion notification configuration
2278 + * @DCB_CMD_SBCN: get backward congestion notification configration.
2279 + */
2280 +enum dcbnl_commands {
2281 + DCB_CMD_UNDEFINED,
2282 +
2283 + DCB_CMD_GSTATE,
2284 + DCB_CMD_SSTATE,
2285 +
2286 + DCB_CMD_PGTX_GCFG,
2287 + DCB_CMD_PGTX_SCFG,
2288 + DCB_CMD_PGRX_GCFG,
2289 + DCB_CMD_PGRX_SCFG,
2290 +
2291 + DCB_CMD_PFC_GCFG,
2292 + DCB_CMD_PFC_SCFG,
2293 +
2294 + DCB_CMD_SET_ALL,
2295 +
2296 + DCB_CMD_GPERM_HWADDR,
2297 +
2298 + DCB_CMD_GCAP,
2299 +
2300 + DCB_CMD_GNUMTCS,
2301 + DCB_CMD_SNUMTCS,
2302 +
2303 + DCB_CMD_PFC_GSTATE,
2304 + DCB_CMD_PFC_SSTATE,
2305 +
2306 + DCB_CMD_BCN_GCFG,
2307 + DCB_CMD_BCN_SCFG,
2308 +
2309 + __DCB_CMD_ENUM_MAX,
2310 + DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1,
2311 +};
2312 +
2313 +/**
2314 + * enum dcbnl_attrs - DCB top-level netlink attributes
2315 + *
2316 + * @DCB_ATTR_UNDEFINED: unspecified attribute to catch errors
2317 + * @DCB_ATTR_IFNAME: interface name of the underlying device (NLA_STRING)
2318 + * @DCB_ATTR_STATE: enable state of DCB in the device (NLA_U8)
2319 + * @DCB_ATTR_PFC_STATE: enable state of PFC in the device (NLA_U8)
2320 + * @DCB_ATTR_PFC_CFG: priority flow control configuration (NLA_NESTED)
2321 + * @DCB_ATTR_NUM_TC: number of traffic classes supported in the device (NLA_U8)
2322 + * @DCB_ATTR_PG_CFG: priority group configuration (NLA_NESTED)
2323 + * @DCB_ATTR_SET_ALL: bool to commit changes to hardware or not (NLA_U8)
2324 + * @DCB_ATTR_PERM_HWADDR: MAC address of the physical device (NLA_NESTED)
2325 + * @DCB_ATTR_CAP: DCB capabilities of the device (NLA_NESTED)
2326 + * @DCB_ATTR_NUMTCS: number of traffic classes supported (NLA_NESTED)
2327 + * @DCB_ATTR_BCN: backward congestion notification configuration (NLA_NESTED)
2328 + */
2329 +enum dcbnl_attrs {
2330 + DCB_ATTR_UNDEFINED,
2331 +
2332 + DCB_ATTR_IFNAME,
2333 + DCB_ATTR_STATE,
2334 + DCB_ATTR_PFC_STATE,
2335 + DCB_ATTR_PFC_CFG,
2336 + DCB_ATTR_NUM_TC,
2337 + DCB_ATTR_PG_CFG,
2338 + DCB_ATTR_SET_ALL,
2339 + DCB_ATTR_PERM_HWADDR,
2340 + DCB_ATTR_CAP,
2341 + DCB_ATTR_NUMTCS,
2342 + DCB_ATTR_BCN,
2343 +
2344 + __DCB_ATTR_ENUM_MAX,
2345 + DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1,
2346 +};
2347 +
2348 +/**
2349 + * enum dcbnl_pfc_attrs - DCB Priority Flow Control user priority nested attrs
2350 + *
2351 + * @DCB_PFC_UP_ATTR_UNDEFINED: unspecified attribute to catch errors
2352 + * @DCB_PFC_UP_ATTR_0: Priority Flow Control value for User Priority 0 (NLA_U8)
2353 + * @DCB_PFC_UP_ATTR_1: Priority Flow Control value for User Priority 1 (NLA_U8)
2354 + * @DCB_PFC_UP_ATTR_2: Priority Flow Control value for User Priority 2 (NLA_U8)
2355 + * @DCB_PFC_UP_ATTR_3: Priority Flow Control value for User Priority 3 (NLA_U8)
2356 + * @DCB_PFC_UP_ATTR_4: Priority Flow Control value for User Priority 4 (NLA_U8)
2357 + * @DCB_PFC_UP_ATTR_5: Priority Flow Control value for User Priority 5 (NLA_U8)
2358 + * @DCB_PFC_UP_ATTR_6: Priority Flow Control value for User Priority 6 (NLA_U8)
2359 + * @DCB_PFC_UP_ATTR_7: Priority Flow Control value for User Priority 7 (NLA_U8)
2360 + * @DCB_PFC_UP_ATTR_MAX: highest attribute number currently defined
2361 + * @DCB_PFC_UP_ATTR_ALL: apply to all priority flow control attrs (NLA_FLAG)
2362 + *
2363 + */
2364 +enum dcbnl_pfc_up_attrs {
2365 + DCB_PFC_UP_ATTR_UNDEFINED,
2366 +
2367 + DCB_PFC_UP_ATTR_0,
2368 + DCB_PFC_UP_ATTR_1,
2369 + DCB_PFC_UP_ATTR_2,
2370 + DCB_PFC_UP_ATTR_3,
2371 + DCB_PFC_UP_ATTR_4,
2372 + DCB_PFC_UP_ATTR_5,
2373 + DCB_PFC_UP_ATTR_6,
2374 + DCB_PFC_UP_ATTR_7,
2375 + DCB_PFC_UP_ATTR_ALL,
2376 +
2377 + __DCB_PFC_UP_ATTR_ENUM_MAX,
2378 + DCB_PFC_UP_ATTR_MAX = __DCB_PFC_UP_ATTR_ENUM_MAX - 1,
2379 +};
2380 +
2381 +/**
2382 + * enum dcbnl_pg_attrs - DCB Priority Group attributes
2383 + *
2384 + * @DCB_PG_ATTR_UNDEFINED: unspecified attribute to catch errors
2385 + * @DCB_PG_ATTR_TC_0: Priority Group Traffic Class 0 configuration (NLA_NESTED)
2386 + * @DCB_PG_ATTR_TC_1: Priority Group Traffic Class 1 configuration (NLA_NESTED)
2387 + * @DCB_PG_ATTR_TC_2: Priority Group Traffic Class 2 configuration (NLA_NESTED)
2388 + * @DCB_PG_ATTR_TC_3: Priority Group Traffic Class 3 configuration (NLA_NESTED)
2389 + * @DCB_PG_ATTR_TC_4: Priority Group Traffic Class 4 configuration (NLA_NESTED)
2390 + * @DCB_PG_ATTR_TC_5: Priority Group Traffic Class 5 configuration (NLA_NESTED)
2391 + * @DCB_PG_ATTR_TC_6: Priority Group Traffic Class 6 configuration (NLA_NESTED)
2392 + * @DCB_PG_ATTR_TC_7: Priority Group Traffic Class 7 configuration (NLA_NESTED)
2393 + * @DCB_PG_ATTR_TC_MAX: highest attribute number currently defined
2394 + * @DCB_PG_ATTR_TC_ALL: apply to all traffic classes (NLA_NESTED)
2395 + * @DCB_PG_ATTR_BW_ID_0: Percent of link bandwidth for Priority Group 0 (NLA_U8)
2396 + * @DCB_PG_ATTR_BW_ID_1: Percent of link bandwidth for Priority Group 1 (NLA_U8)
2397 + * @DCB_PG_ATTR_BW_ID_2: Percent of link bandwidth for Priority Group 2 (NLA_U8)
2398 + * @DCB_PG_ATTR_BW_ID_3: Percent of link bandwidth for Priority Group 3 (NLA_U8)
2399 + * @DCB_PG_ATTR_BW_ID_4: Percent of link bandwidth for Priority Group 4 (NLA_U8)
2400 + * @DCB_PG_ATTR_BW_ID_5: Percent of link bandwidth for Priority Group 5 (NLA_U8)
2401 + * @DCB_PG_ATTR_BW_ID_6: Percent of link bandwidth for Priority Group 6 (NLA_U8)
2402 + * @DCB_PG_ATTR_BW_ID_7: Percent of link bandwidth for Priority Group 7 (NLA_U8)
2403 + * @DCB_PG_ATTR_BW_ID_MAX: highest attribute number currently defined
2404 + * @DCB_PG_ATTR_BW_ID_ALL: apply to all priority groups (NLA_FLAG)
2405 + *
2406 + */
2407 +enum dcbnl_pg_attrs {
2408 + DCB_PG_ATTR_UNDEFINED,
2409 +
2410 + DCB_PG_ATTR_TC_0,
2411 + DCB_PG_ATTR_TC_1,
2412 + DCB_PG_ATTR_TC_2,
2413 + DCB_PG_ATTR_TC_3,
2414 + DCB_PG_ATTR_TC_4,
2415 + DCB_PG_ATTR_TC_5,
2416 + DCB_PG_ATTR_TC_6,
2417 + DCB_PG_ATTR_TC_7,
2418 + DCB_PG_ATTR_TC_MAX,
2419 + DCB_PG_ATTR_TC_ALL,
2420 +
2421 + DCB_PG_ATTR_BW_ID_0,
2422 + DCB_PG_ATTR_BW_ID_1,
2423 + DCB_PG_ATTR_BW_ID_2,
2424 + DCB_PG_ATTR_BW_ID_3,
2425 + DCB_PG_ATTR_BW_ID_4,
2426 + DCB_PG_ATTR_BW_ID_5,
2427 + DCB_PG_ATTR_BW_ID_6,
2428 + DCB_PG_ATTR_BW_ID_7,
2429 + DCB_PG_ATTR_BW_ID_MAX,
2430 + DCB_PG_ATTR_BW_ID_ALL,
2431 +
2432 + __DCB_PG_ATTR_ENUM_MAX,
2433 + DCB_PG_ATTR_MAX = __DCB_PG_ATTR_ENUM_MAX - 1,
2434 +};
2435 +
2436 +/**
2437 + * enum dcbnl_tc_attrs - DCB Traffic Class attributes
2438 + *
2439 + * @DCB_TC_ATTR_PARAM_UNDEFINED: unspecified attribute to catch errors
2440 + * @DCB_TC_ATTR_PARAM_PGID: (NLA_U8) Priority group the traffic class belongs to
2441 + * Valid values are: 0-7
2442 + * @DCB_TC_ATTR_PARAM_UP_MAPPING: (NLA_U8) Traffic class to user priority map
2443 + * Some devices may not support changing the
2444 + * user priority map of a TC.
2445 + * @DCB_TC_ATTR_PARAM_STRICT_PRIO: (NLA_U8) Strict priority setting
2446 + * 0 - none
2447 + * 1 - group strict
2448 + * 2 - link strict
2449 + * @DCB_TC_ATTR_PARAM_BW_PCT: optional - (NLA_U8) If supported by the device and
2450 + * not configured to use link strict priority,
2451 + * this is the percentage of bandwidth of the
2452 + * priority group this traffic class belongs to
2453 + * @DCB_TC_ATTR_PARAM_ALL: (NLA_FLAG) all traffic class parameters
2454 + *
2455 + */
2456 +enum dcbnl_tc_attrs {
2457 + DCB_TC_ATTR_PARAM_UNDEFINED,
2458 +
2459 + DCB_TC_ATTR_PARAM_PGID,
2460 + DCB_TC_ATTR_PARAM_UP_MAPPING,
2461 + DCB_TC_ATTR_PARAM_STRICT_PRIO,
2462 + DCB_TC_ATTR_PARAM_BW_PCT,
2463 + DCB_TC_ATTR_PARAM_ALL,
2464 +
2465 + __DCB_TC_ATTR_PARAM_ENUM_MAX,
2466 + DCB_TC_ATTR_PARAM_MAX = __DCB_TC_ATTR_PARAM_ENUM_MAX - 1,
2467 +};
2468 +
2469 +/**
2470 + * enum dcbnl_cap_attrs - DCB Capability attributes
2471 + *
2472 + * @DCB_CAP_ATTR_UNDEFINED: unspecified attribute to catch errors
2473 + * @DCB_CAP_ATTR_ALL: (NLA_FLAG) all capability parameters
2474 + * @DCB_CAP_ATTR_PG: (NLA_U8) device supports Priority Groups
2475 + * @DCB_CAP_ATTR_PFC: (NLA_U8) device supports Priority Flow Control
2476 + * @DCB_CAP_ATTR_UP2TC: (NLA_U8) device supports user priority to
2477 + * traffic class mapping
2478 + * @DCB_CAP_ATTR_PG_TCS: (NLA_U8) bitmap where each bit represents a
2479 + * number of traffic classes the device
2480 + * can be configured to use for Priority Groups
2481 + * @DCB_CAP_ATTR_PFC_TCS: (NLA_U8) bitmap where each bit represents a
2482 + * number of traffic classes the device can be
2483 + * configured to use for Priority Flow Control
2484 + * @DCB_CAP_ATTR_GSP: (NLA_U8) device supports group strict priority
2485 + * @DCB_CAP_ATTR_BCN: (NLA_U8) device supports Backwards Congestion
2486 + * Notification
2487 + */
2488 +enum dcbnl_cap_attrs {
2489 + DCB_CAP_ATTR_UNDEFINED,
2490 + DCB_CAP_ATTR_ALL,
2491 + DCB_CAP_ATTR_PG,
2492 + DCB_CAP_ATTR_PFC,
2493 + DCB_CAP_ATTR_UP2TC,
2494 + DCB_CAP_ATTR_PG_TCS,
2495 + DCB_CAP_ATTR_PFC_TCS,
2496 + DCB_CAP_ATTR_GSP,
2497 + DCB_CAP_ATTR_BCN,
2498 +
2499 + __DCB_CAP_ATTR_ENUM_MAX,
2500 + DCB_CAP_ATTR_MAX = __DCB_CAP_ATTR_ENUM_MAX - 1,
2501 +};
2502 +
2503 +/**
2504 + * enum dcbnl_numtcs_attrs - number of traffic classes
2505 + *
2506 + * @DCB_NUMTCS_ATTR_UNDEFINED: unspecified attribute to catch errors
2507 + * @DCB_NUMTCS_ATTR_ALL: (NLA_FLAG) all traffic class attributes
2508 + * @DCB_NUMTCS_ATTR_PG: (NLA_U8) number of traffic classes used for
2509 + * priority groups
2510 + * @DCB_NUMTCS_ATTR_PFC: (NLA_U8) number of traffic classes which can
2511 + * support priority flow control
2512 + */
2513 +enum dcbnl_numtcs_attrs {
2514 + DCB_NUMTCS_ATTR_UNDEFINED,
2515 + DCB_NUMTCS_ATTR_ALL,
2516 + DCB_NUMTCS_ATTR_PG,
2517 + DCB_NUMTCS_ATTR_PFC,
2518 +
2519 + __DCB_NUMTCS_ATTR_ENUM_MAX,
2520 + DCB_NUMTCS_ATTR_MAX = __DCB_NUMTCS_ATTR_ENUM_MAX - 1,
2521 +};
2522 +
2523 +enum dcbnl_bcn_attrs{
2524 + DCB_BCN_ATTR_UNDEFINED = 0,
2525 +
2526 + DCB_BCN_ATTR_RP_0,
2527 + DCB_BCN_ATTR_RP_1,
2528 + DCB_BCN_ATTR_RP_2,
2529 + DCB_BCN_ATTR_RP_3,
2530 + DCB_BCN_ATTR_RP_4,
2531 + DCB_BCN_ATTR_RP_5,
2532 + DCB_BCN_ATTR_RP_6,
2533 + DCB_BCN_ATTR_RP_7,
2534 + DCB_BCN_ATTR_RP_ALL,
2535 +
2536 + DCB_BCN_ATTR_ALPHA,
2537 + DCB_BCN_ATTR_BETA,
2538 + DCB_BCN_ATTR_GD,
2539 + DCB_BCN_ATTR_GI,
2540 + DCB_BCN_ATTR_TMAX,
2541 + DCB_BCN_ATTR_TD,
2542 + DCB_BCN_ATTR_RMIN,
2543 + DCB_BCN_ATTR_W,
2544 + DCB_BCN_ATTR_RD,
2545 + DCB_BCN_ATTR_RU,
2546 + DCB_BCN_ATTR_WRTT,
2547 + DCB_BCN_ATTR_RI,
2548 + DCB_BCN_ATTR_C,
2549 + DCB_BCN_ATTR_ALL,
2550 +
2551 + __DCB_BCN_ATTR_ENUM_MAX,
2552 + DCB_BCN_ATTR_MAX = __DCB_BCN_ATTR_ENUM_MAX - 1,
2553 +};
2554 +
2555 +/**
2556 + * enum dcb_general_attr_values - general DCB attribute values
2557 + *
2558 + * @DCB_ATTR_UNDEFINED: value used to indicate an attribute is not supported
2559 + *
2560 + */
2561 +enum dcb_general_attr_values {
2562 + DCB_ATTR_VALUE_UNDEFINED = 0xff
2563 +};
2564 +
2565 +
2566 +#endif /* __LINUX_DCBNL_H__ */
2567 --- a/include/linux/netdevice.h
2568 +++ b/include/linux/netdevice.h
2569 @@ -42,6 +42,9 @@
2570 #include <linux/workqueue.h>
2571
2572 #include <net/net_namespace.h>
2573 +#ifdef CONFIG_DCBNL
2574 +#include <net/dcbnl.h>
2575 +#endif
2576
2577 struct vlan_group;
2578 struct ethtool_ops;
2579 @@ -749,6 +752,11 @@ struct net_device
2580 /* for setting kernel sock attribute on TCP connection setup */
2581 #define GSO_MAX_SIZE 65536
2582 unsigned int gso_max_size;
2583 +
2584 +#ifdef CONFIG_DCBNL
2585 + /* Data Center Bridging netlink ops */
2586 + struct dcbnl_rtnl_ops *dcbnl_ops;
2587 +#endif
2588 };
2589 #define to_net_dev(d) container_of(d, struct net_device, dev)
2590
2591 --- a/include/linux/rtnetlink.h
2592 +++ b/include/linux/rtnetlink.h
2593 @@ -107,6 +107,11 @@ enum {
2594 RTM_GETADDRLABEL,
2595 #define RTM_GETADDRLABEL RTM_GETADDRLABEL
2596
2597 + RTM_GETDCB = 78,
2598 +#define RTM_GETDCB RTM_GETDCB
2599 + RTM_SETDCB,
2600 +#define RTM_SETDCB RTM_SETDCB
2601 +
2602 __RTM_MAX,
2603 #define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
2604 };
2605 --- /dev/null
2606 +++ b/include/net/dcbnl.h
2607 @@ -0,0 +1,40 @@
2608 +#ifndef __NET_DCBNL_H__
2609 +#define __NET_DCBNL_H__
2610 +/*
2611 + * Data Center Bridging (DCB) netlink operations
2612 + *
2613 + * Copyright 2008, Lucy Liu <lucy.liu@intel.com>
2614 + */
2615 +
2616 +
2617 +/*
2618 + * Ops struct for the netlink callbacks. Used by DCB-enabled drivers through
2619 + * the netdevice struct.
2620 + */
2621 +struct dcbnl_rtnl_ops {
2622 + u8 (*getstate)(struct net_device *);
2623 + void (*setstate)(struct net_device *, u8);
2624 + void (*getpermhwaddr)(struct net_device *, u8 *);
2625 + void (*setpgtccfgtx)(struct net_device *, int, u8, u8, u8, u8);
2626 + void (*setpgbwgcfgtx)(struct net_device *, int, u8);
2627 + void (*setpgtccfgrx)(struct net_device *, int, u8, u8, u8, u8);
2628 + void (*setpgbwgcfgrx)(struct net_device *, int, u8);
2629 + void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);
2630 + void (*getpgbwgcfgtx)(struct net_device *, int, u8 *);
2631 + void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);
2632 + void (*getpgbwgcfgrx)(struct net_device *, int, u8 *);
2633 + void (*setpfccfg)(struct net_device *, int, u8);
2634 + void (*getpfccfg)(struct net_device *, int, u8 *);
2635 + u8 (*setall)(struct net_device *);
2636 + u8 (*getcap)(struct net_device *, int, u8 *);
2637 + u8 (*getnumtcs)(struct net_device *, int, u8 *);
2638 + u8 (*setnumtcs)(struct net_device *, int, u8);
2639 + u8 (*getpfcstate)(struct net_device *);
2640 + void (*setpfcstate)(struct net_device *, u8);
2641 + void (*getbcncfg)(struct net_device *, int, u32 *);
2642 + void (*setbcncfg)(struct net_device *, int, u32);
2643 + void (*getbcnrp)(struct net_device *, int, u8 *);
2644 + void (*setbcnrp)(struct net_device *, int, u8);
2645 +};
2646 +
2647 +#endif /* __NET_DCBNL_H__ */
2648 --- /dev/null
2649 +++ b/net/dcb/dcbnl.c
2650 @@ -0,0 +1,1091 @@
2651 +/*
2652 + * This is the Data Center Bridging configuration interface.
2653 + *
2654 + * Copyright 2008, Lucy Liu <lucy.liu@intel.com>
2655 + *
2656 + */
2657 +
2658 +#include <linux/netdevice.h>
2659 +#include <linux/netlink.h>
2660 +#include <net/netlink.h>
2661 +#include <net/rtnetlink.h>
2662 +#include <linux/dcbnl.h>
2663 +#include <linux/rtnetlink.h>
2664 +#include <net/sock.h>
2665 +
2666 +MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
2667 +MODULE_DESCRIPTION("Data Center Bridging generic netlink interface");
2668 +MODULE_LICENSE("GPL");
2669 +
2670 +/**************** DCB attribute policies *************************************/
2671 +
2672 +/* DCB netlink attributes policy */
2673 +static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
2674 + [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
2675 + [DCB_ATTR_STATE] = {.type = NLA_U8},
2676 + [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
2677 + [DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
2678 + [DCB_ATTR_SET_ALL] = {.type = NLA_U8},
2679 + [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
2680 + [DCB_ATTR_CAP] = {.type = NLA_NESTED},
2681 + [DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
2682 + [DCB_ATTR_BCN] = {.type = NLA_NESTED},
2683 +};
2684 +
2685 +/* DCB priority flow control to User Priority nested attributes */
2686 +static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
2687 + [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
2688 + [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
2689 + [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
2690 + [DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
2691 + [DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
2692 + [DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
2693 + [DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
2694 + [DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
2695 + [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
2696 +};
2697 +
2698 +/* DCB priority grouping nested attributes */
2699 +static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
2700 + [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
2701 + [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
2702 + [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
2703 + [DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
2704 + [DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
2705 + [DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
2706 + [DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
2707 + [DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
2708 + [DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
2709 + [DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
2710 + [DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
2711 + [DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
2712 + [DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
2713 + [DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
2714 + [DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
2715 + [DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
2716 + [DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
2717 + [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
2718 +};
2719 +
2720 +/* DCB traffic class nested attributes. */
2721 +static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
2722 + [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
2723 + [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
2724 + [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
2725 + [DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
2726 + [DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
2727 +};
2728 +
2729 +/* DCB capabilities nested attributes. */
2730 +static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
2731 + [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
2732 + [DCB_CAP_ATTR_PG] = {.type = NLA_U8},
2733 + [DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
2734 + [DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
2735 + [DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
2736 + [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
2737 + [DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
2738 + [DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
2739 +};
2740 +
2741 +/* DCB number of traffic classes nested attributes. */
2742 +static struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
2743 + [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
2744 + [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
2745 + [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
2746 +};
2747 +
2748 +/* DCB BCN nested attributes. */
2749 +static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
2750 + [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
2751 + [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
2752 + [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
2753 + [DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
2754 + [DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
2755 + [DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
2756 + [DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
2757 + [DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
2758 + [DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
2759 + [DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
2760 + [DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
2761 + [DCB_BCN_ATTR_GD] = {.type = NLA_U32},
2762 + [DCB_BCN_ATTR_GI] = {.type = NLA_U32},
2763 + [DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
2764 + [DCB_BCN_ATTR_TD] = {.type = NLA_U32},
2765 + [DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
2766 + [DCB_BCN_ATTR_W] = {.type = NLA_U32},
2767 + [DCB_BCN_ATTR_RD] = {.type = NLA_U32},
2768 + [DCB_BCN_ATTR_RU] = {.type = NLA_U32},
2769 + [DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
2770 + [DCB_BCN_ATTR_RI] = {.type = NLA_U32},
2771 + [DCB_BCN_ATTR_C] = {.type = NLA_U32},
2772 + [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
2773 +};
2774 +
2775 +/* standard netlink reply call: acks a DCB request with a single u8 attr */
2776 +static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
2777 + u32 seq, u16 flags)
2778 +{
2779 + struct sk_buff *dcbnl_skb;
2780 + struct dcbmsg *dcb;
2781 + struct nlmsghdr *nlh;
2782 + int ret = -EINVAL;
2783 +
2784 + dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2785 + if (!dcbnl_skb)
2786 + return ret;
2787 +
2788 + nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);
2789 +
2790 + dcb = NLMSG_DATA(nlh);
2791 + dcb->dcb_family = AF_UNSPEC;
2792 + dcb->cmd = cmd;
2793 + dcb->dcb_pad = 0;
2794 +
2795 + ret = nla_put_u8(dcbnl_skb, attr, value);
2796 + if (ret)
2797 + goto err;
2798 +
2799 + /* end the message, assign the nlmsg_len. */
2800 + nlmsg_end(dcbnl_skb, nlh);
2801 + ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
2802 + if (ret) /* rtnl_unicast() consumes the skb even on failure: no free here */
2803 + return ret;
2804 +
2805 + return 0;
2806 +nlmsg_failure:
2807 +err:
2808 + kfree_skb(dcbnl_skb); /* was kfree(): sk_buffs must go through kfree_skb() */
2809 + return ret;
2810 +}
2811 +
2812 +static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
2813 + u32 pid, u32 seq, u16 flags) /* DCB_CMD_GSTATE: report DCB enable state */
2814 +{
2815 + int ret = -EINVAL;
2816 +
2817 + /* DCB_ATTR_STATE is not needed for a GET; only the driver callback is */
2818 + if (!netdev->dcbnl_ops->getstate)
2819 + return ret;
2820 +
2821 + ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
2822 + DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);
2823 +
2824 + return ret;
2825 +}
2826 +
2827 +static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
2828 + u32 pid, u32 seq, u16 flags) /* DCB_CMD_PFC_GCFG: per-UP PFC settings */
2829 +{
2830 + struct sk_buff *dcbnl_skb;
2831 + struct nlmsghdr *nlh;
2832 + struct dcbmsg *dcb;
2833 + struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
2834 + u8 value;
2835 + int ret = -EINVAL;
2836 + int i;
2837 + int getall = 0;
2838 +
2839 + if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
2840 + return ret;
2841 +
2842 + ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
2843 + tb[DCB_ATTR_PFC_CFG],
2844 + dcbnl_pfc_up_nest);
2845 + if (ret)
2846 + goto err_out;
2847 +
2848 + dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2849 + if (!dcbnl_skb)
2850 + goto err_out;
2851 +
2852 + nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
2853 +
2854 + dcb = NLMSG_DATA(nlh);
2855 + dcb->dcb_family = AF_UNSPEC;
2856 + dcb->cmd = DCB_CMD_PFC_GCFG;
2857 +
2858 + nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
2859 + if (!nest)
2860 + goto err;
2861 +
2862 + if (data[DCB_PFC_UP_ATTR_ALL])
2863 + getall = 1;
2864 +
2865 + for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
2866 + if (!getall && !data[i])
2867 + continue;
2868 +
2869 + netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
2870 + &value);
2871 + ret = nla_put_u8(dcbnl_skb, i, value);
2872 +
2873 + if (ret) {
2874 + nla_nest_cancel(dcbnl_skb, nest);
2875 + goto err;
2876 + }
2877 + }
2878 + nla_nest_end(dcbnl_skb, nest);
2879 +
2880 + nlmsg_end(dcbnl_skb, nlh);
2881 +
2882 + ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
2883 + if (ret) /* skb consumed by rtnl_unicast() even on failure: don't free */
2884 + goto err_out;
2885 +
2886 + return 0;
2887 +nlmsg_failure:
2888 +err:
2889 + kfree_skb(dcbnl_skb); /* was kfree(): skbs must be freed with kfree_skb() */
2890 +err_out:
2891 + return -EINVAL;
2892 +}
2893 +
2894 +static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
2895 + u32 pid, u32 seq, u16 flags) /* DCB_CMD_GPERM_HWADDR: permanent MAC */
2896 +{
2897 + struct sk_buff *dcbnl_skb;
2898 + struct nlmsghdr *nlh;
2899 + struct dcbmsg *dcb;
2900 + u8 perm_addr[MAX_ADDR_LEN];
2901 + int ret = -EINVAL;
2902 +
2903 + if (!netdev->dcbnl_ops->getpermhwaddr)
2904 + return ret;
2905 +
2906 + dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2907 + if (!dcbnl_skb)
2908 + goto err_out;
2909 +
2910 + nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
2911 +
2912 + dcb = NLMSG_DATA(nlh);
2913 + dcb->dcb_family = AF_UNSPEC;
2914 + dcb->cmd = DCB_CMD_GPERM_HWADDR;
2915 + /* zero-fill: all MAX_ADDR_LEN bytes are sent; avoid a stack info leak */
2916 + memset(perm_addr, 0, sizeof(perm_addr));
2917 + netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
2918 +
2919 + ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
2920 + perm_addr);
2921 + if (ret) /* was unchecked: don't send a half-built message */
2922 + goto err;
2923 + nlmsg_end(dcbnl_skb, nlh);
2924 + ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
2925 + if (ret) /* skb consumed by rtnl_unicast() even on failure: don't free */
2926 + goto err_out;
2927 + return 0;
2928 +
2929 +nlmsg_failure:
2930 +err:
2931 + kfree_skb(dcbnl_skb); /* was kfree() */
2932 +err_out:
2933 + return -EINVAL;
2934 +}
2935 +
2936 +static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
2937 + u32 pid, u32 seq, u16 flags) /* DCB_CMD_GCAP: device DCB capabilities */
2938 +{
2939 + struct sk_buff *dcbnl_skb;
2940 + struct nlmsghdr *nlh;
2941 + struct dcbmsg *dcb;
2942 + struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
2943 + u8 value;
2944 + int ret = -EINVAL;
2945 + int i;
2946 + int getall = 0;
2947 +
2948 + if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
2949 + return ret;
2950 +
2951 + ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
2952 + dcbnl_cap_nest);
2953 + if (ret)
2954 + goto err_out;
2955 +
2956 + dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2957 + if (!dcbnl_skb)
2958 + goto err_out;
2959 +
2960 + nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
2961 +
2962 + dcb = NLMSG_DATA(nlh);
2963 + dcb->dcb_family = AF_UNSPEC;
2964 + dcb->cmd = DCB_CMD_GCAP;
2965 +
2966 + nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
2967 + if (!nest)
2968 + goto err;
2969 +
2970 + if (data[DCB_CAP_ATTR_ALL])
2971 + getall = 1;
2972 +
2973 + for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
2974 + if (!getall && !data[i])
2975 + continue;
2976 +
2977 + if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
2978 + ret = nla_put_u8(dcbnl_skb, i, value);
2979 +
2980 + if (ret) {
2981 + nla_nest_cancel(dcbnl_skb, nest);
2982 + goto err;
2983 + }
2984 + }
2985 + }
2986 + nla_nest_end(dcbnl_skb, nest);
2987 +
2988 + nlmsg_end(dcbnl_skb, nlh);
2989 +
2990 + ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
2991 + if (ret) /* skb consumed by rtnl_unicast() even on failure: don't free */
2992 + goto err_out;
2993 +
2994 + return 0;
2995 +nlmsg_failure:
2996 +err:
2997 + kfree_skb(dcbnl_skb); /* was kfree() */
2998 +err_out:
2999 + return -EINVAL;
3000 +}
3001 +
3002 +static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
3003 + u32 pid, u32 seq, u16 flags) /* DCB_CMD_GNUMTCS: supported TC counts */
3004 +{
3005 + struct sk_buff *dcbnl_skb;
3006 + struct nlmsghdr *nlh;
3007 + struct dcbmsg *dcb;
3008 + struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
3009 + u8 value;
3010 + int ret = -EINVAL;
3011 + int i;
3012 + int getall = 0;
3013 +
3014 + if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
3015 + return ret;
3016 +
3017 + ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
3018 + dcbnl_numtcs_nest);
3019 + if (ret) {
3020 + ret = -EINVAL;
3021 + goto err_out;
3022 + }
3023 +
3024 + dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3025 + if (!dcbnl_skb) {
3026 + ret = -EINVAL;
3027 + goto err_out;
3028 + }
3029 +
3030 + nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
3031 +
3032 + dcb = NLMSG_DATA(nlh);
3033 + dcb->dcb_family = AF_UNSPEC;
3034 + dcb->cmd = DCB_CMD_GNUMTCS;
3035 +
3036 + nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
3037 + if (!nest) {
3038 + ret = -EINVAL;
3039 + goto err;
3040 + }
3041 +
3042 + if (data[DCB_NUMTCS_ATTR_ALL])
3043 + getall = 1;
3044 +
3045 + for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
3046 + if (!getall && !data[i])
3047 + continue;
3048 +
3049 + ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
3050 + if (!ret) {
3051 + ret = nla_put_u8(dcbnl_skb, i, value);
3052 +
3053 + if (ret) {
3054 + nla_nest_cancel(dcbnl_skb, nest);
3055 + ret = -EINVAL;
3056 + goto err;
3057 + }
3058 + } else {
3059 + goto err;
3060 + }
3061 + }
3062 + nla_nest_end(dcbnl_skb, nest);
3063 +
3064 + nlmsg_end(dcbnl_skb, nlh);
3065 +
3066 + ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
3067 + if (ret) { /* skb consumed by rtnl_unicast() even on failure: don't free */
3068 + ret = -EINVAL;
3069 + goto err_out;
3070 + }
3071 +
3072 + return 0;
3073 +nlmsg_failure:
3074 +err:
3075 + kfree_skb(dcbnl_skb); /* was kfree() */
3076 +err_out:
3077 + return ret;
3078 +}
3079 +
3080 +static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
3081 + u32 pid, u32 seq, u16 flags) /* DCB_CMD_SNUMTCS: set TC counts */
3082 +{
3083 + struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
3084 + int ret = -EINVAL;
3085 + u8 value;
3086 + int i;
3087 +
3088 + /* was: ->setstate -- the wrong callback was being checked here */
3089 + if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
3090 + return ret;
3091 +
3092 + ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
3093 + dcbnl_numtcs_nest);
3094 +
3095 + if (ret) {
3096 + ret = -EINVAL;
3097 + goto err;
3098 + }
3099 +
3100 + for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
3101 + if (data[i] == NULL)
3102 + continue;
3103 +
3104 + value = nla_get_u8(data[i]);
3105 +
3106 + ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
3107 +
3108 + if (ret)
3109 + goto operr;
3110 + }
3111 +
3112 +operr:
3113 + /* ack with the operation status; the stray nla_get_u8(tb[DCB_ATTR_STATE])
3114 + * and the extra setnumtcs() call with a stale loop index that used to
3115 + * sit here were copy/paste bugs.
3116 + */
3117 + ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
3118 + DCB_ATTR_NUMTCS, pid, seq, flags);
3119 +
3120 +err:
3121 + return ret;
3122 +}
3122 +
3123 +static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
3124 + u32 pid, u32 seq, u16 flags) /* DCB_CMD_PFC_GSTATE: PFC enable state */
3125 +{
3126 + int ret = -EINVAL;
3127 +
3128 + if (!netdev->dcbnl_ops->getpfcstate)
3129 + return ret;
3130 +
3131 + ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
3132 + DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
3133 + pid, seq, flags);
3134 +
3135 + return ret;
3136 +}
3137 +
3138 +static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
3139 + u32 pid, u32 seq, u16 flags) /* DCB_CMD_PFC_SSTATE: set PFC state */
3140 +{
3141 + int ret = -EINVAL;
3142 + u8 value;
3143 +
3144 + if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
3145 + return ret;
3146 +
3147 + value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
3148 + /* was nla_get_u8(tb[DCB_ATTR_STATE]): wrong attribute (copy/paste) */
3149 + netdev->dcbnl_ops->setpfcstate(netdev, value);
3150 + /* was ->setstate(): wrong callback for the PFC state command */
3151 + ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
3152 + pid, seq, flags);
3153 +
3154 + return ret;
3155 +}
3156 +
3157 +static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
3158 + u32 pid, u32 seq, u16 flags, int dir) /* dir: 0 = Tx, 1 = Rx */
3159 +{
3160 + struct sk_buff *dcbnl_skb;
3161 + struct nlmsghdr *nlh;
3162 + struct dcbmsg *dcb;
3163 + struct nlattr *pg_nest, *param_nest, *data;
3164 + struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
3165 + struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
3166 + u8 prio, pgid, tc_pct, up_map;
3167 + int ret = -EINVAL;
3168 + int getall = 0;
3169 + int i;
3170 +
3171 + if (!tb[DCB_ATTR_PG_CFG] ||
3172 + !netdev->dcbnl_ops->getpgtccfgtx ||
3173 + !netdev->dcbnl_ops->getpgtccfgrx ||
3174 + !netdev->dcbnl_ops->getpgbwgcfgtx ||
3175 + !netdev->dcbnl_ops->getpgbwgcfgrx)
3176 + return ret;
3177 +
3178 + ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
3179 + tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
3180 +
3181 + if (ret)
3182 + goto err_out;
3183 +
3184 + dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3185 + if (!dcbnl_skb)
3186 + goto err_out;
3187 +
3188 + nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
3189 +
3190 + dcb = NLMSG_DATA(nlh);
3191 + dcb->dcb_family = AF_UNSPEC;
3192 + dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;
3193 +
3194 + pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
3195 + if (!pg_nest)
3196 + goto err;
3197 +
3198 + if (pg_tb[DCB_PG_ATTR_TC_ALL])
3199 + getall = 1;
3200 +
3201 + for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
3202 + if (!getall && !pg_tb[i])
3203 + continue;
3204 +
3205 + if (pg_tb[DCB_PG_ATTR_TC_ALL])
3206 + data = pg_tb[DCB_PG_ATTR_TC_ALL];
3207 + else
3208 + data = pg_tb[i];
3209 + ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
3210 + data, dcbnl_tc_param_nest);
3211 + if (ret)
3212 + goto err_pg;
3213 +
3214 + param_nest = nla_nest_start(dcbnl_skb, i);
3215 + if (!param_nest)
3216 + goto err_pg;
3217 +
3218 + pgid = DCB_ATTR_VALUE_UNDEFINED;
3219 + prio = DCB_ATTR_VALUE_UNDEFINED;
3220 + tc_pct = DCB_ATTR_VALUE_UNDEFINED;
3221 + up_map = DCB_ATTR_VALUE_UNDEFINED;
3222 +
3223 + if (dir) {
3224 + /* Rx */
3225 + netdev->dcbnl_ops->getpgtccfgrx(netdev,
3226 + i - DCB_PG_ATTR_TC_0, &prio,
3227 + &pgid, &tc_pct, &up_map);
3228 + } else {
3229 + /* Tx */
3230 + netdev->dcbnl_ops->getpgtccfgtx(netdev,
3231 + i - DCB_PG_ATTR_TC_0, &prio,
3232 + &pgid, &tc_pct, &up_map);
3233 + }
3234 +
3235 + if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
3236 + param_tb[DCB_TC_ATTR_PARAM_ALL]) {
3237 + ret = nla_put_u8(dcbnl_skb,
3238 + DCB_TC_ATTR_PARAM_PGID, pgid);
3239 + if (ret)
3240 + goto err_param;
3241 + }
3242 + if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
3243 + param_tb[DCB_TC_ATTR_PARAM_ALL]) {
3244 + ret = nla_put_u8(dcbnl_skb,
3245 + DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
3246 + if (ret)
3247 + goto err_param;
3248 + }
3249 + if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
3250 + param_tb[DCB_TC_ATTR_PARAM_ALL]) {
3251 + ret = nla_put_u8(dcbnl_skb,
3252 + DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
3253 + if (ret)
3254 + goto err_param;
3255 + }
3256 + if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
3257 + param_tb[DCB_TC_ATTR_PARAM_ALL]) {
3258 + ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
3259 + tc_pct);
3260 + if (ret)
3261 + goto err_param;
3262 + }
3263 + nla_nest_end(dcbnl_skb, param_nest);
3264 + }
3265 +
3266 + if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
3267 + getall = 1;
3268 + else
3269 + getall = 0;
3270 +
3271 + for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
3272 + if (!getall && !pg_tb[i])
3273 + continue;
3274 +
3275 + tc_pct = DCB_ATTR_VALUE_UNDEFINED;
3276 +
3277 + if (dir) {
3278 + /* Rx */
3279 + netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
3280 + i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
3281 + } else {
3282 + /* Tx */
3283 + netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
3284 + i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
3285 + }
3286 + ret = nla_put_u8(dcbnl_skb, i, tc_pct);
3287 +
3288 + if (ret)
3289 + goto err_pg;
3290 + }
3291 +
3292 + nla_nest_end(dcbnl_skb, pg_nest);
3293 +
3294 + nlmsg_end(dcbnl_skb, nlh);
3295 +
3296 + ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
3297 + if (ret) /* skb consumed by rtnl_unicast() even on failure: don't free */
3298 + goto err_out;
3299 +
3300 + return 0;
3301 +
3302 +err_param:
3303 + nla_nest_cancel(dcbnl_skb, param_nest);
3304 +err_pg:
3305 + nla_nest_cancel(dcbnl_skb, pg_nest);
3306 +nlmsg_failure:
3307 +err:
3308 + kfree_skb(dcbnl_skb); /* was kfree() */
3309 +err_out:
3310 + ret = -EINVAL;
3311 + return ret;
3312 +}
3313 +
3314 +static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
3315 + u32 pid, u32 seq, u16 flags)
3316 +{
3317 + return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0); /* dir=0: Tx */
3318 +}
3319 +
3320 +static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
3321 + u32 pid, u32 seq, u16 flags)
3322 +{
3323 + return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1); /* dir=1: Rx */
3324 +}
3325 +
3326 +static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
3327 + u32 pid, u32 seq, u16 flags) /* DCB_CMD_SSTATE: enable/disable DCB */
3328 +{
3329 + int ret = -EINVAL;
3330 + u8 value;
3331 +
3332 + if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
3333 + return ret;
3334 +
3335 + value = nla_get_u8(tb[DCB_ATTR_STATE]);
3336 +
3337 + netdev->dcbnl_ops->setstate(netdev, value);
3338 +
3339 + ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
3340 + pid, seq, flags);
3341 +
3342 + return ret;
3343 +}
3344 +
3345 +static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
3346 + u32 pid, u32 seq, u16 flags) /* DCB_CMD_PFC_SCFG: per-UP PFC settings */
3347 +{
3348 + struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
3349 + int i;
3350 + int ret = -EINVAL;
3351 + u8 value;
3352 +
3353 + if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
3354 + return ret;
3355 +
3356 + ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
3357 + tb[DCB_ATTR_PFC_CFG],
3358 + dcbnl_pfc_up_nest);
3359 + if (ret)
3360 + goto err;
3361 +
3362 + for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
3363 + if (data[i] == NULL)
3364 + continue;
3365 + value = nla_get_u8(data[i]);
3366 + netdev->dcbnl_ops->setpfccfg(netdev,
3367 + data[i]->nla_type - DCB_PFC_UP_ATTR_0, value); /* nla_type == i here */
3368 + }
3369 +
3370 + ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
3371 + pid, seq, flags);
3372 +err:
3373 + return ret;
3374 +}
3375 +
3376 +static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
3377 + u32 pid, u32 seq, u16 flags) /* DCB_CMD_SET_ALL: commit staged config */
3378 +{
3379 + int ret = -EINVAL;
3380 +
3381 + if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
3382 + return ret;
3383 +
3384 + ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
3385 + DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
3386 +
3387 + return ret;
3388 +}
3389 +
3390 +static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
3391 + u32 pid, u32 seq, u16 flags, int dir) /* dir: 0 = Tx, 1 = Rx */
3392 +{
3393 + struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
3394 + struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
3395 + int ret = -EINVAL;
3396 + int i;
3397 + u8 pgid;
3398 + u8 up_map;
3399 + u8 prio;
3400 + u8 tc_pct;
3401 +
3402 + if (!tb[DCB_ATTR_PG_CFG] ||
3403 + !netdev->dcbnl_ops->setpgtccfgtx ||
3404 + !netdev->dcbnl_ops->setpgtccfgrx ||
3405 + !netdev->dcbnl_ops->setpgbwgcfgtx ||
3406 + !netdev->dcbnl_ops->setpgbwgcfgrx) /* all four required, both dirs */
3407 + return ret;
3408 +
3409 + ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
3410 + tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
3411 + if (ret)
3412 + goto err;
3413 +
3414 + for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
3415 + if (!pg_tb[i])
3416 + continue;
3417 +
3418 + ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
3419 + pg_tb[i], dcbnl_tc_param_nest);
3420 + if (ret)
3421 + goto err;
3422 +
3423 + pgid = DCB_ATTR_VALUE_UNDEFINED;
3424 + prio = DCB_ATTR_VALUE_UNDEFINED;
3425 + tc_pct = DCB_ATTR_VALUE_UNDEFINED;
3426 + up_map = DCB_ATTR_VALUE_UNDEFINED;
3427 +
3428 + if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
3429 + prio =
3430 + nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
3431 +
3432 + if (param_tb[DCB_TC_ATTR_PARAM_PGID])
3433 + pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
3434 +
3435 + if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
3436 + tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
3437 +
3438 + if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
3439 + up_map =
3440 + nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
3441 +
3442 + /* dir: Tx = 0, Rx = 1 */
3443 + if (dir) {
3444 + /* Rx */
3445 + netdev->dcbnl_ops->setpgtccfgrx(netdev,
3446 + i - DCB_PG_ATTR_TC_0,
3447 + prio, pgid, tc_pct, up_map);
3448 + } else {
3449 + /* Tx */
3450 + netdev->dcbnl_ops->setpgtccfgtx(netdev,
3451 + i - DCB_PG_ATTR_TC_0,
3452 + prio, pgid, tc_pct, up_map);
3453 + }
3454 + }
3455 +
3456 + for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
3457 + if (!pg_tb[i])
3458 + continue;
3459 +
3460 + tc_pct = nla_get_u8(pg_tb[i]);
3461 +
3462 + /* dir: Tx = 0, Rx = 1 */
3463 + if (dir) {
3464 + /* Rx */
3465 + netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
3466 + i - DCB_PG_ATTR_BW_ID_0, tc_pct);
3467 + } else {
3468 + /* Tx */
3469 + netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
3470 + i - DCB_PG_ATTR_BW_ID_0, tc_pct);
3471 + }
3472 + }
3473 +
3474 + ret = dcbnl_reply(0, RTM_SETDCB,
3475 + (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
3476 + DCB_ATTR_PG_CFG, pid, seq, flags);
3477 +
3478 +err:
3479 + return ret;
3480 +}
3481 +
3482 +static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
3483 + u32 pid, u32 seq, u16 flags)
3484 +{
3485 + return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0); /* dir=0: Tx */
3486 +}
3487 +
3488 +static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
3489 + u32 pid, u32 seq, u16 flags)
3490 +{
3491 + return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1); /* dir=1: Rx */
3492 +}
3493 +
3494 +static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
3495 + u32 pid, u32 seq, u16 flags) /* DCB_CMD_BCN_GCFG: BCN params + RP state */
3496 +{
3497 + struct sk_buff *dcbnl_skb;
3498 + struct nlmsghdr *nlh;
3499 + struct dcbmsg *dcb;
3500 + struct nlattr *bcn_nest;
3501 + struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
3502 + u8 value_byte;
3503 + u32 value_integer;
3504 + int ret = -EINVAL;
3505 + bool getall = false;
3506 + int i;
3507 +
3508 + if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
3509 + !netdev->dcbnl_ops->getbcncfg)
3510 + return ret;
3511 +
3512 + ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
3513 + tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
3514 +
3515 + if (ret)
3516 + goto err_out;
3517 +
3518 + dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3519 + if (!dcbnl_skb)
3520 + goto err_out;
3521 +
3522 + nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
3523 +
3524 + dcb = NLMSG_DATA(nlh);
3525 + dcb->dcb_family = AF_UNSPEC;
3526 + dcb->cmd = DCB_CMD_BCN_GCFG;
3527 +
3528 + bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
3529 + if (!bcn_nest)
3530 + goto err;
3531 +
3532 + if (bcn_tb[DCB_BCN_ATTR_ALL])
3533 + getall = true;
3534 +
3535 + for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
3536 + if (!getall && !bcn_tb[i])
3537 + continue;
3538 +
3539 + netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
3540 + &value_byte);
3541 + ret = nla_put_u8(dcbnl_skb, i, value_byte);
3542 + if (ret)
3543 + goto err_bcn;
3544 + }
3545 +
3546 + for (i = DCB_BCN_ATTR_ALPHA; i <= DCB_BCN_ATTR_RI; i++) {
3547 + if (!getall && !bcn_tb[i])
3548 + continue;
3549 +
3550 + netdev->dcbnl_ops->getbcncfg(netdev, i,
3551 + &value_integer);
3552 + ret = nla_put_u32(dcbnl_skb, i, value_integer);
3553 + if (ret)
3554 + goto err_bcn;
3555 + }
3556 +
3557 + nla_nest_end(dcbnl_skb, bcn_nest);
3558 +
3559 + nlmsg_end(dcbnl_skb, nlh);
3560 +
3561 + ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
3562 + if (ret) /* skb consumed by rtnl_unicast() even on failure: don't free */
3563 + goto err_out;
3564 +
3565 + return 0;
3566 +
3567 +err_bcn:
3568 + nla_nest_cancel(dcbnl_skb, bcn_nest);
3569 +nlmsg_failure:
3570 +err:
3571 + kfree_skb(dcbnl_skb); /* was kfree() */
3572 +err_out:
3573 + ret = -EINVAL;
3574 + return ret;
3575 +}
3576 +
3577 +static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
3578 + u32 pid, u32 seq, u16 flags) /* DCB_CMD_BCN_SCFG: set BCN params */
3579 +{
3580 + struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
3581 + int i;
3582 + int ret = -EINVAL;
3583 + u8 value_byte;
3584 + u32 value_int;
3585 +
3586 + if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg
3587 + || !netdev->dcbnl_ops->setbcnrp)
3588 + return ret;
3589 +
3590 + ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
3591 + tb[DCB_ATTR_BCN],
3592 + dcbnl_bcn_nest); /* was dcbnl_pfc_up_nest: wrong policy table */
3593 + if (ret)
3594 + goto err;
3595 +
3596 + for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
3597 + if (data[i] == NULL)
3598 + continue;
3599 + value_byte = nla_get_u8(data[i]);
3600 + netdev->dcbnl_ops->setbcnrp(netdev,
3601 + data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
3602 + }
3603 +
3604 + for (i = DCB_BCN_ATTR_ALPHA; i <= DCB_BCN_ATTR_RI; i++) {
3605 + if (data[i] == NULL)
3606 + continue;
3607 + value_int = nla_get_u32(data[i]);
3608 + netdev->dcbnl_ops->setbcncfg(netdev,
3609 + i, value_int);
3610 + }
3611 +
3612 + ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
3613 + pid, seq, flags);
3614 +err:
3615 + return ret;
3616 +}
3617 +
3618 +static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) /* rtnetlink handler for RTM_GETDCB/RTM_SETDCB */
3619 +{
3620 + struct net *net = sock_net(skb->sk);
3621 + struct net_device *netdev;
3622 + struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
3623 + struct nlattr *tb[DCB_ATTR_MAX + 1];
3624 + u32 pid = skb ? NETLINK_CB(skb).pid : 0;
3625 + int ret = -EINVAL;
3626 +
3627 + if (net != &init_net) /* DCB is only supported in the initial namespace */
3628 + return -EINVAL;
3629 +
3630 + ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
3631 + dcbnl_rtnl_policy);
3632 + if (ret < 0)
3633 + return ret;
3634 +
3635 + if (!tb[DCB_ATTR_IFNAME])
3636 + return -EINVAL;
3637 +
3638 + netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
3639 + if (!netdev)
3640 + return -EINVAL;
3641 +
3642 + if (!netdev->dcbnl_ops) /* not a DCB-capable device */
3643 + goto errout;
3644 +
3645 + switch (dcb->cmd) {
3646 + case DCB_CMD_GSTATE:
3647 + ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
3648 + nlh->nlmsg_flags);
3649 + goto out;
3650 + case DCB_CMD_PFC_GCFG:
3651 + ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
3652 + nlh->nlmsg_flags);
3653 + goto out;
3654 + case DCB_CMD_GPERM_HWADDR:
3655 + ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
3656 + nlh->nlmsg_flags);
3657 + goto out;
3658 + case DCB_CMD_PGTX_GCFG:
3659 + ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
3660 + nlh->nlmsg_flags);
3661 + goto out;
3662 + case DCB_CMD_PGRX_GCFG:
3663 + ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
3664 + nlh->nlmsg_flags);
3665 + goto out;
3666 + case DCB_CMD_BCN_GCFG:
3667 + ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
3668 + nlh->nlmsg_flags);
3669 + goto out;
3670 + case DCB_CMD_SSTATE:
3671 + ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
3672 + nlh->nlmsg_flags);
3673 + goto out;
3674 + case DCB_CMD_PFC_SCFG:
3675 + ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
3676 + nlh->nlmsg_flags);
3677 + goto out;
3678 +
3679 + case DCB_CMD_SET_ALL:
3680 + ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
3681 + nlh->nlmsg_flags);
3682 + goto out;
3683 + case DCB_CMD_PGTX_SCFG:
3684 + ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
3685 + nlh->nlmsg_flags);
3686 + goto out;
3687 + case DCB_CMD_PGRX_SCFG:
3688 + ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
3689 + nlh->nlmsg_flags);
3690 + goto out;
3691 + case DCB_CMD_GCAP:
3692 + ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
3693 + nlh->nlmsg_flags);
3694 + goto out;
3695 + case DCB_CMD_GNUMTCS:
3696 + ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
3697 + nlh->nlmsg_flags);
3698 + goto out;
3699 + case DCB_CMD_SNUMTCS:
3700 + ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
3701 + nlh->nlmsg_flags);
3702 + goto out;
3703 + case DCB_CMD_PFC_GSTATE:
3704 + ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
3705 + nlh->nlmsg_flags);
3706 + goto out;
3707 + case DCB_CMD_PFC_SSTATE:
3708 + ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
3709 + nlh->nlmsg_flags);
3710 + goto out;
3711 + case DCB_CMD_BCN_SCFG:
3712 + ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
3713 + nlh->nlmsg_flags);
3714 + goto out;
3715 + default:
3716 + goto errout;
3717 + }
3718 +errout:
3719 + ret = -EINVAL;
3720 +out:
3721 + dev_put(netdev); /* drop the ref taken by dev_get_by_name() */
3722 + return ret;
3723 +}
3724 +
3725 +static int __init dcbnl_init(void) /* hook DCB commands into rtnetlink */
3726 +{
3727 + rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL);
3728 + rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL);
3729 +
3730 + return 0;
3731 +}
3732 +module_init(dcbnl_init);
3733 +
3734 +static void __exit dcbnl_exit(void) /* unhook DCB commands from rtnetlink */
3735 +{
3736 + rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
3737 + rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
3738 +}
3739 +module_exit(dcbnl_exit);
3740 +
3741 +
3742 --- /dev/null
3743 +++ b/net/dcb/Kconfig
3744 @@ -0,0 +1,12 @@
3745 +config DCB
3746 + tristate "Data Center Bridging support"
3747 +
3748 +config DCBNL
3749 + bool "Data Center Bridging netlink interface support"
3750 + depends on DCB
3751 + default n
3752 + ---help---
3753 + This option turns on the netlink interface
3754 + (dcbnl) for Data Center Bridging capable devices.
3755 +
3756 + If unsure, say N.
3757 --- /dev/null
3758 +++ b/net/dcb/Makefile
3759 @@ -0,0 +1 @@
3760 +obj-$(CONFIG_DCB) += dcbnl.o
3761 --- a/net/Kconfig
3762 +++ b/net/Kconfig
3763 @@ -190,6 +190,7 @@ source "net/lapb/Kconfig"
3764 source "net/econet/Kconfig"
3765 source "net/wanrouter/Kconfig"
3766 source "net/sched/Kconfig"
3767 +source "net/dcb/Kconfig"
3768
3769 menu "Network testing"
3770
3771 --- a/net/Makefile
3772 +++ b/net/Makefile
3773 @@ -55,6 +55,9 @@ obj-$(CONFIG_NETLABEL) += netlabel/
3774 obj-$(CONFIG_IUCV) += iucv/
3775 obj-$(CONFIG_RFKILL) += rfkill/
3776 obj-$(CONFIG_NET_9P) += 9p/
3777 +ifeq ($(CONFIG_DCBNL),y)
3778 +obj-$(CONFIG_DCB) += dcb/
3779 +endif
3780
3781 ifeq ($(CONFIG_NET),y)
3782 obj-$(CONFIG_SYSCTL) += sysctl_net.o