1From 225bdeadbbf71d061cf69bc924f92e8c01540001 Mon Sep 17 00:00:00 2001
2From: Subbu Seetharaman <subbus@serverengines.com>
3Date: Sun, 2 Nov 2008 08:09:57 -0500
4Subject: Staging: Add ServerEngines benet 10Gb ethernet driver
5Patch-mainline: 2.6.29
6
7From: Subbu Seetharaman <subbus@serverengines.com>
8
9Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
10Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
11
12---
13 drivers/staging/Kconfig | 2
14 drivers/staging/Makefile | 1
15 drivers/staging/benet/Kconfig | 7
16 drivers/staging/benet/MAINTAINERS | 6
17 drivers/staging/benet/Makefile | 14
18 drivers/staging/benet/TODO | 7
19 drivers/staging/benet/asyncmesg.h | 98 ++
20 drivers/staging/benet/be_cm.h | 134 ++
21 drivers/staging/benet/be_common.h | 53 +
22 drivers/staging/benet/be_ethtool.c | 348 +++++++
23 drivers/staging/benet/be_init.c | 1381 ++++++++++++++++++++++++++++++
24 drivers/staging/benet/be_int.c | 872 ++++++++++++++++++
25 drivers/staging/benet/be_netif.c | 706 +++++++++++++++
26 drivers/staging/benet/benet.h | 429 +++++++++
27 drivers/staging/benet/bestatus.h | 103 ++
28 drivers/staging/benet/cev.h | 243 +++++
29 drivers/staging/benet/cq.c | 211 ++++
30 drivers/staging/benet/descriptors.h | 71 +
31 drivers/staging/benet/doorbells.h | 179 +++
32 drivers/staging/benet/ep.h | 66 +
33 drivers/staging/benet/eq.c | 299 ++++++
34 drivers/staging/benet/eth.c | 1273 +++++++++++++++++++++++++++
35 drivers/staging/benet/etx_context.h | 55 +
36 drivers/staging/benet/funcobj.c | 565 ++++++++++++
37 drivers/staging/benet/fwcmd_common.h | 222 ++++
38 drivers/staging/benet/fwcmd_common_bmap.h | 717 +++++++++++++++
39 drivers/staging/benet/fwcmd_eth_bmap.h | 280 ++++++
40 drivers/staging/benet/fwcmd_hdr_bmap.h | 54 +
41 drivers/staging/benet/fwcmd_mcc.h | 94 ++
42 drivers/staging/benet/fwcmd_opcodes.h | 244 +++++
43 drivers/staging/benet/fwcmd_types_bmap.h | 29
44 drivers/staging/benet/host_struct.h | 182 +++
45 drivers/staging/benet/hwlib.h | 829 ++++++++++++++++++
46 drivers/staging/benet/mpu.c | 1364 +++++++++++++++++++++++++++++
47 drivers/staging/benet/mpu.h | 74 +
48 drivers/staging/benet/mpu_context.h | 46
49 drivers/staging/benet/pcicfg.h | 825 +++++++++++++++++
50 drivers/staging/benet/post_codes.h | 111 ++
51 drivers/staging/benet/regmap.h | 68 +
52 39 files changed, 12262 insertions(+)
53
54--- /dev/null
55+++ b/drivers/staging/benet/asyncmesg.h
56@@ -0,0 +1,98 @@
57+/*
58+ * Copyright (C) 2005 - 2008 ServerEngines
59+ * All rights reserved.
60+ *
61+ * This program is free software; you can redistribute it and/or
62+ * modify it under the terms of the GNU General Public License version 2
63+ * as published by the Free Software Foundation. The full GNU General
64+ * Public License is included in this distribution in the file called COPYING.
65+ *
66+ * Contact Information:
67+ * linux-drivers@serverengines.com
68+ *
69+ * ServerEngines
70+ * 209 N. Fair Oaks Ave
71+ * Sunnyvale, CA 94085
72+ */
73+/*
74+ * Autogenerated by srcgen version: 0127
75+ */
76+#ifndef __asyncmesg_amap_h__
77+#define __asyncmesg_amap_h__
78+#include "fwcmd_common.h"
79+
80+/* --- ASYNC_EVENT_CODES --- */
81+#define ASYNC_EVENT_CODE_LINK_STATE (1)
82+#define ASYNC_EVENT_CODE_ISCSI (2)
83+
84+/* --- ASYNC_LINK_STATES --- */
85+#define ASYNC_EVENT_LINK_DOWN (0) /* Link Down on a port */
86+#define ASYNC_EVENT_LINK_UP (1) /* Link Up on a port */
87+
88+/*
89+ * The last 4 bytes of the async events have this common format. It allows
90+ * the driver to distinguish MCC_CQ_ENTRY structs from
91+ * asynchronous events. Both arrive on the same completion queue. This
92+ * structure also contains the common fields used to decode the async event.
93+ */
94+struct BE_ASYNC_EVENT_TRAILER_AMAP {
95+ u8 rsvd0[8]; /* DWORD 0 */
96+ u8 event_code[8]; /* DWORD 0 */
97+ u8 event_type[8]; /* DWORD 0 */
98+ u8 rsvd1[6]; /* DWORD 0 */
99+ u8 async_event; /* DWORD 0 */
100+ u8 valid; /* DWORD 0 */
101+} __packed;
102+struct ASYNC_EVENT_TRAILER_AMAP {
103+ u32 dw[1];
104+};
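The trailer occupies the last dword of a completion entry. As a rough illustration only, assuming the usual AMAP convention that fields are listed LSB-first (so event_code sits in bits 8-15, async_event in bit 30 and valid in bit 31), the checks the driver performs through the AMAP_GET_BITS_PTR() accessors amount to:

    static inline int be_is_async_event(u32 trailer_dw)
    {
    	/* both valid (bit 31) and async_event (bit 30) must be set */
    	return (trailer_dw & (1u << 31)) && (trailer_dw & (1u << 30));
    }

    static inline u32 be_async_event_code(u32 trailer_dw)
    {
    	return (trailer_dw >> 8) & 0xff;	/* ASYNC_EVENT_CODE_* */
    }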
105+
106+/*
107+ * Applicable in Initiator, Target and NIC modes.
108+ * A link state async event is seen by all device drivers as soon they
109+ * create an MCC ring. Thereafter, anytime the link status changes the
110+ * drivers will receive a link state async event. Notifications continue to
111+ * be sent until a driver destroys its MCC ring. A link down event is
112+ * reported when either port loses link. A link up event is reported
113+ * when either port regains link. When BE's failover mechanism is enabled, a
114+ * link down on the active port causes traffic to be diverted to the standby
115+ * port by the BE's ARM firmware (assuming the standby port has link). In
116+ * this case, the standby port assumes the active status. Note: when link is
117+ * restored on the failed port, traffic continues on the currently active
118+ * port. The ARM firmware does not attempt to 'fail back' traffic to
119+ * the restored port.
120+ */
121+#if 0
122+struct BE_ASYNC_EVENT_LINK_STATE_AMAP {
123+ struct BE_UEXACT8_AMAP port0_link_status;
124+ struct BE_UEXACT8_AMAP port1_link_status;
125+ struct BE_UEXACT8_AMAP active_port;
126+ u8 rsvd0[8]; /* DWORD 0 */
127+ struct BE_UEXACT8_AMAP port0_duplex;
128+ struct BE_UEXACT8_AMAP port0_speed;
129+ struct BE_UEXACT8_AMAP port1_duplex;
130+ struct BE_UEXACT8_AMAP port1_speed;
131+ struct BE_UEXACT8_AMAP port0_fault;
132+ struct BE_UEXACT8_AMAP port1_fault;
133+ u8 rsvd1[2][8]; /* DWORD 2 */
134+ struct BE_ASYNC_EVENT_TRAILER_AMAP trailer;
135+} __packed;
136+#endif
137+struct BE_ASYNC_EVENT_LINK_STATE_AMAP {
138+ u8 port0_link_status[8];
139+ u8 port1_link_status[8];
140+ u8 active_port[8];
141+ u8 rsvd0[8]; /* DWORD 0 */
142+ u8 port0_duplex[8];
143+ u8 port0_speed[8];
144+ u8 port1_duplex[8];
145+ u8 port1_speed[8];
146+ u8 port0_fault[8];
147+ u8 port1_fault[8];
148+ u8 rsvd1[2][8]; /* DWORD 2 */
149+ struct BE_ASYNC_EVENT_TRAILER_AMAP trailer;
150+} __packed;
151+struct ASYNC_EVENT_LINK_STATE_AMAP {
152+ u32 dw[4];
153+};
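Every field in this event is an 8-bit quantity, so the payload is effectively an array of bytes. A hypothetical raw decode under the same LSB-first assumption (the driver itself uses the AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE, ...) accessors, as seen in be_init.c below):

    static inline u32 link_state_port0_status(struct ASYNC_EVENT_LINK_STATE_AMAP *ev)
    {
    	return ev->dw[0] & 0xff;	/* ASYNC_EVENT_LINK_UP or _DOWN */
    }

    static inline u32 link_state_port1_status(struct ASYNC_EVENT_LINK_STATE_AMAP *ev)
    {
    	return (ev->dw[0] >> 8) & 0xff;
    }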
154+#endif /* __asyncmesg_amap_h__ */
155--- /dev/null
156+++ b/drivers/staging/benet/be_cm.h
157@@ -0,0 +1,134 @@
158+/*
159+ * Copyright (C) 2005 - 2008 ServerEngines
160+ * All rights reserved.
161+ *
162+ * This program is free software; you can redistribute it and/or
163+ * modify it under the terms of the GNU General Public License version 2
164+ * as published by the Free Software Foundation. The full GNU General
165+ * Public License is included in this distribution in the file called COPYING.
166+ *
167+ * Contact Information:
168+ * linux-drivers@serverengines.com
169+ *
170+ * ServerEngines
171+ * 209 N. Fair Oaks Ave
172+ * Sunnyvale, CA 94085
173+ */
174+/*
175+ * Autogenerated by srcgen version: 0127
176+ */
177+#ifndef __be_cm_amap_h__
178+#define __be_cm_amap_h__
179+#include "be_common.h"
180+#include "etx_context.h"
181+#include "mpu_context.h"
182+
183+/*
184+ * --- CEV_WATERMARK_ENUM ---
185+ * CQ/EQ Watermark Encodings. Encoded as number of free entries in
186+ * Queue when Watermark is reached.
187+ */
188+#define CEV_WMARK_0 (0) /* Watermark when Queue full */
189+#define CEV_WMARK_16 (1) /* Watermark at 16 free entries */
190+#define CEV_WMARK_32 (2) /* Watermark at 32 free entries */
191+#define CEV_WMARK_48 (3) /* Watermark at 48 free entries */
192+#define CEV_WMARK_64 (4) /* Watermark at 64 free entries */
193+#define CEV_WMARK_80 (5) /* Watermark at 80 free entries */
194+#define CEV_WMARK_96 (6) /* Watermark at 96 free entries */
195+#define CEV_WMARK_112 (7) /* Watermark at 112 free entries */
196+#define CEV_WMARK_128 (8) /* Watermark at 128 free entries */
197+#define CEV_WMARK_144 (9) /* Watermark at 144 free entries */
198+#define CEV_WMARK_160 (10) /* Watermark at 160 free entries */
199+#define CEV_WMARK_176 (11) /* Watermark at 176 free entries */
200+#define CEV_WMARK_192 (12) /* Watermark at 192 free entries */
201+#define CEV_WMARK_208 (13) /* Watermark at 208 free entries */
202+#define CEV_WMARK_224 (14) /* Watermark at 224 free entries */
203+#define CEV_WMARK_240 (15) /* Watermark at 240 free entries */
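The encoding is linear: code N means N * 16 free entries, with CEV_WMARK_0 firing only when the queue is completely full. A sketch of the mapping:

    static inline u32 cev_wmark_to_free_entries(u32 wmark_code)
    {
    	return wmark_code * 16;	/* CEV_WMARK_96 (6) -> 96 free entries */
    }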
204+
205+/*
206+ * --- CQ_CNT_ENUM ---
207+ * Completion Queue Count Encodings.
208+ */
209+#define CEV_CQ_CNT_256 (0) /* CQ has 256 entries */
210+#define CEV_CQ_CNT_512 (1) /* CQ has 512 entries */
211+#define CEV_CQ_CNT_1024 (2) /* CQ has 1024 entries */
212+
213+/*
214+ * --- EQ_CNT_ENUM ---
215+ * Event Queue Count Encodings.
216+ */
217+#define CEV_EQ_CNT_256 (0) /* EQ has 256 entries (16-byte EQEs only) */
218+#define CEV_EQ_CNT_512 (1) /* EQ has 512 entries (16-byte EQEs only) */
219+#define CEV_EQ_CNT_1024 (2) /* EQ has 1024 entries (4-byte or */
220+ /* 16-byte EQEs only) */
221+#define CEV_EQ_CNT_2048 (3) /* EQ has 2048 entries (4-byte or */
222+ /* 16-byte EQEs only) */
223+#define CEV_EQ_CNT_4096 (4) /* EQ has 4096 entries (4-byte EQEs only) */
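Both count enumerations are powers of two starting at 256, i.e. entries = 256 << code:

    static inline u32 cev_cnt_to_entries(u32 cnt_code)
    {
    	return 256u << cnt_code;	/* CEV_CQ_CNT_1024 (2) -> 1024 entries */
    }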
224+
225+/*
226+ * --- EQ_SIZE_ENUM ---
227+ * Event Queue Entry Size Encoding.
228+ */
229+#define CEV_EQ_SIZE_4 (0) /* EQE is 4 bytes */
230+#define CEV_EQ_SIZE_16 (1) /* EQE is 16 bytes */
231+
232+/*
233+ * Completion Queue Context Table Entry. Contains the state of a CQ.
234+ * Located in RAM within the CEV block.
235+ */
236+struct BE_CQ_CONTEXT_AMAP {
237+ u8 Cidx[11]; /* DWORD 0 */
238+ u8 Watermark[4]; /* DWORD 0 */
239+ u8 NoDelay; /* DWORD 0 */
240+ u8 EPIdx[11]; /* DWORD 0 */
241+ u8 Count[2]; /* DWORD 0 */
242+ u8 valid; /* DWORD 0 */
243+ u8 SolEvent; /* DWORD 0 */
244+ u8 Eventable; /* DWORD 0 */
245+ u8 Pidx[11]; /* DWORD 1 */
246+ u8 PD[10]; /* DWORD 1 */
247+ u8 EQID[7]; /* DWORD 1 */
248+ u8 Func; /* DWORD 1 */
249+ u8 WME; /* DWORD 1 */
250+ u8 Stalled; /* DWORD 1 */
251+ u8 Armed; /* DWORD 1 */
252+} __packed;
253+struct CQ_CONTEXT_AMAP {
254+ u32 dw[2];
255+};
256+
257+/*
258+ * Event Queue Context Table Entry. Contains the state of an EQ.
259+ * Located in RAM in the CEV block.
260+ */
261+struct BE_EQ_CONTEXT_AMAP {
262+ u8 Cidx[13]; /* DWORD 0 */
263+ u8 rsvd0[2]; /* DWORD 0 */
264+ u8 Func; /* DWORD 0 */
265+ u8 EPIdx[13]; /* DWORD 0 */
266+ u8 valid; /* DWORD 0 */
267+ u8 rsvd1; /* DWORD 0 */
268+ u8 Size; /* DWORD 0 */
269+ u8 Pidx[13]; /* DWORD 1 */
270+ u8 rsvd2[3]; /* DWORD 1 */
271+ u8 PD[10]; /* DWORD 1 */
272+ u8 Count[3]; /* DWORD 1 */
273+ u8 SolEvent; /* DWORD 1 */
274+ u8 Stalled; /* DWORD 1 */
275+ u8 Armed; /* DWORD 1 */
276+ u8 Watermark[4]; /* DWORD 2 */
277+ u8 WME; /* DWORD 2 */
278+ u8 rsvd3[3]; /* DWORD 2 */
279+ u8 EventVect[6]; /* DWORD 2 */
280+ u8 rsvd4[2]; /* DWORD 2 */
281+ u8 Delay[8]; /* DWORD 2 */
282+ u8 rsvd5[6]; /* DWORD 2 */
283+ u8 TMR; /* DWORD 2 */
284+ u8 rsvd6; /* DWORD 2 */
285+ u8 rsvd7[32]; /* DWORD 3 */
286+} __packed;
287+struct EQ_CONTEXT_AMAP {
288+ u32 dw[4];
289+};
290+
291+#endif /* __be_cm_amap_h__ */
292--- /dev/null
293+++ b/drivers/staging/benet/be_common.h
294@@ -0,0 +1,53 @@
295+/*
296+ * Copyright (C) 2005 - 2008 ServerEngines
297+ * All rights reserved.
298+ *
299+ * This program is free software; you can redistribute it and/or
300+ * modify it under the terms of the GNU General Public License version 2
301+ * as published by the Free Software Foundation. The full GNU General
302+ * Public License is included in this distribution in the file called COPYING.
303+ *
304+ * Contact Information:
305+ * linux-drivers@serverengines.com
306+ *
307+ * ServerEngines
308+ * 209 N. Fair Oaks Ave
309+ * Sunnyvale, CA 94085
310+ */
311+/*
312+ * Autogenerated by srcgen version: 0127
313+ */
314+#ifndef __be_common_amap_h__
315+#define __be_common_amap_h__
316+
317+/* Physical Address. */
318+struct BE_PHYS_ADDR_AMAP {
319+ u8 lo[32]; /* DWORD 0 */
320+ u8 hi[32]; /* DWORD 1 */
321+} __packed;
322+struct PHYS_ADDR_AMAP {
323+ u32 dw[2];
324+};
325+
326+/* Virtual Address. */
327+struct BE_VIRT_ADDR_AMAP {
328+ u8 lo[32]; /* DWORD 0 */
329+ u8 hi[32]; /* DWORD 1 */
330+} __packed;
331+struct VIRT_ADDR_AMAP {
332+ u32 dw[2];
333+};
334+
335+/* Scatter gather element. */
336+struct BE_SGE_AMAP {
337+ u8 addr_hi[32]; /* DWORD 0 */
338+ u8 addr_lo[32]; /* DWORD 1 */
339+ u8 rsvd0[32]; /* DWORD 2 */
340+ u8 len[16]; /* DWORD 3 */
341+ u8 rsvd1[16]; /* DWORD 3 */
342+} __packed;
343+struct SGE_AMAP {
344+ u32 dw[4];
345+};
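In these autogenerated AMAP definitions each u8 array element stands for one bit, not one byte: addr_hi[32] is a 32-bit field, and the shadow struct gives the total size in dwords. A minimal sketch of filling an SGE through the dword view (the driver proper goes through the AMAP_SET_BITS_PTR() macros rather than raw dword writes):

    static inline void sge_fill(struct SGE_AMAP *sge, u64 addr, u16 len)
    {
    	sge->dw[0] = (u32)(addr >> 32);	/* addr_hi, DWORD 0 */
    	sge->dw[1] = (u32)addr;		/* addr_lo, DWORD 1 */
    	sge->dw[2] = 0;			/* reserved */
    	sge->dw[3] = len;		/* len in the low 16 bits of DWORD 3 */
    }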
346+
347+#endif /* __be_common_amap_h__ */
348--- /dev/null
349+++ b/drivers/staging/benet/be_ethtool.c
350@@ -0,0 +1,348 @@
351+/*
352+ * Copyright (C) 2005 - 2008 ServerEngines
353+ * All rights reserved.
354+ *
355+ * This program is free software; you can redistribute it and/or
356+ * modify it under the terms of the GNU General Public License version 2
357+ * as published by the Free Software Foundation. The full GNU General
358+ * Public License is included in this distribution in the file called COPYING.
359+ *
360+ * Contact Information:
361+ * linux-drivers@serverengines.com
362+ *
363+ * ServerEngines
364+ * 209 N. Fair Oaks Ave
365+ * Sunnyvale, CA 94085
366+ */
367+/*
368+ * be_ethtool.c
369+ *
370+ * This file contains various functions that ethtool can use
371+ * to talk to the driver and the BE H/W.
372+ */
373+
374+#include "benet.h"
375+
376+#include <linux/ethtool.h>
377+
378+static const char benet_gstrings_stats[][ETH_GSTRING_LEN] = {
379+/* net_device_stats */
380+ "rx_packets",
381+ "tx_packets",
382+ "rx_bytes",
383+ "tx_bytes",
384+ "rx_errors",
385+ "tx_errors",
386+ "rx_dropped",
387+ "tx_dropped",
388+ "multicast",
389+ "collisions",
390+ "rx_length_errors",
391+ "rx_over_errors",
392+ "rx_crc_errors",
393+ "rx_frame_errors",
394+ "rx_fifo_errors",
395+ "rx_missed_errors",
396+ "tx_aborted_errors",
397+ "tx_carrier_errors",
398+ "tx_fifo_errors",
399+ "tx_heartbeat_errors",
400+ "tx_window_errors",
401+ "rx_compressed",
402+ "tc_compressed",
403+/* BE driver Stats */
404+ "bes_tx_reqs",
405+ "bes_tx_fails",
406+ "bes_fwd_reqs",
407+ "bes_tx_wrbs",
408+ "bes_interrupts",
409+ "bes_events",
410+ "bes_tx_events",
411+ "bes_rx_events",
412+ "bes_tx_compl",
413+ "bes_rx_compl",
414+ "bes_ethrx_post_fail",
415+ "bes_802_3_dropped_frames",
416+ "bes_802_3_malformed_frames",
417+ "bes_rx_misc_pkts",
418+ "bes_eth_tx_rate",
419+ "bes_eth_rx_rate",
420+ "Num Packets collected",
421+ "Num Times Flushed",
422+};
423+
424+#define NET_DEV_STATS_LEN \
425+ (sizeof(struct net_device_stats)/sizeof(unsigned long))
426+
427+#define BENET_STATS_LEN ARRAY_SIZE(benet_gstrings_stats)
428+
429+static void
430+be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
431+{
432+ struct be_net_object *pnob = netdev->priv;
433+ struct be_adapter *adapter = pnob->adapter;
434+
435+ strncpy(drvinfo->driver, be_driver_name, 32);
436+ strncpy(drvinfo->version, be_drvr_ver, 32);
437+ strncpy(drvinfo->fw_version, be_fw_ver, 32);
438+ strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
439+ drvinfo->testinfo_len = 0;
440+ drvinfo->regdump_len = 0;
441+ drvinfo->eedump_len = 0;
442+}
443+
444+static int
445+be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
446+{
447+ struct be_net_object *pnob = netdev->priv;
448+ struct be_adapter *adapter = pnob->adapter;
449+
450+ coalesce->rx_max_coalesced_frames = adapter->max_rx_coal;
451+
452+ coalesce->rx_coalesce_usecs = adapter->cur_eqd;
453+ coalesce->rx_coalesce_usecs_high = adapter->max_eqd;
454+ coalesce->rx_coalesce_usecs_low = adapter->min_eqd;
455+
456+ coalesce->tx_coalesce_usecs = adapter->cur_eqd;
457+ coalesce->tx_coalesce_usecs_high = adapter->max_eqd;
458+ coalesce->tx_coalesce_usecs_low = adapter->min_eqd;
459+
460+ coalesce->use_adaptive_rx_coalesce = adapter->enable_aic;
461+ coalesce->use_adaptive_tx_coalesce = adapter->enable_aic;
462+
463+ return 0;
464+}
465+
466+/*
467+ * This routine is used to set interrupt coalescing delay *as well as*
468+ * the number of pkts to coalesce for LRO.
469+ */
470+static int
471+be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
472+{
473+ struct be_net_object *pnob = netdev->priv;
474+ struct be_adapter *adapter = pnob->adapter;
475+ struct be_eq_object *eq_objectp;
476+ u32 max, min, cur;
477+ int status;
478+
479+ adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
480+ if (adapter->max_rx_coal >= BE_LRO_MAX_PKTS)
481+ adapter->max_rx_coal = BE_LRO_MAX_PKTS;
482+
483+ if (adapter->enable_aic == 0 &&
484+ coalesce->use_adaptive_rx_coalesce == 1) {
485+ /* if AIC is being turned on now, start with an EQD of 0 */
486+ adapter->cur_eqd = 0;
487+ }
488+ adapter->enable_aic = coalesce->use_adaptive_rx_coalesce;
489+
490+ /* round off to nearest multiple of 8 */
491+ max = (((coalesce->rx_coalesce_usecs_high + 4) >> 3) << 3);
492+ min = (((coalesce->rx_coalesce_usecs_low + 4) >> 3) << 3);
493+ cur = (((coalesce->rx_coalesce_usecs + 4) >> 3) << 3);
494+
495+ if (adapter->enable_aic) {
496+ /* accept low and high if AIC is enabled */
497+ if (max > MAX_EQD)
498+ max = MAX_EQD;
499+ if (min > max)
500+ min = max;
501+ adapter->max_eqd = max;
502+ adapter->min_eqd = min;
503+ if (adapter->cur_eqd > max)
504+ adapter->cur_eqd = max;
505+ if (adapter->cur_eqd < min)
506+ adapter->cur_eqd = min;
507+ } else {
508+ /* accept specified coalesce_usecs only if AIC is disabled */
509+ if (cur > MAX_EQD)
510+ cur = MAX_EQD;
511+ eq_objectp = &pnob->event_q_obj;
512+ status =
513+ be_eq_modify_delay(&pnob->fn_obj, 1, &eq_objectp, &cur,
514+ NULL, NULL, NULL);
515+ if (status == BE_SUCCESS)
516+ adapter->cur_eqd = cur;
517+ }
518+ return 0;
519+}
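The (x + 4) >> 3 << 3 expression rounds to the nearest multiple of 8, matching the 8-microsecond EQ-delay granularity noted in be_init.c. Worked examples:

    u32 round8(u32 x) { return ((x + 4) >> 3) << 3; }
    /* round8(3) == 0, round8(4) == 8, round8(11) == 8, round8(12) == 16 */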
520+
521+static u32 be_get_rx_csum(struct net_device *netdev)
522+{
523+ struct be_net_object *pnob = netdev->priv;
524+ struct be_adapter *adapter = pnob->adapter;
525+ return adapter->rx_csum;
526+}
527+
528+static int be_set_rx_csum(struct net_device *netdev, uint32_t data)
529+{
530+ struct be_net_object *pnob = netdev->priv;
531+ struct be_adapter *adapter = pnob->adapter;
532+
533+ if (data)
534+ adapter->rx_csum = 1;
535+ else
536+ adapter->rx_csum = 0;
537+
538+ return 0;
539+}
540+
541+static void
542+be_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
543+{
544+ switch (stringset) {
545+ case ETH_SS_STATS:
546+ memcpy(data, *benet_gstrings_stats,
547+ sizeof(benet_gstrings_stats));
548+ break;
549+ }
550+}
551+
552+static int be_get_stats_count(struct net_device *netdev)
553+{
554+ return BENET_STATS_LEN;
555+}
556+
557+static void
558+be_get_ethtool_stats(struct net_device *netdev,
559+ struct ethtool_stats *stats, uint64_t *data)
560+{
561+ struct be_net_object *pnob = netdev->priv;
562+ struct be_adapter *adapter = pnob->adapter;
563+ int i;
564+
565+ benet_get_stats(netdev);
566+
567+ for (i = 0; i < NET_DEV_STATS_LEN; i++)
568+ data[i] = ((unsigned long *)&adapter->benet_stats)[i];
569+
570+ data[i++] = adapter->be_stat.bes_tx_reqs;
571+ data[i++] = adapter->be_stat.bes_tx_fails;
572+ data[i++] = adapter->be_stat.bes_fwd_reqs;
573+ data[i++] = adapter->be_stat.bes_tx_wrbs;
574+
575+ data[i++] = adapter->be_stat.bes_ints;
576+ data[i++] = adapter->be_stat.bes_events;
577+ data[i++] = adapter->be_stat.bes_tx_events;
578+ data[i++] = adapter->be_stat.bes_rx_events;
579+ data[i++] = adapter->be_stat.bes_tx_compl;
580+ data[i++] = adapter->be_stat.bes_rx_compl;
581+ data[i++] = adapter->be_stat.bes_ethrx_post_fail;
582+ data[i++] = adapter->be_stat.bes_802_3_dropped_frames;
583+ data[i++] = adapter->be_stat.bes_802_3_malformed_frames;
584+ data[i++] = adapter->be_stat.bes_rx_misc_pkts;
585+ data[i++] = adapter->be_stat.bes_eth_tx_rate;
586+ data[i++] = adapter->be_stat.bes_eth_rx_rate;
587+ data[i++] = adapter->be_stat.bes_rx_coal;
588+ data[i++] = adapter->be_stat.bes_rx_flush;
589+
590+}
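The values filled in here must stay in lockstep with benet_gstrings_stats above: 23 net_device_stats counters followed by 18 driver counters. A hypothetical compile-time guard (the constant 18 assumes the string table as it currently stands):

    /* placed inside a function body, e.g. at the top of be_get_ethtool_stats() */
    BUILD_BUG_ON(BENET_STATS_LEN != NET_DEV_STATS_LEN + 18);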
591+
592+static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
593+{
594+ ecmd->speed = SPEED_10000;
595+ ecmd->duplex = DUPLEX_FULL;
596+ ecmd->autoneg = AUTONEG_DISABLE;
597+ return 0;
598+}
599+
600+/* Get the Ring parameters from the pnob */
601+static void
602+be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
603+{
604+ struct be_net_object *pnob = netdev->priv;
605+
606+ /* Preset maximums */
607+ ring->rx_max_pending = pnob->rx_q_len;
608+ ring->rx_mini_max_pending = 0; /* no mini ring */
609+ ring->rx_jumbo_max_pending = 0; /* no jumbo ring */
610+ ring->tx_max_pending = pnob->tx_q_len;
611+
612+ /* Current hardware settings */
613+ ring->rx_pending = atomic_read(&pnob->rx_q_posted);
614+ ring->rx_mini_pending = 0;
615+ ring->rx_jumbo_pending = 0;
616+ ring->tx_pending = atomic_read(&pnob->tx_q_used);
617+
618+}
619+
620+static void
621+be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
622+{
623+ struct be_net_object *pnob = netdev->priv;
624+ bool rxfc, txfc;
625+ int status;
626+
627+ status = be_eth_get_flow_control(&pnob->fn_obj, &txfc, &rxfc);
628+ if (status != BE_SUCCESS) {
629+ dev_info(&netdev->dev, "Unable to get pause frame settings\n");
630+ /* return defaults */
631+ ecmd->rx_pause = 1;
632+ ecmd->tx_pause = 0;
633+ ecmd->autoneg = AUTONEG_ENABLE;
634+ return;
635+ }
636+
637+ if (txfc == true)
638+ ecmd->tx_pause = 1;
639+ else
640+ ecmd->tx_pause = 0;
641+
642+ if (rxfc == true)
643+ ecmd->rx_pause = 1;
644+ else
645+ ecmd->rx_pause = 0;
646+
647+ ecmd->autoneg = AUTONEG_ENABLE;
648+}
649+
650+static int
651+be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
652+{
653+ struct be_net_object *pnob = netdev->priv;
654+ bool txfc, rxfc;
655+ int status;
656+
657+ if (ecmd->autoneg != AUTONEG_ENABLE)
658+ return -EINVAL;
659+
660+ if (ecmd->tx_pause)
661+ txfc = true;
662+ else
663+ txfc = false;
664+
665+ if (ecmd->rx_pause)
666+ rxfc = true;
667+ else
668+ rxfc = false;
669+
670+ status = be_eth_set_flow_control(&pnob->fn_obj, txfc, rxfc);
671+ if (status != BE_SUCCESS) {
672+ dev_info(&netdev->dev, "Unable to set pause frame settings\n");
673+ return -1;
674+ }
675+ return 0;
676+}
677+
678+struct ethtool_ops be_ethtool_ops = {
679+ .get_settings = be_get_settings,
680+ .get_drvinfo = be_get_drvinfo,
681+ .get_link = ethtool_op_get_link,
682+ .get_coalesce = be_get_coalesce,
683+ .set_coalesce = be_set_coalesce,
684+ .get_ringparam = be_get_ringparam,
685+ .get_pauseparam = be_get_pauseparam,
686+ .set_pauseparam = be_set_pauseparam,
687+ .get_rx_csum = be_get_rx_csum,
688+ .set_rx_csum = be_set_rx_csum,
689+ .get_tx_csum = ethtool_op_get_tx_csum,
690+ .set_tx_csum = ethtool_op_set_tx_csum,
691+ .get_sg = ethtool_op_get_sg,
692+ .set_sg = ethtool_op_set_sg,
693+ .get_tso = ethtool_op_get_tso,
694+ .set_tso = ethtool_op_set_tso,
695+ .get_strings = be_get_strings,
696+ .get_stats_count = be_get_stats_count,
697+ .get_ethtool_stats = be_get_ethtool_stats,
698+};
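The table only takes effect once attached to the net_device; the netdev setup path elsewhere in this patch (be_netif.c, not shown in this hunk) presumably does the equivalent of:

    netdev->ethtool_ops = &be_ethtool_ops;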
699--- /dev/null
700+++ b/drivers/staging/benet/be_init.c
701@@ -0,0 +1,1381 @@
702+/*
703+ * Copyright (C) 2005 - 2008 ServerEngines
704+ * All rights reserved.
705+ *
706+ * This program is free software; you can redistribute it and/or
707+ * modify it under the terms of the GNU General Public License version 2
708+ * as published by the Free Software Foundation. The full GNU General
709+ * Public License is included in this distribution in the file called COPYING.
710+ *
711+ * Contact Information:
712+ * linux-drivers@serverengines.com
713+ *
714+ * ServerEngines
715+ * 209 N. Fair Oaks Ave
716+ * Sunnyvale, CA 94085
717+ */
718+#include <linux/etherdevice.h>
719+#include "benet.h"
720+
721+#define DRVR_VERSION "1.0.728"
722+
723+static const struct pci_device_id be_device_id_table[] = {
724+ {PCI_DEVICE(0x19a2, 0x0201)},
725+ {0}
726+};
727+
728+MODULE_DEVICE_TABLE(pci, be_device_id_table);
729+
730+MODULE_VERSION(DRVR_VERSION);
731+
732+#define DRV_DESCRIPTION "ServerEngines BladeEngine Network Driver Version "
733+
734+MODULE_DESCRIPTION(DRV_DESCRIPTION DRVR_VERSION);
735+MODULE_AUTHOR("ServerEngines");
736+MODULE_LICENSE("GPL");
737+
738+static unsigned int msix = 1;
739+module_param(msix, uint, S_IRUGO);
740+MODULE_PARM_DESC(msix, "Use MSI-x interrupts");
741+
742+static unsigned int rxbuf_size = 2048; /* Default RX frag size */
743+module_param(rxbuf_size, uint, S_IRUGO);
744+MODULE_PARM_DESC(rxbuf_size, "Size of buffers to hold Rx data");
745+
746+const char be_drvr_ver[] = DRVR_VERSION;
747+char be_fw_ver[32]; /* F/W version filled in by be_probe */
748+char be_driver_name[] = "benet";
749+
750+/*
751+ * Number of entries in each queue.
752+ */
753+#define EVENT_Q_LEN 1024
754+#define ETH_TXQ_LEN 2048
755+#define ETH_TXCQ_LEN 1024
756+#define ETH_RXQ_LEN 1024 /* Does not support any other value */
757+#define ETH_UC_RXCQ_LEN 1024
758+#define ETH_BC_RXCQ_LEN 256
759+#define MCC_Q_LEN 64 /* total size not to exceed 8 pages */
760+#define MCC_CQ_LEN 256
761+
762+/* Bit mask describing events of interest to be traced */
763+unsigned int trace_level;
764+
765+static int
766+init_pci_be_function(struct be_adapter *adapter, struct pci_dev *pdev)
767+{
768+ u64 pa;
769+
770+ /* CSR */
771+ pa = pci_resource_start(pdev, 2);
772+ adapter->csr_va = ioremap_nocache(pa, pci_resource_len(pdev, 2));
773+ if (adapter->csr_va == NULL)
774+ return -ENOMEM;
775+
776+ /* Door Bell */
777+ pa = pci_resource_start(pdev, 4);
778+ adapter->db_va = ioremap_nocache(pa, (128 * 1024));
779+ if (adapter->db_va == NULL) {
780+ iounmap(adapter->csr_va);
781+ return -ENOMEM;
782+ }
783+
784+ /* PCI */
785+ pa = pci_resource_start(pdev, 1);
786+ adapter->pci_va = ioremap_nocache(pa, pci_resource_len(pdev, 1));
787+ if (adapter->pci_va == NULL) {
788+ iounmap(adapter->csr_va);
789+ iounmap(adapter->db_va);
790+ return -ENOMEM;
791+ }
792+ return 0;
793+}
794+
795+/*
796+ This function enables the interrupt corresponding to the Event
797+ queue ID for the given NetObject
798+*/
799+void be_enable_eq_intr(struct be_net_object *pnob)
800+{
801+ struct CQ_DB_AMAP cqdb;
802+ cqdb.dw[0] = 0;
803+ AMAP_SET_BITS_PTR(CQ_DB, event, &cqdb, 1);
804+ AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, 1);
805+ AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, 0);
806+ AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, pnob->event_q_id);
807+ PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
808+}
809+
810+/*
811+ This function disables the interrupt corresponding to the Event
812+ queue ID for the given NetObject
813+*/
814+void be_disable_eq_intr(struct be_net_object *pnob)
815+{
816+ struct CQ_DB_AMAP cqdb;
817+ cqdb.dw[0] = 0;
818+ AMAP_SET_BITS_PTR(CQ_DB, event, &cqdb, 1);
819+ AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, 0);
820+ AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, 0);
821+ AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, pnob->event_q_id);
822+ PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
823+}
824+
825+/*
826+ This function enables the interrupt from the network function
827+ of the BladeEngine. Use the function be_enable_eq_intr()
828+ to enable the interrupt from the event queue of only one specific
829+ NetObject.
830+*/
831+void be_enable_intr(struct be_net_object *pnob)
832+{
833+ struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
834+ u32 host_intr;
835+
836+ ctrl.dw[0] = PCICFG1_READ(&pnob->fn_obj, host_timer_int_ctrl);
837+ host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
838+ hostintr, ctrl.dw);
839+ if (!host_intr) {
840+ AMAP_SET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
841+ hostintr, ctrl.dw, 1);
842+ PCICFG1_WRITE(&pnob->fn_obj, host_timer_int_ctrl,
843+ ctrl.dw[0]);
844+ }
845+}
846+
847+/*
848+ This function disables the interrupt from the network function of
849+ the BladeEngine. Use the function be_disable_eq_intr() to
850+ disable the interrupt from the event queue of only one specific NetObject
851+*/
852+void be_disable_intr(struct be_net_object *pnob)
853+{
854+
855+ struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
856+ u32 host_intr;
857+ ctrl.dw[0] = PCICFG1_READ(&pnob->fn_obj, host_timer_int_ctrl);
858+ host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
859+ hostintr, ctrl.dw);
860+ if (host_intr) {
861+ AMAP_SET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR, hostintr,
862+ ctrl.dw, 0);
863+ PCICFG1_WRITE(&pnob->fn_obj, host_timer_int_ctrl,
864+ ctrl.dw[0]);
865+ }
866+}
867+
868+static int be_enable_msix(struct be_adapter *adapter)
869+{
870+ int i, ret;
871+
872+ if (!msix)
873+ return -1;
874+
875+ for (i = 0; i < BE_MAX_REQ_MSIX_VECTORS; i++)
876+ adapter->msix_entries[i].entry = i;
877+
878+ ret = pci_enable_msix(adapter->pdev, adapter->msix_entries,
879+ BE_MAX_REQ_MSIX_VECTORS);
880+
881+ if (ret == 0)
882+ adapter->msix_enabled = 1;
883+ return ret;
884+}
885+
886+static int be_register_isr(struct be_adapter *adapter,
887+ struct be_net_object *pnob)
888+{
889+ struct net_device *netdev = pnob->netdev;
890+ int intx = 0, r;
891+
892+ netdev->irq = adapter->pdev->irq;
893+ r = be_enable_msix(adapter);
894+
895+ if (r == 0) {
896+ r = request_irq(adapter->msix_entries[0].vector,
897+ be_int, IRQF_SHARED, netdev->name, netdev);
898+ if (r) {
899+ printk(KERN_WARNING
900+ "MSIX Request IRQ failed - Errno %d\n", r);
901+ intx = 1;
902+ pci_disable_msix(adapter->pdev);
903+ adapter->msix_enabled = 0;
904+ }
905+ } else {
906+ intx = 1;
907+ }
908+
909+ if (intx) {
910+ r = request_irq(netdev->irq, be_int, IRQF_SHARED,
911+ netdev->name, netdev);
912+ if (r) {
913+ printk(KERN_WARNING
914+ "INTx Request IRQ failed - Errno %d\n", r);
915+ return -1;
916+ }
917+ }
918+ adapter->isr_registered = 1;
919+ return 0;
920+}
921+
922+static void be_unregister_isr(struct be_adapter *adapter)
923+{
924+ struct net_device *netdev = adapter->netdevp;
925+ if (adapter->isr_registered) {
926+ if (adapter->msix_enabled) {
927+ free_irq(adapter->msix_entries[0].vector, netdev);
928+ pci_disable_msix(adapter->pdev);
929+ adapter->msix_enabled = 0;
930+ } else {
931+ free_irq(netdev->irq, netdev);
932+ }
933+ adapter->isr_registered = 0;
934+ }
935+}
936+
937+/*
938+ This function processes the Flush Completions that are issued by the
939+ ARM F/W, when a Recv Ring is destroyed. A flush completion is
940+ identified when a Rx Compl descriptor has the tcpcksum and udpcksum
941+ set and the pktsize is 32. These completions are received on the
942+ Rx Completion Queue.
943+*/
944+static u32 be_process_rx_flush_cmpl(struct be_net_object *pnob)
945+{
946+ struct ETH_RX_COMPL_AMAP *rxcp;
947+ unsigned int i = 0;
948+ while ((rxcp = be_get_rx_cmpl(pnob)) != NULL) {
949+ be_notify_cmpl(pnob, 1, pnob->rx_cq_id, 1);
950+ i++;
951+ }
952+ return i;
953+}
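Per the rule above, a flush completion could be recognized with a predicate like this sketch; the real code extracts tcpcksum, udpcksum and pktsize from the ETH_RX_COMPL descriptor via the AMAP accessors:

    static inline int be_rx_compl_is_flush(u32 tcpcksum, u32 udpcksum, u32 pktsize)
    {
    	return tcpcksum && udpcksum && pktsize == 32;
    }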
954+
955+static void be_tx_q_clean(struct be_net_object *pnob)
956+{
957+ while (atomic_read(&pnob->tx_q_used))
958+ process_one_tx_compl(pnob, tx_compl_lastwrb_idx_get(pnob));
959+}
960+
961+static void be_rx_q_clean(struct be_net_object *pnob)
962+{
963+ if (pnob->rx_ctxt) {
964+ int i;
965+ struct be_rx_page_info *rx_page_info;
966+ for (i = 0; i < pnob->rx_q_len; i++) {
967+ rx_page_info = &(pnob->rx_page_info[i]);
968+ if (!pnob->rx_pg_shared || rx_page_info->page_offset) {
969+ pci_unmap_page(pnob->adapter->pdev,
970+ pci_unmap_addr(rx_page_info, bus),
971+ pnob->rx_buf_size,
972+ PCI_DMA_FROMDEVICE);
973+ }
974+ if (rx_page_info->page)
975+ put_page(rx_page_info->page);
976+ memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
977+ }
978+ pnob->rx_pg_info_hd = 0;
979+ }
980+}
981+
982+static void be_destroy_netobj(struct be_net_object *pnob)
983+{
984+ int status;
985+
986+ if (pnob->tx_q_created) {
987+ status = be_eth_sq_destroy(&pnob->tx_q_obj);
988+ pnob->tx_q_created = 0;
989+ }
990+
991+ if (pnob->rx_q_created) {
992+ status = be_eth_rq_destroy(&pnob->rx_q_obj);
993+ if (status != 0) {
994+ status = be_eth_rq_destroy_options(&pnob->rx_q_obj, 0,
995+ NULL, NULL);
996+ BUG_ON(status);
997+ }
998+ pnob->rx_q_created = 0;
999+ }
1000+
1001+ be_process_rx_flush_cmpl(pnob);
1002+
1003+ if (pnob->tx_cq_created) {
1004+ status = be_cq_destroy(&pnob->tx_cq_obj);
1005+ pnob->tx_cq_created = 0;
1006+ }
1007+
1008+ if (pnob->rx_cq_created) {
1009+ status = be_cq_destroy(&pnob->rx_cq_obj);
1010+ pnob->rx_cq_created = 0;
1011+ }
1012+
1013+ if (pnob->mcc_q_created) {
1014+ status = be_mcc_ring_destroy(&pnob->mcc_q_obj);
1015+ pnob->mcc_q_created = 0;
1016+ }
1017+ if (pnob->mcc_cq_created) {
1018+ status = be_cq_destroy(&pnob->mcc_cq_obj);
1019+ pnob->mcc_cq_created = 0;
1020+ }
1021+
1022+ if (pnob->event_q_created) {
1023+ status = be_eq_destroy(&pnob->event_q_obj);
1024+ pnob->event_q_created = 0;
1025+ }
1026+ be_function_cleanup(&pnob->fn_obj);
1027+}
1028+
1029+/*
1030+ * free all resources associated with a pnob
1031+ * Called at the time of module cleanup as well as on any error during
1032+ * module init. Some resources may be partially allocated in a NetObj.
1033+ */
1034+static void netobject_cleanup(struct be_adapter *adapter,
1035+ struct be_net_object *pnob)
1036+{
1037+ struct net_device *netdev = adapter->netdevp;
1038+
1039+ if (netif_running(netdev)) {
1040+ netif_stop_queue(netdev);
1041+ be_wait_nic_tx_cmplx_cmpl(pnob);
1042+ be_disable_eq_intr(pnob);
1043+ }
1044+
1045+ be_unregister_isr(adapter);
1046+
1047+ if (adapter->tasklet_started) {
1048+ tasklet_kill(&(adapter->sts_handler));
1049+ adapter->tasklet_started = 0;
1050+ }
1051+ if (pnob->fn_obj_created)
1052+ be_disable_intr(pnob);
1053+
1054+ if (adapter->dev_state != BE_DEV_STATE_NONE)
1055+ unregister_netdev(netdev);
1056+
1057+ if (pnob->fn_obj_created)
1058+ be_destroy_netobj(pnob);
1059+
1060+ adapter->net_obj = NULL;
1061+ adapter->netdevp = NULL;
1062+
1063+ be_rx_q_clean(pnob);
1064+ if (pnob->rx_ctxt) {
1065+ kfree(pnob->rx_page_info);
1066+ kfree(pnob->rx_ctxt);
1067+ }
1068+
1069+ be_tx_q_clean(pnob);
1070+ kfree(pnob->tx_ctxt);
1071+
1072+ if (pnob->mcc_q)
1073+ pci_free_consistent(adapter->pdev, pnob->mcc_q_size,
1074+ pnob->mcc_q, pnob->mcc_q_bus);
1075+
1076+ if (pnob->mcc_wrb_ctxt)
1077+ free_pages((unsigned long)pnob->mcc_wrb_ctxt,
1078+ get_order(pnob->mcc_wrb_ctxt_size));
1079+
1080+ if (pnob->mcc_cq)
1081+ pci_free_consistent(adapter->pdev, pnob->mcc_cq_size,
1082+ pnob->mcc_cq, pnob->mcc_cq_bus);
1083+
1084+ if (pnob->event_q)
1085+ pci_free_consistent(adapter->pdev, pnob->event_q_size,
1086+ pnob->event_q, pnob->event_q_bus);
1087+
1088+ if (pnob->tx_cq)
1089+ pci_free_consistent(adapter->pdev, pnob->tx_cq_size,
1090+ pnob->tx_cq, pnob->tx_cq_bus);
1091+
1092+ if (pnob->tx_q)
1093+ pci_free_consistent(adapter->pdev, pnob->tx_q_size,
1094+ pnob->tx_q, pnob->tx_q_bus);
1095+
1096+ if (pnob->rx_q)
1097+ pci_free_consistent(adapter->pdev, pnob->rx_q_size,
1098+ pnob->rx_q, pnob->rx_q_bus);
1099+
1100+ if (pnob->rx_cq)
1101+ pci_free_consistent(adapter->pdev, pnob->rx_cq_size,
1102+ pnob->rx_cq, pnob->rx_cq_bus);
1103+
1104+
1105+ if (pnob->mb_ptr)
1106+ pci_free_consistent(adapter->pdev, pnob->mb_size, pnob->mb_ptr,
1107+ pnob->mb_bus);
1108+
1109+ free_netdev(netdev);
1110+}
1111+
1112+
1113+static int be_nob_ring_alloc(struct be_adapter *adapter,
1114+ struct be_net_object *pnob)
1115+{
1116+ u32 size;
1117+
1118+ /* Mailbox ring descriptor; the mailbox pointer needs to be 16-byte aligned */
1119+ pnob->mb_size = sizeof(struct MCC_MAILBOX_AMAP) + 16;
1120+ pnob->mb_ptr = pci_alloc_consistent(adapter->pdev, pnob->mb_size,
1121+ &pnob->mb_bus);
1122+ if (!pnob->mb_bus)
1123+ return -1;
1124+ memset(pnob->mb_ptr, 0, pnob->mb_size);
1125+ pnob->mb_rd.va = PTR_ALIGN(pnob->mb_ptr, 16);
1126+ pnob->mb_rd.pa = PTR_ALIGN(pnob->mb_bus, 16);
1127+ pnob->mb_rd.length = sizeof(struct MCC_MAILBOX_AMAP);
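Allocating sizeof(struct MCC_MAILBOX_AMAP) + 16 bytes guarantees that a 16-byte-aligned mailbox fits inside the buffer wherever pci_alloc_consistent() places it; PTR_ALIGN rounds both the virtual and bus addresses up to the next multiple of 16. For example (hypothetical addresses):

    void *mb_ptr = (void *)0x1008;	/* returned by the allocator */
    void *va = PTR_ALIGN(mb_ptr, 16);	/* 0x1010, still within size + 16 */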
1128+ /*
1129+ * Event queue
1130+ */
1131+ pnob->event_q_len = EVENT_Q_LEN;
1132+ pnob->event_q_size = pnob->event_q_len * sizeof(struct EQ_ENTRY_AMAP);
1133+ pnob->event_q = pci_alloc_consistent(adapter->pdev, pnob->event_q_size,
1134+ &pnob->event_q_bus);
1135+ if (!pnob->event_q_bus)
1136+ return -1;
1137+ memset(pnob->event_q, 0, pnob->event_q_size);
1138+ /*
1139+ * Eth TX queue
1140+ */
1141+ pnob->tx_q_len = ETH_TXQ_LEN;
1142+ pnob->tx_q_port = 0;
1143+ pnob->tx_q_size = pnob->tx_q_len * sizeof(struct ETH_WRB_AMAP);
1144+ pnob->tx_q = pci_alloc_consistent(adapter->pdev, pnob->tx_q_size,
1145+ &pnob->tx_q_bus);
1146+ if (!pnob->tx_q_bus)
1147+ return -1;
1148+ memset(pnob->tx_q, 0, pnob->tx_q_size);
1149+ /*
1150+ * Eth TX Compl queue
1151+ */
1152+ pnob->txcq_len = ETH_TXCQ_LEN;
1153+ pnob->tx_cq_size = pnob->txcq_len * sizeof(struct ETH_TX_COMPL_AMAP);
1154+ pnob->tx_cq = pci_alloc_consistent(adapter->pdev, pnob->tx_cq_size,
1155+ &pnob->tx_cq_bus);
1156+ if (!pnob->tx_cq_bus)
1157+ return -1;
1158+ memset(pnob->tx_cq, 0, pnob->tx_cq_size);
1159+ /*
1160+ * Eth RX queue
1161+ */
1162+ pnob->rx_q_len = ETH_RXQ_LEN;
1163+ pnob->rx_q_size = pnob->rx_q_len * sizeof(struct ETH_RX_D_AMAP);
1164+ pnob->rx_q = pci_alloc_consistent(adapter->pdev, pnob->rx_q_size,
1165+ &pnob->rx_q_bus);
1166+ if (!pnob->rx_q_bus)
1167+ return -1;
1168+ memset(pnob->rx_q, 0, pnob->rx_q_size);
1169+ /*
1170+ * Eth Unicast RX Compl queue
1171+ */
1172+ pnob->rx_cq_len = ETH_UC_RXCQ_LEN;
1173+ pnob->rx_cq_size = pnob->rx_cq_len *
1174+ sizeof(struct ETH_RX_COMPL_AMAP);
1175+ pnob->rx_cq = pci_alloc_consistent(adapter->pdev, pnob->rx_cq_size,
1176+ &pnob->rx_cq_bus);
1177+ if (!pnob->rx_cq_bus)
1178+ return -1;
1179+ memset(pnob->rx_cq, 0, pnob->rx_cq_size);
1180+
1181+ /* TX resources */
1182+ size = pnob->tx_q_len * sizeof(void **);
1183+ pnob->tx_ctxt = kzalloc(size, GFP_KERNEL);
1184+ if (pnob->tx_ctxt == NULL)
1185+ return -1;
1186+
1187+ /* RX resources */
1188+ size = pnob->rx_q_len * sizeof(void *);
1189+ pnob->rx_ctxt = kzalloc(size, GFP_KERNEL);
1190+ if (pnob->rx_ctxt == NULL)
1191+ return -1;
1192+
1193+ size = (pnob->rx_q_len * sizeof(struct be_rx_page_info));
1194+ pnob->rx_page_info = kzalloc(size, GFP_KERNEL);
1195+ if (pnob->rx_page_info == NULL)
1196+ return -1;
1197+
1198+ adapter->eth_statsp = kzalloc(sizeof(struct FWCMD_ETH_GET_STATISTICS),
1199+ GFP_KERNEL);
1200+ if (adapter->eth_statsp == NULL)
1201+ return -1;
1202+ pnob->rx_buf_size = rxbuf_size;
1203+ return 0;
1204+}
1205+
1206+/*
1207+ This function initializes the be_net_object for subsequent
1208+ network operations.
1209+
1210+ Before calling this function, the driver must have allocated
1211+ space for the NetObject structure, initialized the structure,
1212+ allocated DMAable memory for all the network queues that form
1213+ part of the NetObject and populated the start address (virtual)
1214+ and number of entries allocated for each queue in the NetObject structure.
1215+
1216+ The driver must also have allocated memory to hold the
1217+ mailbox structure (MCC_MAILBOX) and post the physical address,
1218+ virtual addresses and the size of the mailbox memory in the
1219+ NetObj.mb_rd. This structure is used by BECLIB for
1220+ initial communication with the embedded MCC processor. BECLIB
1221+ uses the mailbox until MCC rings are created for more efficient
1222+ communication with the MCC processor.
1223+
1224+ If the driver wants to create multiple network interfaces for more
1225+ than one protection domain, it can call be_create_netobj()
1226+ multiple times, once for each protection domain. A maximum of
1227+ 32 protection domains is supported.
1228+
1229+*/
1230+static int
1231+be_create_netobj(struct be_net_object *pnob, u8 __iomem *csr_va,
1232+ u8 __iomem *db_va, u8 __iomem *pci_va)
1233+{
1234+ int status = 0;
1235+ bool eventable = false, tx_no_delay = false, rx_no_delay = false;
1236+ struct be_eq_object *eq_objectp = NULL;
1237+ struct be_function_object *pfob = &pnob->fn_obj;
1238+ struct ring_desc rd;
1239+ u32 set_rxbuf_size;
1240+ u32 tx_cmpl_wm = CEV_WMARK_96; /* 0xffffffff to disable */
1241+ u32 rx_cmpl_wm = CEV_WMARK_160; /* 0xffffffff to disable */
1242+ u32 eq_delay = 0; /* delay in 8usec units. 0xffffffff to disable */
1243+
1244+ memset(&rd, 0, sizeof(struct ring_desc));
1245+
1246+ status = be_function_object_create(csr_va, db_va, pci_va,
1247+ BE_FUNCTION_TYPE_NETWORK, &pnob->mb_rd, pfob);
1248+ if (status != BE_SUCCESS)
1249+ return status;
1250+ pnob->fn_obj_created = true;
1251+
1252+ if (tx_cmpl_wm == 0xffffffff)
1253+ tx_no_delay = true;
1254+ if (rx_cmpl_wm == 0xffffffff)
1255+ rx_no_delay = true;
1256+ /*
1257+ * now create the necessary rings
1258+ * Event Queue first.
1259+ */
1260+ if (pnob->event_q_len) {
1261+ rd.va = pnob->event_q;
1262+ rd.pa = pnob->event_q_bus;
1263+ rd.length = pnob->event_q_size;
1264+
1265+ status = be_eq_create(pfob, &rd, 4, pnob->event_q_len,
1266+ (u32) -1, /* CEV_WMARK_* or -1 */
1267+ eq_delay, /* in 8us units, or -1 */
1268+ &pnob->event_q_obj);
1269+ if (status != BE_SUCCESS)
1270+ goto error_ret;
1271+ pnob->event_q_id = pnob->event_q_obj.eq_id;
1272+ pnob->event_q_created = 1;
1273+ eventable = true;
1274+ eq_objectp = &pnob->event_q_obj;
1275+ }
1276+ /*
1277+ * Now Eth Tx Compl. queue.
1278+ */
1279+ if (pnob->txcq_len) {
1280+ rd.va = pnob->tx_cq;
1281+ rd.pa = pnob->tx_cq_bus;
1282+ rd.length = pnob->tx_cq_size;
1283+
1284+ status = be_cq_create(pfob, &rd,
1285+ pnob->txcq_len * sizeof(struct ETH_TX_COMPL_AMAP),
1286+ false, /* solicited events */
1287+ tx_no_delay, /* nodelay */
1288+ tx_cmpl_wm, /* Watermark encodings */
1289+ eq_objectp, &pnob->tx_cq_obj);
1290+ if (status != BE_SUCCESS)
1291+ goto error_ret;
1292+
1293+ pnob->tx_cq_id = pnob->tx_cq_obj.cq_id;
1294+ pnob->tx_cq_created = 1;
1295+ }
1296+ /*
1297+ * Eth Tx queue
1298+ */
1299+ if (pnob->tx_q_len) {
1300+ struct be_eth_sq_parameters ex_params = { 0 };
1301+ u32 type;
1302+
1303+ if (pnob->tx_q_port) {
1304+ /* TXQ to be bound to a specific port */
1305+ type = BE_ETH_TX_RING_TYPE_BOUND;
1306+ ex_params.port = pnob->tx_q_port - 1;
1307+ } else
1308+ type = BE_ETH_TX_RING_TYPE_STANDARD;
1309+
1310+ rd.va = pnob->tx_q;
1311+ rd.pa = pnob->tx_q_bus;
1312+ rd.length = pnob->tx_q_size;
1313+
1314+ status = be_eth_sq_create_ex(pfob, &rd,
1315+ pnob->tx_q_len * sizeof(struct ETH_WRB_AMAP),
1316+ type, 2, &pnob->tx_cq_obj,
1317+ &ex_params, &pnob->tx_q_obj);
1318+
1319+ if (status != BE_SUCCESS)
1320+ goto error_ret;
1321+
1322+ pnob->tx_q_id = pnob->tx_q_obj.bid;
1323+ pnob->tx_q_created = 1;
1324+ }
1325+ /*
1326+ * Now Eth Rx compl. queue. Always needed.
1327+ */
1328+ rd.va = pnob->rx_cq;
1329+ rd.pa = pnob->rx_cq_bus;
1330+ rd.length = pnob->rx_cq_size;
1331+
1332+ status = be_cq_create(pfob, &rd,
1333+ pnob->rx_cq_len * sizeof(struct ETH_RX_COMPL_AMAP),
1334+ false, /* solicited events */
1335+ rx_no_delay, /* nodelay */
1336+ rx_cmpl_wm, /* Watermark encodings */
1337+ eq_objectp, &pnob->rx_cq_obj);
1338+ if (status != BE_SUCCESS)
1339+ goto error_ret;
1340+
1341+ pnob->rx_cq_id = pnob->rx_cq_obj.cq_id;
1342+ pnob->rx_cq_created = 1;
1343+
1344+ status = be_eth_rq_set_frag_size(pfob, pnob->rx_buf_size,
1345+ (u32 *) &set_rxbuf_size);
1346+ if (status != BE_SUCCESS) {
1347+ be_eth_rq_get_frag_size(pfob, (u32 *) &pnob->rx_buf_size);
1348+ if ((pnob->rx_buf_size != 2048) && (pnob->rx_buf_size != 4096)
1349+ && (pnob->rx_buf_size != 8192))
1350+ goto error_ret;
1351+ } else {
1352+ if (pnob->rx_buf_size != set_rxbuf_size)
1353+ pnob->rx_buf_size = set_rxbuf_size;
1354+ }
1355+ /*
1356+ * Eth RX queue. be_eth_rq_create() always assumes 2 pages size
1357+ */
1358+ rd.va = pnob->rx_q;
1359+ rd.pa = pnob->rx_q_bus;
1360+ rd.length = pnob->rx_q_size;
1361+
1362+ status = be_eth_rq_create(pfob, &rd, &pnob->rx_cq_obj,
1363+ &pnob->rx_cq_obj, &pnob->rx_q_obj);
1364+
1365+ if (status != BE_SUCCESS)
1366+ goto error_ret;
1367+
1368+ pnob->rx_q_id = pnob->rx_q_obj.rid;
1369+ pnob->rx_q_created = 1;
1370+
1371+ return BE_SUCCESS; /* All required queues created. */
1372+
1373+error_ret:
1374+ be_destroy_netobj(pnob);
1375+ return status;
1376+}
1377+
1378+static int be_nob_ring_init(struct be_adapter *adapter,
1379+ struct be_net_object *pnob)
1380+{
1381+ int status;
1382+
1383+ pnob->event_q_tl = 0;
1384+
1385+ pnob->tx_q_hd = 0;
1386+ pnob->tx_q_tl = 0;
1387+
1388+ pnob->tx_cq_tl = 0;
1389+
1390+ pnob->rx_cq_tl = 0;
1391+
1392+ memset(pnob->event_q, 0, pnob->event_q_size);
1393+ memset(pnob->tx_cq, 0, pnob->tx_cq_size);
1394+ memset(pnob->tx_ctxt, 0, pnob->tx_q_len * sizeof(void **));
1395+ memset(pnob->rx_ctxt, 0, pnob->rx_q_len * sizeof(void *));
1396+ pnob->rx_pg_info_hd = 0;
1397+ pnob->rx_q_hd = 0;
1398+ atomic_set(&pnob->rx_q_posted, 0);
1399+
1400+ status = be_create_netobj(pnob, adapter->csr_va, adapter->db_va,
1401+ adapter->pci_va);
1402+ if (status != BE_SUCCESS)
1403+ return -1;
1404+
1405+ be_post_eth_rx_buffs(pnob);
1406+ return 0;
1407+}
1408+
1409+/* This function handles async callback for link status */
1410+static void
1411+be_link_status_async_callback(void *context, u32 event_code, void *event)
1412+{
1413+ struct ASYNC_EVENT_LINK_STATE_AMAP *link_status = event;
1414+ struct be_adapter *adapter = context;
1415+ bool link_enable = false;
1416+ struct be_net_object *pnob;
1417+ struct ASYNC_EVENT_TRAILER_AMAP *async_trailer;
1418+ struct net_device *netdev;
1419+ u32 async_event_code, async_event_type, active_port;
1420+ u32 port0_link_status, port1_link_status, port0_duplex, port1_duplex;
1421+ u32 port0_speed, port1_speed;
1422+
1423+ if (event_code != ASYNC_EVENT_CODE_LINK_STATE) {
1424+ /* Not our event to handle */
1425+ return;
1426+ }
1427+ async_trailer = (struct ASYNC_EVENT_TRAILER_AMAP *)
1428+ ((u8 *) event + sizeof(struct MCC_CQ_ENTRY_AMAP) -
1429+ sizeof(struct ASYNC_EVENT_TRAILER_AMAP));
1430+
1431+ async_event_code = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, event_code,
1432+ async_trailer);
1433+ BUG_ON(async_event_code != ASYNC_EVENT_CODE_LINK_STATE);
1434+
1435+ pnob = adapter->net_obj;
1436+ netdev = pnob->netdev;
1437+
1438+ /* Determine if this event is a switch VLD or a physical link event */
1439+ async_event_type = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, event_type,
1440+ async_trailer);
1441+ active_port = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
1442+ active_port, link_status);
1443+ port0_link_status = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
1444+ port0_link_status, link_status);
1445+ port1_link_status = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
1446+ port1_link_status, link_status);
1447+ port0_duplex = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
1448+ port0_duplex, link_status);
1449+ port1_duplex = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
1450+ port1_duplex, link_status);
1451+ port0_speed = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
1452+ port0_speed, link_status);
1453+ port1_speed = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
1454+ port1_speed, link_status);
1455+ if (async_event_type == NTWK_LINK_TYPE_VIRTUAL) {
1456+ adapter->be_stat.bes_link_change_virtual++;
1457+ if (adapter->be_link_sts->active_port != active_port) {
1458+ dev_notice(&netdev->dev,
1459+ "Active port changed due to VLD on switch\n");
1460+ } else {
1461+ dev_notice(&netdev->dev, "Link status update\n");
1462+ }
1463+
1464+ } else {
1465+ adapter->be_stat.bes_link_change_physical++;
1466+ if (adapter->be_link_sts->active_port != active_port) {
1467+ dev_notice(&netdev->dev,
1468+ "Active port changed due to port link"
1469+ " status change\n");
1470+ } else {
1471+ dev_notice(&netdev->dev, "Link status update\n");
1472+ }
1473+ }
1474+
1475+ memset(adapter->be_link_sts, 0, sizeof(*adapter->be_link_sts));
1476+
1477+ if ((port0_link_status == ASYNC_EVENT_LINK_UP) ||
1478+ (port1_link_status == ASYNC_EVENT_LINK_UP)) {
1479+ if ((adapter->port0_link_sts == BE_PORT_LINK_DOWN) &&
1480+ (adapter->port1_link_sts == BE_PORT_LINK_DOWN)) {
1481+ /* Both ports were down earlier, so the link is coming up */
1482+ link_enable = true;
1483+ }
1484+
1485+ if (port0_link_status == ASYNC_EVENT_LINK_UP) {
1486+ adapter->port0_link_sts = BE_PORT_LINK_UP;
1487+ adapter->be_link_sts->mac0_duplex = port0_duplex;
1488+ adapter->be_link_sts->mac0_speed = port0_speed;
1489+ if (active_port == NTWK_PORT_A)
1490+ adapter->be_link_sts->active_port = 0;
1491+ } else
1492+ adapter->port0_link_sts = BE_PORT_LINK_DOWN;
1493+
1494+ if (port1_link_status == ASYNC_EVENT_LINK_UP) {
1495+ adapter->port1_link_sts = BE_PORT_LINK_UP;
1496+ adapter->be_link_sts->mac1_duplex = port1_duplex;
1497+ adapter->be_link_sts->mac1_speed = port1_speed;
1498+ if (active_port == NTWK_PORT_B)
1499+ adapter->be_link_sts->active_port = 1;
1500+ } else
1501+ adapter->port1_link_sts = BE_PORT_LINK_DOWN;
1502+
1503+ printk(KERN_INFO "Link Properties for %s:\n", netdev->name);
1504+ dev_info(&netdev->dev, "Link Properties:\n");
1505+ be_print_link_info(adapter->be_link_sts);
1506+
1507+ if (!link_enable)
1508+ return;
1509+ /*
1510+ * Both ports were down previously, but at least one of them
1511+ * has come up. If this netdevice's carrier is not yet up,
1512+ * indicate the new state to the stack.
1513+ */
1514+ if (!netif_carrier_ok(netdev)) {
1515+ netif_start_queue(netdev);
1516+ netif_carrier_on(netdev);
1517+ }
1518+ return;
1519+ }
1520+
1521+ /* Now both the ports are down. Tell the stack about it */
1522+ dev_info(&netdev->dev, "Both ports are down\n");
1523+ adapter->port0_link_sts = BE_PORT_LINK_DOWN;
1524+ adapter->port1_link_sts = BE_PORT_LINK_DOWN;
1525+ if (netif_carrier_ok(netdev)) {
1526+ netif_carrier_off(netdev);
1527+ netif_stop_queue(netdev);
1528+ }
1529+ return;
1530+}
1531+
1532+static int be_mcc_create(struct be_adapter *adapter)
1533+{
1534+ struct be_net_object *pnob;
1535+
1536+ pnob = adapter->net_obj;
1537+ /*
1538+ * Create the MCC ring so that all further communication with
1539+ * MCC can go through the ring. We do this at the end since
1540+ * we do not want to be dealing with interrupts until the
1541+ * initialization is complete.
1542+ */
1543+ pnob->mcc_q_len = MCC_Q_LEN;
1544+ pnob->mcc_q_size = pnob->mcc_q_len * sizeof(struct MCC_WRB_AMAP);
1545+ pnob->mcc_q = pci_alloc_consistent(adapter->pdev, pnob->mcc_q_size,
1546+ &pnob->mcc_q_bus);
1547+ if (!pnob->mcc_q_bus)
1548+ return -1;
1549+ /*
1550+ * space for MCC WRB context
1551+ */
1552+ pnob->mcc_wrb_ctxtLen = MCC_Q_LEN;
1553+ pnob->mcc_wrb_ctxt_size = pnob->mcc_wrb_ctxtLen *
1554+ sizeof(struct be_mcc_wrb_context);
1555+ pnob->mcc_wrb_ctxt = (void *)__get_free_pages(GFP_KERNEL,
1556+ get_order(pnob->mcc_wrb_ctxt_size));
1557+ if (pnob->mcc_wrb_ctxt == NULL)
1558+ return -1;
1559+ /*
1560+ * Space for MCC compl. ring
1561+ */
1562+ pnob->mcc_cq_len = MCC_CQ_LEN;
1563+ pnob->mcc_cq_size = pnob->mcc_cq_len * sizeof(struct MCC_CQ_ENTRY_AMAP);
1564+ pnob->mcc_cq = pci_alloc_consistent(adapter->pdev, pnob->mcc_cq_size,
1565+ &pnob->mcc_cq_bus);
1566+ if (!pnob->mcc_cq_bus)
1567+ return -1;
1568+ return 0;
1569+}
1570+
1571+/*
1572+ This function creates the MCC request and completion ring required
1573+ for communicating with the ARM processor. The caller must have
1574+ allocated required amount of memory for the MCC ring and MCC
1575+ completion ring and posted the virtual address and number of
1576+ entries in the corresponding members (mcc_q and mcc_cq) in the
1577+ NetObject structure.
1578+
1579+ When this call is completed, all further communication with
1580+ ARM will switch from mailbox to this ring.
1581+
1582+ pnob - Pointer to the NetObject structure. This NetObject should
1583+ have been created using a previous call to be_create_netobj()
1584+*/
1585+int be_create_mcc_rings(struct be_net_object *pnob)
1586+{
1587+ int status = 0;
1588+ struct ring_desc rd;
1589+ struct be_function_object *pfob = &pnob->fn_obj;
1590+
1591+ memset(&rd, 0, sizeof(struct ring_desc));
1592+ if (pnob->mcc_cq_len) {
1593+ rd.va = pnob->mcc_cq;
1594+ rd.pa = pnob->mcc_cq_bus;
1595+ rd.length = pnob->mcc_cq_size;
1596+
1597+ status = be_cq_create(pfob, &rd,
1598+ pnob->mcc_cq_len * sizeof(struct MCC_CQ_ENTRY_AMAP),
1599+ false, /* solicited events */
1600+ true, /* nodelay */
1601+ 0, /* 0 Watermark since Nodelay is true */
1602+ &pnob->event_q_obj,
1603+ &pnob->mcc_cq_obj);
1604+
1605+ if (status != BE_SUCCESS)
1606+ return status;
1607+
1608+ pnob->mcc_cq_id = pnob->mcc_cq_obj.cq_id;
1609+ pnob->mcc_cq_created = 1;
1610+ }
1611+ if (pnob->mcc_q_len) {
1612+ rd.va = pnob->mcc_q;
1613+ rd.pa = pnob->mcc_q_bus;
1614+ rd.length = pnob->mcc_q_size;
1615+
1616+ status = be_mcc_ring_create(pfob, &rd,
1617+ pnob->mcc_q_len * sizeof(struct MCC_WRB_AMAP),
1618+ pnob->mcc_wrb_ctxt, pnob->mcc_wrb_ctxtLen,
1619+ &pnob->mcc_cq_obj, &pnob->mcc_q_obj);
1620+
1621+ if (status != BE_SUCCESS)
1622+ return status;
1623+
1624+ pnob->mcc_q_created = 1;
1625+ }
1626+ return BE_SUCCESS;
1627+}
1628+
1629+static int be_mcc_init(struct be_adapter *adapter)
1630+{
1631+ u32 r;
1632+ struct be_net_object *pnob;
1633+
1634+ pnob = adapter->net_obj;
1635+ memset(pnob->mcc_q, 0, pnob->mcc_q_size);
1636+ pnob->mcc_q_hd = 0;
1637+
1638+ memset(pnob->mcc_wrb_ctxt, 0, pnob->mcc_wrb_ctxt_size);
1639+
1640+ memset(pnob->mcc_cq, 0, pnob->mcc_cq_size);
1641+ pnob->mcc_cq_tl = 0;
1642+
1643+ r = be_create_mcc_rings(adapter->net_obj);
1644+ if (r != BE_SUCCESS)
1645+ return -1;
1646+
1647+ return 0;
1648+}
1649+
1650+static void be_remove(struct pci_dev *pdev)
1651+{
1652+ struct be_net_object *pnob;
1653+ struct be_adapter *adapter;
1654+
1655+ adapter = pci_get_drvdata(pdev);
1656+ if (!adapter)
1657+ return;
1658+
1659+ pci_set_drvdata(pdev, NULL);
1660+ pnob = (struct be_net_object *)adapter->net_obj;
1661+
1662+ flush_scheduled_work();
1663+
1664+ if (pnob) {
1665+ /* Unregister async callback function for link status updates */
1666+ if (pnob->mcc_q_created)
1667+ be_mcc_add_async_event_callback(&pnob->mcc_q_obj,
1668+ NULL, NULL);
1669+ netobject_cleanup(adapter, pnob);
1670+ }
1671+
1672+ if (adapter->csr_va)
1673+ iounmap(adapter->csr_va);
1674+ if (adapter->db_va)
1675+ iounmap(adapter->db_va);
1676+ if (adapter->pci_va)
1677+ iounmap(adapter->pci_va);
1678+
1679+ pci_release_regions(adapter->pdev);
1680+ pci_disable_device(adapter->pdev);
1681+
1682+ kfree(adapter->be_link_sts);
1683+ kfree(adapter->eth_statsp);
1684+
1685+ if (adapter->timer_ctxt.get_stats_timer.function)
1686+ del_timer_sync(&adapter->timer_ctxt.get_stats_timer);
1687+ kfree(adapter);
1688+}
1689+
1690+/*
1691+ * This function is called by the PCI sub-system when it finds a PCI
1692+ * device with dev/vendor IDs that match with one of our devices.
1693+ * All of the driver initialization is done in this function.
1694+ */
1695+static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
1696+{
1697+ int status = 0;
1698+ struct be_adapter *adapter;
1699+ struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD get_fwv;
1700+ struct be_net_object *pnob;
1701+ struct net_device *netdev;
1702+
1703+ status = pci_enable_device(pdev);
1704+ if (status)
1705+ goto error;
1706+
1707+ status = pci_request_regions(pdev, be_driver_name);
1708+ if (status)
1709+ goto error_pci_req;
1710+
1711+ pci_set_master(pdev);
1712+ adapter = kzalloc(sizeof(struct be_adapter), GFP_KERNEL);
1713+ if (adapter == NULL) {
1714+ status = -ENOMEM;
1715+ goto error_adapter;
1716+ }
1717+ adapter->dev_state = BE_DEV_STATE_NONE;
1718+ adapter->pdev = pdev;
1719+ pci_set_drvdata(pdev, adapter);
1720+
1721+ adapter->enable_aic = 1;
1722+ adapter->max_eqd = MAX_EQD;
1723+ adapter->min_eqd = 0;
1724+ adapter->cur_eqd = 0;
1725+
1726+ status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
1727+ if (!status) {
1728+ adapter->dma_64bit_cap = true;
1729+ } else {
1730+ adapter->dma_64bit_cap = false;
1731+ status = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1732+ if (status != 0) {
1733+ printk(KERN_ERR "Could not set PCI DMA Mask\n");
1734+ goto cleanup;
1735+ }
1736+ }
1737+
1738+ status = init_pci_be_function(adapter, pdev);
1739+ if (status != 0) {
1740+ printk(KERN_ERR "Failed to map PCI BARS\n");
1741+ status = -ENOMEM;
1742+ goto cleanup;
1743+ }
1744+
1745+ be_trace_set_level(DL_ALWAYS | DL_ERR);
1746+
1747+ adapter->be_link_sts = kmalloc(sizeof(struct BE_LINK_STATUS),
1748+ GFP_KERNEL);
1749+ if (adapter->be_link_sts == NULL) {
1750+ printk(KERN_ERR "Memory allocation for link status buffer failed\n");
1751+ status = -ENOMEM;
1752+ goto cleanup;
1753+ }
1754+ spin_lock_init(&adapter->txq_lock);
1755+
1756+ netdev = alloc_etherdev(sizeof(struct be_net_object));
1757+ if (netdev == NULL) {
1758+ status = -ENOMEM;
1759+ goto cleanup;
1760+ }
1761+ pnob = netdev->priv;
1762+ adapter->net_obj = pnob;
1763+ adapter->netdevp = netdev;
1764+ pnob->adapter = adapter;
1765+ pnob->netdev = netdev;
1766+
1767+ status = be_nob_ring_alloc(adapter, pnob);
1768+ if (status != 0)
1769+ goto cleanup;
1770+
1771+ status = be_nob_ring_init(adapter, pnob);
1772+ if (status != 0)
1773+ goto cleanup;
1774+
1775+ be_rxf_mac_address_read_write(&pnob->fn_obj, false, false, false,
1776+ false, false, netdev->dev_addr, NULL, NULL);
1777+
1778+ netdev->init = &benet_init;
1779+ netif_carrier_off(netdev);
1780+ netif_stop_queue(netdev);
1781+
1782+ SET_NETDEV_DEV(netdev, &(adapter->pdev->dev));
1783+
1784+ netif_napi_add(netdev, &pnob->napi, be_poll, 64);
1785+
1786+ /* if the rx_frag size is 2K, one page is shared as two RX frags */
1787+ pnob->rx_pg_shared = (pnob->rx_buf_size <= PAGE_SIZE / 2);
1788+ if (pnob->rx_buf_size != rxbuf_size) {
1789+ printk(KERN_WARNING
1790+ "Could not set Rx buffer size to %d. Using %d\n",
1791+ rxbuf_size, pnob->rx_buf_size);
1792+ rxbuf_size = pnob->rx_buf_size;
1793+ }
1794+
1795+ tasklet_init(&(adapter->sts_handler), be_process_intr,
1796+ (unsigned long)adapter);
1797+ adapter->tasklet_started = 1;
1798+ spin_lock_init(&(adapter->int_lock));
1799+
1800+ status = be_register_isr(adapter, pnob);
1801+ if (status != 0)
1802+ goto cleanup;
1803+
1804+ adapter->rx_csum = 1;
1805+ adapter->max_rx_coal = BE_LRO_MAX_PKTS;
1806+
1807+ memset(&get_fwv, 0,
1808+ sizeof(struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD));
1809+ printk(KERN_INFO "BladeEngine Driver version:%s. "
1810+ "Copyright ServerEngines, Corporation 2005 - 2008\n",
1811+ be_drvr_ver);
1812+ status = be_function_get_fw_version(&pnob->fn_obj, &get_fwv, NULL,
1813+ NULL);
1814+ if (status == BE_SUCCESS) {
1815+ strncpy(be_fw_ver, get_fwv.firmware_version_string, 32);
1816+ printk(KERN_INFO "BladeEngine Firmware Version:%s\n",
1817+ get_fwv.firmware_version_string);
1818+ } else {
1819+ printk(KERN_WARNING "Unable to get BE Firmware Version\n");
1820+ }
1821+
1822+ sema_init(&adapter->get_eth_stat_sem, 0);
1823+ init_timer(&adapter->timer_ctxt.get_stats_timer);
1824+ atomic_set(&adapter->timer_ctxt.get_stat_flag, 0);
1825+ adapter->timer_ctxt.get_stats_timer.function =
1826+ &be_get_stats_timer_handler;
1827+
1828+ status = be_mcc_create(adapter);
1829+ if (status < 0)
1830+ goto cleanup;
1831+ status = be_mcc_init(adapter);
1832+ if (status < 0)
1833+ goto cleanup;
1834+
1835+
1836+ status = be_mcc_add_async_event_callback(&adapter->net_obj->mcc_q_obj,
1837+ be_link_status_async_callback, (void *)adapter);
1838+ if (status != BE_SUCCESS) {
1839+ printk(KERN_WARNING "add_async_event_callback failed\n");
1840+ printk(KERN_WARNING
1841+ "Link status changes may not be reflected\n");
1842+ }
1843+
1844+ status = register_netdev(netdev);
1845+ if (status != 0)
1846+ goto cleanup;
1847+ be_update_link_status(adapter);
1848+ adapter->dev_state = BE_DEV_STATE_INIT;
1849+ return 0;
1850+
1851+cleanup:
1852+ be_remove(pdev);
1853+ return status;
1854+error_adapter:
1855+ pci_release_regions(pdev);
1856+error_pci_req:
1857+ pci_disable_device(pdev);
1858+error:
1859+ printk(KERN_ERR "BladeEngine initialization failed\n");
1860+ return status;
1861+}
1862+
1863+/*
1864+ * Get the current link status and print it on the console
1865+ */
1866+void be_update_link_status(struct be_adapter *adapter)
1867+{
1868+ int status;
1869+ struct be_net_object *pnob = adapter->net_obj;
1870+
1871+ status = be_rxf_link_status(&pnob->fn_obj, adapter->be_link_sts, NULL,
1872+ NULL, NULL);
1873+ if (status == BE_SUCCESS) {
1874+ if (adapter->be_link_sts->mac0_speed &&
1875+ adapter->be_link_sts->mac0_duplex)
1876+ adapter->port0_link_sts = BE_PORT_LINK_UP;
1877+ else
1878+ adapter->port0_link_sts = BE_PORT_LINK_DOWN;
1879+
1880+ if (adapter->be_link_sts->mac1_speed &&
1881+ adapter->be_link_sts->mac1_duplex)
1882+ adapter->port1_link_sts = BE_PORT_LINK_UP;
1883+ else
1884+ adapter->port1_link_sts = BE_PORT_LINK_DOWN;
1885+
1886+ dev_info(&pnob->netdev->dev, "Link Properties:\n");
1887+ be_print_link_info(adapter->be_link_sts);
1888+ return;
1889+ }
1890+ dev_info(&pnob->netdev->dev, "Could not get link status\n");
1891+ return;
1892+}
1893+
1894+
1895+#ifdef CONFIG_PM
1896+static void
1897+be_pm_cleanup(struct be_adapter *adapter,
1898+ struct be_net_object *pnob, struct net_device *netdev)
1899+{
1900+ netif_carrier_off(netdev);
1901+ netif_stop_queue(netdev);
1902+
1903+ be_wait_nic_tx_cmplx_cmpl(pnob);
1904+ be_disable_eq_intr(pnob);
1905+
1906+ if (adapter->tasklet_started) {
1907+ tasklet_kill(&adapter->sts_handler);
1908+ adapter->tasklet_started = 0;
1909+ }
1910+
1911+ be_unregister_isr(adapter);
1912+ be_disable_intr(pnob);
1913+
1914+ be_tx_q_clean(pnob);
1915+ be_rx_q_clean(pnob);
1916+
1917+ be_destroy_netobj(pnob);
1918+}
1919+
1920+static int be_suspend(struct pci_dev *pdev, pm_message_t state)
1921+{
1922+ struct be_adapter *adapter = pci_get_drvdata(pdev);
1923+ struct net_device *netdev = adapter->netdevp;
1924+ struct be_net_object *pnob = (struct be_net_object *)netdev->priv;
1925+
1926+ adapter->dev_pm_state = adapter->dev_state;
1927+ adapter->dev_state = BE_DEV_STATE_SUSPEND;
1928+
1929+ netif_device_detach(netdev);
1930+ if (netif_running(netdev))
1931+ be_pm_cleanup(adapter, pnob, netdev);
1932+
1933+ pci_enable_wake(pdev, 3, 1); /* D3 Hot = 3 */
1934+ pci_enable_wake(pdev, 4, 1); /* D3 Cold = 4 */
1935+ pci_save_state(pdev);
1936+ pci_disable_device(pdev);
1937+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
1938+ return 0;
1939+}
1940+
1941+static void be_up(struct be_adapter *adapter)
1942+{
1943+ struct be_net_object *pnob = adapter->net_obj;
1944+
1945+ if (pnob->num_vlans != 0)
1946+ be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
1947+ pnob->vlan_tag, NULL, NULL, NULL);
1948+
1949+}
1950+
1951+static int be_resume(struct pci_dev *pdev)
1952+{
1953+ int status = 0;
1954+ struct be_adapter *adapter = pci_get_drvdata(pdev);
1955+ struct net_device *netdev = adapter->netdevp;
1956+ struct be_net_object *pnob = (struct be_net_object *)netdev->priv;
1957+
1958+ netif_device_detach(netdev);
1959+
1960+ status = pci_enable_device(pdev);
1961+ if (status)
1962+ return status;
1963+
1964+ pci_set_power_state(pdev, 0);
1965+ pci_restore_state(pdev);
1966+ pci_enable_wake(pdev, 3, 0); /* D3 Hot = 3 */
1967+ pci_enable_wake(pdev, 4, 0); /* 4 is D3 cold */
1968+
1969+ netif_carrier_on(netdev);
1970+ netif_start_queue(netdev);
1971+
1972+ if (netif_running(netdev)) {
1973+ be_rxf_mac_address_read_write(&pnob->fn_obj, false, false,
1974+ false, true, false, netdev->dev_addr, NULL, NULL);
1975+
1976+ status = be_nob_ring_init(adapter, pnob);
1977+ if (status < 0)
1978+ return status;
1979+
1980+ tasklet_init(&(adapter->sts_handler), be_process_intr,
1981+ (unsigned long)adapter);
1982+ adapter->tasklet_started = 1;
1983+
1984+ if (be_register_isr(adapter, pnob) != 0) {
1985+ printk(KERN_ERR "be_register_isr failed\n");
1986+ return status;
1987+ }
1988+
1989+
1990+ status = be_mcc_init(adapter);
1991+ if (status < 0) {
1992+ printk(KERN_ERR "be_mcc_init failed\n");
1993+ return status;
1994+ }
1995+ be_update_link_status(adapter);
1996+ /*
1997+ * Register async call back function to handle link
1998+ * status updates
1999+ */
2000+ status = be_mcc_add_async_event_callback(
2001+ &adapter->net_obj->mcc_q_obj,
2002+ be_link_status_async_callback, (void *)adapter);
2003+ if (status != BE_SUCCESS) {
2004+ printk(KERN_WARNING "add_async_event_callback failed\n");
2005+ printk(KERN_WARNING
2006+ "Link status changes may not be reflected\n");
2007+ }
2008+ be_enable_intr(pnob);
2009+ be_enable_eq_intr(pnob);
2010+ be_up(adapter);
2011+ }
2012+ netif_device_attach(netdev);
2013+ adapter->dev_state = adapter->dev_pm_state;
2014+ return 0;
2015+
2016+}
2017+
2018+#endif
2019+
2020+/* Wait until no more pending transmits */
2021+void be_wait_nic_tx_cmplx_cmpl(struct be_net_object *pnob)
2022+{
2023+ int i;
2024+
2025+ /* Wait for 20us * 50000 (= 1s) and no more */
2026+ i = 0;
2027+ while ((pnob->tx_q_tl != pnob->tx_q_hd) && (i < 50000)) {
2028+ ++i;
2029+ udelay(20);
2030+ }
2031+
2032+ /* Warn if some transmits are still pending */
2033+ if (i >= 50000) {
2034+ printk(KERN_WARNING
2035+ "Did not receive completions for all TX requests\n");
2036+ }
2037+}
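/*
 * A minimal standalone sketch of the bounded-poll pattern used above
 * (20us x 50000 iterations, ~1s worst case). The done() and delay_20us()
 * callbacks are hypothetical stand-ins for the tx_q head/tail comparison
 * and udelay(20); they are not part of this driver.
 */
#include <stdbool.h>

static bool poll_bounded(bool (*done)(void), void (*delay_20us)(void))
{
	int i;

	for (i = 0; i < 50000; i++) {	/* 50000 * 20us = ~1s cap */
		if (done())
			return true;	/* condition met in time */
		delay_20us();
	}
	return false;			/* timed out; caller warns */
}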
2038+
2039+static struct pci_driver be_driver = {
2040+ .name = be_driver_name,
2041+ .id_table = be_device_id_table,
2042+ .probe = be_probe,
2043+#ifdef CONFIG_PM
2044+ .suspend = be_suspend,
2045+ .resume = be_resume,
2046+#endif
2047+ .remove = be_remove
2048+};
2049+
2050+/*
2051+ * Module init entry point. Registers our device and returns.
2052+ * Our probe will be called if the device is found.
2053+ */
2054+static int __init be_init_module(void)
2055+{
2056+ int ret;
2057+
2058+ if (rxbuf_size != 8192 && rxbuf_size != 4096 && rxbuf_size != 2048) {
2059+ printk(KERN_WARNING
2060+ "Unsupported receive buffer size (%d) requested\n",
2061+ rxbuf_size);
2062+ printk(KERN_WARNING
2063+ "Must be 2048, 4096 or 8192. Defaulting to 2048\n");
2064+ rxbuf_size = 2048;
2065+ }
2066+
2067+ ret = pci_register_driver(&be_driver);
2068+
2069+ return ret;
2070+}
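/*
 * Illustrative usage, assuming rxbuf_size is declared as a module
 * parameter elsewhere in be_init.c and the module is named benet
 * (neither declaration is visible in this hunk): loading with a 4K
 * receive buffer would look like
 *
 *	modprobe benet rxbuf_size=4096
 *
 * Any value other than 2048, 4096 or 8192 falls back to 2048, as
 * enforced in be_init_module() above.
 */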
2071+
2072+module_init(be_init_module);
2073+
2074+/*
2075+ * be_exit_module - Driver Exit Cleanup Routine
2076+ */
2077+static void __exit be_exit_module(void)
2078+{
2079+ pci_unregister_driver(&be_driver);
2080+}
2081+
2082+module_exit(be_exit_module);
2083--- /dev/null
2084+++ b/drivers/staging/benet/be_int.c
2085@@ -0,0 +1,872 @@
2086+/*
2087+ * Copyright (C) 2005 - 2008 ServerEngines
2088+ * All rights reserved.
2089+ *
2090+ * This program is free software; you can redistribute it and/or
2091+ * modify it under the terms of the GNU General Public License version 2
2092+ * as published by the Free Software Foundation. The full GNU General
2093+ * Public License is included in this distribution in the file called COPYING.
2094+ *
2095+ * Contact Information:
2096+ * linux-drivers@serverengines.com
2097+ *
2098+ * ServerEngines
2099+ * 209 N. Fair Oaks Ave
2100+ * Sunnyvale, CA 94085
2101+ */
2102+#include <linux/if_vlan.h>
2103+#include <linux/inet_lro.h>
2104+
2105+#include "benet.h"
2106+
2107+/* number of bytes of RX frame that are copied to skb->data */
2108+#define BE_HDR_LEN 64
2109+
2110+#define NETIF_RX(skb) netif_receive_skb(skb)
2111+#define VLAN_ACCEL_RX(skb, pnob, vt) \
2112+ vlan_hwaccel_rx(skb, pnob->vlan_grp, vt)
2113+
2114+/*
2115+ This function notifies BladeEngine of the number of completion
2116+ entries processed from the specified completion queue by writing
2117+ the number of popped entries to the door bell.
2118+
2119+ pnob - Pointer to the NetObject structure
2120+ n - Number of completion entries processed
2121+ cq_id - Queue ID of the completion queue for which notification
2122+ is being done.
2123+ re_arm - 1 - rearm the completion ring to generate an event.
2124+ - 0 - don't rearm the completion ring to generate an event
2125+*/
2126+void be_notify_cmpl(struct be_net_object *pnob, int n, int cq_id, int re_arm)
2127+{
2128+ struct CQ_DB_AMAP cqdb;
2129+
2130+ cqdb.dw[0] = 0;
2131+ AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, cq_id);
2132+ AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, re_arm);
2133+ AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, n);
2134+ PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
2135+}
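/*
 * A sketch of what the AMAP packing in be_notify_cmpl() amounts to: the
 * CQ doorbell is a single 32-bit word with the queue id, re-arm flag and
 * number-popped count packed into bit fields. The field positions and
 * widths below are illustrative assumptions only; the authoritative
 * layout is the CQ_DB AMAP descriptor in doorbells.h.
 */
#include <stdint.h>

static uint32_t cq_db_pack(uint32_t qid, uint32_t rearm, uint32_t popped)
{
	uint32_t db = 0;

	db |= qid & 0x3ff;		/* assumed: queue id field */
	db |= (popped & 0x1fff) << 16;	/* assumed: num_popped field */
	db |= (rearm & 0x1) << 29;	/* assumed: re-arm flag bit */
	return db;
}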
2136+
2137+/*
2138+ * adds additional receive frags indicated by BE, starting from the given
2139+ * frag index (fi), to the specified skb's frag list
2140+ */
2141+static void
2142+add_skb_frags(struct be_net_object *pnob, struct sk_buff *skb,
2143+ u32 nresid, u32 fi)
2144+{
2145+ struct be_adapter *adapter = pnob->adapter;
2146+ u32 sk_frag_idx, n;
2147+ struct be_rx_page_info *rx_page_info;
2148+ u32 frag_sz = pnob->rx_buf_size;
2149+
2150+ sk_frag_idx = skb_shinfo(skb)->nr_frags;
2151+ while (nresid) {
2152+ index_inc(&fi, pnob->rx_q_len);
2153+
2154+ rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
2155+ pnob->rx_ctxt[fi] = NULL;
2156+ if ((rx_page_info->page_offset) ||
2157+ (pnob->rx_pg_shared == false)) {
2158+ pci_unmap_page(adapter->pdev,
2159+ pci_unmap_addr(rx_page_info, bus),
2160+ frag_sz, PCI_DMA_FROMDEVICE);
2161+ }
2162+
2163+ n = min(nresid, frag_sz);
2164+ skb_shinfo(skb)->frags[sk_frag_idx].page = rx_page_info->page;
2165+ skb_shinfo(skb)->frags[sk_frag_idx].page_offset
2166+ = rx_page_info->page_offset;
2167+ skb_shinfo(skb)->frags[sk_frag_idx].size = n;
2168+
2169+ sk_frag_idx++;
2170+ skb->len += n;
2171+ skb->data_len += n;
2172+ skb_shinfo(skb)->nr_frags++;
2173+ nresid -= n;
2174+
2175+ memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
2176+ atomic_dec(&pnob->rx_q_posted);
2177+ }
2178+}
2179+
2180+/*
2181+ * This function processes incoming nic packets over various Rx queues.
2182+ * This function takes the adapter, the current Rx status descriptor
2183+ * entry and the Rx completion queue ID as argument.
2184+ */
2185+static inline int process_nic_rx_completion(struct be_net_object *pnob,
2186+ struct ETH_RX_COMPL_AMAP *rxcp)
2187+{
2188+ struct be_adapter *adapter = pnob->adapter;
2189+ struct sk_buff *skb;
2190+ int udpcksm, tcpcksm;
2191+ int n;
2192+ u32 nresid, fi;
2193+ u32 frag_sz = pnob->rx_buf_size;
2194+ u8 *va;
2195+ struct be_rx_page_info *rx_page_info;
2196+ u32 numfrags, vtp, vtm, vlan_tag, pktsize;
2197+
2198+ fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
2199+ BUG_ON(fi >= pnob->rx_q_len); /* fi is u32 and cannot be negative */
2201+
2202+ rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
2203+ BUG_ON(!rx_page_info->page);
2204+ pnob->rx_ctxt[fi] = NULL;
2205+
2206+ /*
2207+ * If one page is used per fragment or if this is the second half of
2208+ * the page, unmap the page here
2209+ */
2210+ if ((rx_page_info->page_offset) || (pnob->rx_pg_shared == false)) {
2211+ pci_unmap_page(adapter->pdev,
2212+ pci_unmap_addr(rx_page_info, bus), frag_sz,
2213+ PCI_DMA_FROMDEVICE);
2214+ }
2215+
2216+ atomic_dec(&pnob->rx_q_posted);
2217+ udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
2218+ tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
2219+ pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
2220+ /*
2221+ * get rid of RX flush completions first.
2222+ */
2223+ if ((tcpcksm) && (udpcksm) && (pktsize == 32)) {
2224+ put_page(rx_page_info->page);
2225+ memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
2226+ return 0;
2227+ }
2228+ skb = netdev_alloc_skb(pnob->netdev, BE_HDR_LEN + NET_IP_ALIGN);
2229+ if (skb == NULL) {
2230+ dev_info(&pnob->netdev->dev, "alloc_skb() failed\n");
2231+ put_page(rx_page_info->page);
2232+ memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
2233+ goto free_frags;
2234+ }
2235+ skb_reserve(skb, NET_IP_ALIGN);
2236+
2237+ skb->dev = pnob->netdev;
2238+
2239+ n = min(pktsize, frag_sz);
2240+
2241+ va = page_address(rx_page_info->page) + rx_page_info->page_offset;
2242+ prefetch(va);
2243+
2244+ skb->len = skb->data_len = n;
2245+ if (n <= BE_HDR_LEN) {
2246+ memcpy(skb->data, va, n);
2247+ put_page(rx_page_info->page);
2248+ skb->data_len -= n;
2249+ skb->tail += n;
2250+ } else {
2251+
2252+ /* Setup the SKB with page buffer information */
2253+ skb_shinfo(skb)->frags[0].page = rx_page_info->page;
2254+ skb_shinfo(skb)->nr_frags++;
2255+
2256+ /* Copy the header into the skb_data */
2257+ memcpy(skb->data, va, BE_HDR_LEN);
2258+ skb_shinfo(skb)->frags[0].page_offset =
2259+ rx_page_info->page_offset + BE_HDR_LEN;
2260+ skb_shinfo(skb)->frags[0].size = n - BE_HDR_LEN;
2261+ skb->data_len -= BE_HDR_LEN;
2262+ skb->tail += BE_HDR_LEN;
2263+ }
2264+ memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
2265+ nresid = pktsize - n;
2266+
2267+ skb->protocol = eth_type_trans(skb, pnob->netdev);
2268+
2269+ if ((tcpcksm || udpcksm) && adapter->rx_csum)
2270+ skb->ip_summed = CHECKSUM_UNNECESSARY;
2271+ else
2272+ skb->ip_summed = CHECKSUM_NONE;
2273+ /*
2274+ * if we have more bytes left, the frame has been
2275+ * given to us in multiple fragments. This happens
2276+ * with Jumbo frames. Add the remaining fragments to
2277+ * skb->frags[] array.
2278+ */
2279+ if (nresid)
2280+ add_skb_frags(pnob, skb, nresid, fi);
2281+
2282+ /* update the true size of the skb. */
2283+ skb->truesize = skb->len + sizeof(struct sk_buff);
2284+
2285+ /*
2286+ * If this is an 802.3 frame or an 802.2 LLC frame
2287+ * (i.e., it carries a length field in the MAC header)
2288+ * and the frame length is greater than 64 bytes
2289+ */
2290+ if (((skb->protocol == ntohs(ETH_P_802_2)) ||
2291+ (skb->protocol == ntohs(ETH_P_802_3)))
2292+ && (pktsize > BE_HDR_LEN)) {
2293+ /*
2294+ * If the length given in the MAC header is less than the
2295+ * frame size, the frame is erroneous: drop it
2296+ */
2297+ if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) < pktsize) {
2298+ /* Increment Non Ether type II frames dropped */
2299+ adapter->be_stat.bes_802_3_dropped_frames++;
2300+
2301+ kfree_skb(skb);
2302+ return 0;
2303+ }
2304+ /*
2305+ * else if the length given in the MAC header is greater than
2306+ * the frame size, we should not be seeing such frames;
2307+ * count it as malformed and still pass the pkt to the stack
2308+ */
2309+ else if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) > pktsize) {
2310+ /* Increment Non Ether type II frames malformed */
2311+ adapter->be_stat.bes_802_3_malformed_frames++;
2312+ }
2313+ }
2314+
2315+ vtp = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
2316+ vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
2317+ if (vtp && vtm) {
2318+ /* Vlan tag present in pkt and BE found
2319+ * that the tag matched an entry in VLAN table
2320+ */
2321+ if (!pnob->vlan_grp || pnob->num_vlans == 0) {
2322+ /* But we have no VLANs configured.
2323+ * This should never happen. Drop the packet.
2324+ */
2325+ dev_info(&pnob->netdev->dev,
2326+ "BladeEngine: Unexpected vlan tagged packet\n");
2327+ kfree_skb(skb);
2328+ return 0;
2329+ }
2330+ /* pass the VLAN packet to stack */
2331+ vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
2332+ VLAN_ACCEL_RX(skb, pnob, be16_to_cpu(vlan_tag));
2333+
2334+ } else {
2335+ NETIF_RX(skb);
2336+ }
2337+ return 0;
2338+
2339+free_frags:
2340+ /* free all frags associated with the current rxcp */
2341+ numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
2342+ while (numfrags-- > 1) {
2343+ index_inc(&fi, pnob->rx_q_len);
2344+
2345+ rx_page_info = (struct be_rx_page_info *)
2346+ pnob->rx_ctxt[fi];
2347+ pnob->rx_ctxt[fi] = (void *)NULL;
2348+ if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
2349+ pci_unmap_page(adapter->pdev,
2350+ pci_unmap_addr(rx_page_info, bus),
2351+ frag_sz, PCI_DMA_FROMDEVICE);
2352+ }
2353+
2354+ put_page(rx_page_info->page);
2355+ memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
2356+ atomic_dec(&pnob->rx_q_posted);
2357+ }
2358+ return -ENOMEM;
2359+}
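/*
 * The 802.3/802.2 length check above, in isolation: for non Ethernet-II
 * frames the 16-bit field at byte offset 12 is a length, not a type, and
 * a frame whose on-wire size exceeds (length field + header length) is
 * dropped as erroneous. A self-contained sketch:
 */
#include <stdint.h>
#include <arpa/inet.h>		/* ntohs() */

#define ETH_HDR_LEN 14		/* dst(6) + src(6) + len/type(2) */

static int is_bad_8023_frame(const uint8_t *frame, unsigned int pktsize)
{
	uint16_t len_field = ntohs(*(const uint16_t *)(frame + 12));

	/* mirrors the drop condition in process_nic_rx_completion() */
	return (unsigned int)(len_field + ETH_HDR_LEN) < pktsize;
}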
2360+
2361+static void process_nic_rx_completion_lro(struct be_net_object *pnob,
2362+ struct ETH_RX_COMPL_AMAP *rxcp)
2363+{
2364+ struct be_adapter *adapter = pnob->adapter;
2365+ struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
2366+ unsigned int udpcksm, tcpcksm;
2367+ u32 numfrags, vlanf, vtm, vlan_tag, nresid;
2368+ u16 vlant;
2369+ unsigned int fi, idx, n;
2370+ struct be_rx_page_info *rx_page_info;
2371+ u32 frag_sz = pnob->rx_buf_size, pktsize;
2372+ bool rx_coal = (adapter->max_rx_coal > 1);
2373+ u8 err, *va;
2374+ __wsum csum = 0;
2375+
2376+ if (AMAP_GET_BITS_PTR(ETH_RX_COMPL, ipsec, rxcp)) {
2377+ /* Drop the pkt and move to the next completion. */
2378+ adapter->be_stat.bes_rx_misc_pkts++;
2379+ return;
2380+ }
2381+ err = AMAP_GET_BITS_PTR(ETH_RX_COMPL, err, rxcp);
2382+ if (err || !rx_coal) {
2383+ /* We won't coalesce Rx pkts if the err bit is set;
2384+ * take the path of normal completion processing */
2385+ process_nic_rx_completion(pnob, rxcp);
2386+ return;
2387+ }
2388+
2389+ fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
2390+ BUG_ON(fi >= pnob->rx_q_len); /* fi is unsigned and cannot be negative */
2392+ rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
2393+ BUG_ON(!rx_page_info->page);
2394+ pnob->rx_ctxt[fi] = (void *)NULL;
2395+ /* If one page is used per fragment or if this is the
2396+ * second half of the page, unmap the page here
2397+ */
2398+ if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
2399+ pci_unmap_page(adapter->pdev,
2400+ pci_unmap_addr(rx_page_info, bus),
2401+ frag_sz, PCI_DMA_FROMDEVICE);
2402+ }
2403+
2404+ numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
2405+ udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
2406+ tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
2407+ vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
2408+ vlant = be16_to_cpu(vlan_tag);
2409+ vlanf = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
2410+ vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
2411+ pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
2412+
2413+ atomic_dec(&pnob->rx_q_posted);
2414+
2415+ if (tcpcksm && udpcksm && pktsize == 32) {
2416+ /* flush completion entries */
2417+ put_page(rx_page_info->page);
2418+ memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
2419+ return;
2420+ }
2421+ /* Only one of udpcksm and tcpcksm can be set */
2422+ BUG_ON(udpcksm && tcpcksm);
2423+
2424+ /* jumbo frames could come in multiple fragments */
2425+ BUG_ON(numfrags != ((pktsize + (frag_sz - 1)) / frag_sz));
2426+ n = min(pktsize, frag_sz);
2427+ nresid = pktsize - n; /* will be useful for jumbo pkts */
2428+ idx = 0;
2429+
2430+ va = page_address(rx_page_info->page) + rx_page_info->page_offset;
2431+ prefetch(va);
2432+ rx_frags[idx].page = rx_page_info->page;
2433+ rx_frags[idx].page_offset = (rx_page_info->page_offset);
2434+ rx_frags[idx].size = n;
2435+ memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
2436+
2437+ /* If we got multiple fragments, we have more data. */
2438+ while (nresid) {
2439+ idx++;
2440+ index_inc(&fi, pnob->rx_q_len);
2441+
2442+ rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
2443+ pnob->rx_ctxt[fi] = (void *)NULL;
2444+ if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
2445+ pci_unmap_page(adapter->pdev,
2446+ pci_unmap_addr(rx_page_info, bus),
2447+ frag_sz, PCI_DMA_FROMDEVICE);
2448+ }
2449+
2450+ n = min(nresid, frag_sz);
2451+ rx_frags[idx].page = rx_page_info->page;
2452+ rx_frags[idx].page_offset = (rx_page_info->page_offset);
2453+ rx_frags[idx].size = n;
2454+
2455+ nresid -= n;
2456+ memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
2457+ atomic_dec(&pnob->rx_q_posted);
2458+ }
2459+
2460+ if (likely(!(vlanf && vtm))) {
2461+ lro_receive_frags(&pnob->lro_mgr, rx_frags,
2462+ pktsize, pktsize,
2463+ (void *)(unsigned long)csum, csum);
2464+ } else {
2465+ /* Vlan tag present in pkt and BE found
2466+ * that the tag matched an entry in VLAN table
2467+ */
2468+ if (unlikely(!pnob->vlan_grp || pnob->num_vlans == 0)) {
2469+ /* But we have no VLANs configured.
2470+ * This should never happen. Drop the packet.
2471+ */
2472+ dev_info(&pnob->netdev->dev,
2473+ "BladeEngine: Unexpected vlan tagged packet\n");
2474+ return;
2475+ }
2476+ /* pass the VLAN packet to stack */
2477+ lro_vlan_hwaccel_receive_frags(&pnob->lro_mgr,
2478+ rx_frags, pktsize, pktsize,
2479+ pnob->vlan_grp, vlant,
2480+ (void *)(unsigned long)csum,
2481+ csum);
2482+ }
2483+
2484+ adapter->be_stat.bes_rx_coal++;
2485+}
2486+
2487+struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *pnob)
2488+{
2489+ struct ETH_RX_COMPL_AMAP *rxcp = &pnob->rx_cq[pnob->rx_cq_tl];
2490+ u32 valid, ct;
2491+
2492+ valid = AMAP_GET_BITS_PTR(ETH_RX_COMPL, valid, rxcp);
2493+ if (valid == 0)
2494+ return NULL;
2495+
2496+ ct = AMAP_GET_BITS_PTR(ETH_RX_COMPL, ct, rxcp);
2497+ if (ct != 0) {
2498+ /* Invalid chute #; treat as error */
2499+ AMAP_SET_BITS_PTR(ETH_RX_COMPL, err, rxcp, 1);
2500+ }
2501+
2502+ be_adv_rxcq_tl(pnob);
2503+ AMAP_SET_BITS_PTR(ETH_RX_COMPL, valid, rxcp, 0);
2504+ return rxcp;
2505+}
2506+
2507+static void update_rx_rate(struct be_adapter *adapter)
2508+{
2509+ /* update the rate once every two seconds */
2510+ if ((jiffies - adapter->eth_rx_jiffies) > 2 * (HZ)) {
2511+ u32 r;
2512+ r = adapter->eth_rx_bytes /
2513+ ((jiffies - adapter->eth_rx_jiffies) / (HZ));
2514+ r = (r / 1000000); /* MB/Sec */
2515+
2516+ /* Megabits/sec */
2517+ adapter->be_stat.bes_eth_rx_rate = (r * 8);
2518+ adapter->eth_rx_jiffies = jiffies;
2519+ adapter->eth_rx_bytes = 0;
2520+ }
2521+}
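/*
 * The rate arithmetic in update_rx_rate(), factored out: bytes received
 * over an interval measured in jiffies become megabits/sec. The caller
 * must guarantee elapsed_jiffies >= hz (as the 2*HZ guard above does)
 * to avoid a division by zero.
 */
#include <stdint.h>

static uint32_t rate_mbps(uint32_t bytes, unsigned long elapsed_jiffies,
			  unsigned long hz)
{
	uint32_t bytes_per_sec = bytes / (elapsed_jiffies / hz);

	return (bytes_per_sec / 1000000) * 8;	/* MB/s -> Mbit/s */
}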
2522+
2523+static int process_rx_completions(struct be_net_object *pnob, int max_work)
2524+{
2525+ struct be_adapter *adapter = pnob->adapter;
2526+ struct ETH_RX_COMPL_AMAP *rxcp;
2527+ u32 nc = 0;
2528+ unsigned int pktsize;
2529+
2530+ while (max_work && (rxcp = be_get_rx_cmpl(pnob))) {
2531+ prefetch(rxcp);
2532+ pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
2533+ process_nic_rx_completion_lro(pnob, rxcp);
2534+ adapter->eth_rx_bytes += pktsize;
2535+ update_rx_rate(adapter);
2536+ nc++;
2537+ max_work--;
2538+ adapter->be_stat.bes_rx_compl++;
2539+ }
2540+ if (likely(adapter->max_rx_coal > 1)) {
2541+ adapter->be_stat.bes_rx_flush++;
2542+ lro_flush_all(&pnob->lro_mgr);
2543+ }
2544+
2545+ /* Refill the queue */
2546+ if (atomic_read(&pnob->rx_q_posted) < 900)
2547+ be_post_eth_rx_buffs(pnob);
2548+
2549+ return nc;
2550+}
2551+
2552+static struct ETH_TX_COMPL_AMAP *be_get_tx_cmpl(struct be_net_object *pnob)
2553+{
2554+ struct ETH_TX_COMPL_AMAP *txcp = &pnob->tx_cq[pnob->tx_cq_tl];
2555+ u32 valid;
2556+
2557+ valid = AMAP_GET_BITS_PTR(ETH_TX_COMPL, valid, txcp);
2558+ if (valid == 0)
2559+ return NULL;
2560+
2561+ AMAP_SET_BITS_PTR(ETH_TX_COMPL, valid, txcp, 0);
2562+ be_adv_txcq_tl(pnob);
2563+ return txcp;
2564+
2565+}
2566+
2567+void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx)
2568+{
2569+ struct be_adapter *adapter = pnob->adapter;
2570+ int cur_index, tx_wrbs_completed = 0;
2571+ struct sk_buff *skb;
2572+ u64 busaddr, pa, pa_lo, pa_hi;
2573+ struct ETH_WRB_AMAP *wrb;
2574+ u32 frag_len, last_index, j;
2575+
2576+ last_index = tx_compl_lastwrb_idx_get(pnob);
2577+ BUG_ON(last_index != end_idx);
2578+ pnob->tx_ctxt[pnob->tx_q_tl] = NULL;
2579+ do {
2580+ cur_index = pnob->tx_q_tl;
2581+ wrb = &pnob->tx_q[cur_index];
2582+ pa_hi = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb);
2583+ pa_lo = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb);
2584+ frag_len = AMAP_GET_BITS_PTR(ETH_WRB, frag_len, wrb);
2585+ busaddr = (pa_hi << 32) | pa_lo;
2586+ if (busaddr != 0) {
2587+ pa = le64_to_cpu(busaddr);
2588+ pci_unmap_single(adapter->pdev, pa,
2589+ frag_len, PCI_DMA_TODEVICE);
2590+ }
2591+ if (cur_index == last_index) {
2592+ skb = (struct sk_buff *)pnob->tx_ctxt[cur_index];
2593+ BUG_ON(!skb);
2594+ for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
2595+ struct skb_frag_struct *frag;
2596+ frag = &skb_shinfo(skb)->frags[j];
2597+ pci_unmap_page(adapter->pdev,
2598+ (ulong) frag->page, frag->size,
2599+ PCI_DMA_TODEVICE);
2600+ }
2601+ kfree_skb(skb);
2602+ pnob->tx_ctxt[cur_index] = NULL;
2603+ } else {
2604+ BUG_ON(pnob->tx_ctxt[cur_index]);
2605+ }
2606+ tx_wrbs_completed++;
2607+ be_adv_txq_tl(pnob);
2608+ } while (cur_index != last_index);
2609+ atomic_sub(tx_wrbs_completed, &pnob->tx_q_used);
2610+}
2611+
2612+/* there is no need to take an SMP lock here since currently
2613+ * we have only one instance of the tasklet that does completion
2614+ * processing.
2615+ */
2616+static void process_nic_tx_completions(struct be_net_object *pnob)
2617+{
2618+ struct be_adapter *adapter = pnob->adapter;
2619+ struct ETH_TX_COMPL_AMAP *txcp;
2620+ struct net_device *netdev = pnob->netdev;
2621+ u32 end_idx, num_processed = 0;
2622+
2623+ adapter->be_stat.bes_tx_events++;
2624+
2625+ while ((txcp = be_get_tx_cmpl(pnob))) {
2626+ end_idx = AMAP_GET_BITS_PTR(ETH_TX_COMPL, wrb_index, txcp);
2627+ process_one_tx_compl(pnob, end_idx);
2628+ num_processed++;
2629+ adapter->be_stat.bes_tx_compl++;
2630+ }
2631+ be_notify_cmpl(pnob, num_processed, pnob->tx_cq_id, 1);
2632+ /*
2633+ * We got Tx completions and have usable WRBs.
2634+ * If the netdev's queue has been stopped
2635+ * because we had run out of WRBs, wake it now.
2636+ */
2637+ spin_lock(&adapter->txq_lock);
2638+ if (netif_queue_stopped(netdev)
2639+ && atomic_read(&pnob->tx_q_used) < pnob->tx_q_len / 2) {
2640+ netif_wake_queue(netdev);
2641+ }
2642+ spin_unlock(&adapter->txq_lock);
2643+}
2644+
2645+static u32 post_rx_buffs(struct be_net_object *pnob, struct list_head *rxbl)
2646+{
2647+ u32 nposted = 0;
2648+ struct ETH_RX_D_AMAP *rxd = NULL;
2649+ struct be_recv_buffer *rxbp;
2650+ void **rx_ctxp;
2651+ struct RQ_DB_AMAP rqdb;
2652+
2653+ rx_ctxp = pnob->rx_ctxt;
2654+
2655+ while (!list_empty(rxbl) &&
2656+ (rx_ctxp[pnob->rx_q_hd] == NULL) && nposted < 255) {
2657+
2658+ rxbp = list_first_entry(rxbl, struct be_recv_buffer, rxb_list);
2659+ list_del(&rxbp->rxb_list);
2660+ rxd = pnob->rx_q + pnob->rx_q_hd;
2661+ AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_lo, rxd, rxbp->rxb_pa_lo);
2662+ AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_hi, rxd, rxbp->rxb_pa_hi);
2663+
2664+ rx_ctxp[pnob->rx_q_hd] = rxbp->rxb_ctxt;
2665+ be_adv_rxq_hd(pnob);
2666+ nposted++;
2667+ }
2668+
2669+ if (nposted) {
2670+ /* Now press the door bell to notify BladeEngine. */
2671+ rqdb.dw[0] = 0;
2672+ AMAP_SET_BITS_PTR(RQ_DB, numPosted, &rqdb, nposted);
2673+ AMAP_SET_BITS_PTR(RQ_DB, rq, &rqdb, pnob->rx_q_id);
2674+ PD_WRITE(&pnob->fn_obj, erx_rq_db, rqdb.dw[0]);
2675+ }
2676+ atomic_add(nposted, &pnob->rx_q_posted);
2677+ return nposted;
2678+}
2679+
2680+void be_post_eth_rx_buffs(struct be_net_object *pnob)
2681+{
2682+ struct be_adapter *adapter = pnob->adapter;
2683+ u32 num_bufs, r;
2684+ u64 busaddr = 0, tmp_pa;
2685+ u32 max_bufs, pg_hd;
2686+ u32 frag_size;
2687+ struct be_recv_buffer *rxbp;
2688+ struct list_head rxbl;
2689+ struct be_rx_page_info *rx_page_info;
2690+ struct page *page = NULL;
2691+ u32 page_order = 0;
2692+ gfp_t alloc_flags = GFP_ATOMIC;
2693+
2694+ BUG_ON(!adapter);
2695+
2696+ max_bufs = 64; /* should be even # <= 255. */
2697+
2698+ frag_size = pnob->rx_buf_size;
2699+ page_order = get_order(frag_size);
2700+
2701+ if (frag_size == 8192)
2702+ alloc_flags |= (gfp_t) __GFP_COMP;
2703+ /*
2704+ * Form a linked list of RECV_BUFFER structures to be posted.
2705+ * We will post an even number of buffers so that pages can be
2706+ * shared.
2707+ */
2708+ INIT_LIST_HEAD(&rxbl);
2709+
2710+ for (num_bufs = 0; num_bufs < max_bufs; ++num_bufs) {
2711+
2712+ rxbp = &pnob->eth_rx_bufs[num_bufs];
2713+ pg_hd = pnob->rx_pg_info_hd;
2714+ rx_page_info = &pnob->rx_page_info[pg_hd];
2715+
2716+ if (!page) {
2717+ /*
2718+ * before we allocate a page make sure that we
2719+ * have space in the RX queue to post the buffer.
2720+ * We check for two vacant slots since with
2721+ * 2K frags, we will need two slots.
2722+ */
2723+ if ((pnob->rx_ctxt[(pnob->rx_q_hd + num_bufs) &
2724+ (pnob->rx_q_len - 1)] != NULL)
2725+ || (pnob->rx_ctxt[(pnob->rx_q_hd + num_bufs + 1) &
2726+ (pnob->rx_q_len - 1)] != NULL)) {
2727+ break;
2728+ }
2729+ page = alloc_pages(alloc_flags, page_order);
2730+ if (unlikely(page == NULL)) {
2731+ adapter->be_stat.bes_ethrx_post_fail++;
2732+ pnob->rxbuf_post_fail++;
2733+ break;
2734+ }
2735+ pnob->rxbuf_post_fail = 0;
2736+ busaddr = pci_map_page(adapter->pdev, page, 0,
2737+ frag_size, PCI_DMA_FROMDEVICE);
2738+ rx_page_info->page_offset = 0;
2739+ rx_page_info->page = page;
2740+ /*
2741+ * If the page is not shared between two frags,
2742+ * alloc a new one on the next iteration
2743+ */
2744+ if (pnob->rx_pg_shared == false)
2745+ page = NULL;
2746+ } else {
2747+ get_page(page);
2748+ rx_page_info->page_offset += frag_size;
2749+ rx_page_info->page = page;
2750+ /*
2751+ * We are finished with the allocated page;
2752+ * alloc a new one on the next iteration
2753+ */
2754+ page = NULL;
2755+ }
2756+ rxbp->rxb_ctxt = (void *)rx_page_info;
2757+ index_inc(&pnob->rx_pg_info_hd, pnob->rx_q_len);
2758+
2759+ pci_unmap_addr_set(rx_page_info, bus, busaddr);
2760+ tmp_pa = busaddr + rx_page_info->page_offset;
2761+ rxbp->rxb_pa_lo = (tmp_pa & 0xFFFFFFFF);
2762+ rxbp->rxb_pa_hi = (tmp_pa >> 32);
2763+ rxbp->rxb_len = frag_size;
2764+ list_add_tail(&rxbp->rxb_list, &rxbl);
2765+ } /* End of for */
2766+
2767+ r = post_rx_buffs(pnob, &rxbl);
2768+ BUG_ON(r != num_bufs);
2769+ return;
2770+}
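/*
 * The page-sharing rule used above and in be_probe(), reduced to its
 * arithmetic: a 2K fragment size on a 4K-page system yields two receive
 * fragments per page (offsets 0 and 2048), while 4K and 8K fragments get
 * a page (or compound page) each.
 */
static unsigned int frags_per_page(unsigned int frag_size,
				   unsigned int page_size)
{
	/* mirrors: rx_pg_shared = (rx_buf_size <= PAGE_SIZE / 2) */
	return (frag_size <= page_size / 2) ? 2 : 1;
}
/* e.g. frags_per_page(2048, 4096) == 2, frags_per_page(4096, 4096) == 1 */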
2771+
2772+/*
2773+ * Interrupt service for network function. We just schedule the
2774+ * tasklet which does all completion processing.
2775+ */
2776+irqreturn_t be_int(int irq, void *dev)
2777+{
2778+ struct net_device *netdev = dev;
2779+ struct be_net_object *pnob = (struct be_net_object *)(netdev->priv);
2780+ struct be_adapter *adapter = pnob->adapter;
2781+ u32 isr;
2782+
2783+ isr = CSR_READ(&pnob->fn_obj, cev.isr1);
2784+ if (unlikely(!isr))
2785+ return IRQ_NONE;
2786+
2787+ spin_lock(&adapter->int_lock);
2788+ adapter->isr |= isr;
2789+ spin_unlock(&adapter->int_lock);
2790+
2791+ adapter->be_stat.bes_ints++;
2792+
2793+ tasklet_schedule(&adapter->sts_handler);
2794+ return IRQ_HANDLED;
2795+}
2796+
2797+/*
2798+ * Poll function called by NAPI with a work budget.
2799+ * We process as many UC, BC and MC receive completions
2800+ * as the budget allows and return the actual number of
2801+ * RX statuses processed.
2802+ */
2803+int be_poll(struct napi_struct *napi, int budget)
2804+{
2805+ struct be_net_object *pnob = container_of(napi, struct be_net_object, napi);
2806+ u32 work_done;
2807+
2808+ pnob->adapter->be_stat.bes_polls++;
2809+ work_done = process_rx_completions(pnob, budget);
2810+ BUG_ON(work_done > budget);
2811+
2812+ /* All consumed */
2813+ if (work_done < budget) {
2814+ netif_rx_complete(pnob->netdev, napi);
2815+ /* enable intr */
2816+ be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 1);
2817+ } else {
2818+ /* More to be consumed; continue with interrupts disabled */
2819+ be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 0);
2820+ }
2821+ return work_done;
2822+}
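/*
 * The NAPI contract be_poll() implements, in schematic form: consume at
 * most `budget` completions; if fewer were available, leave polling mode
 * and re-enable interrupts, otherwise stay in polling mode. The consume()
 * and rearm_irq() callbacks are hypothetical stand-ins for
 * process_rx_completions() and the re-arming be_notify_cmpl() call.
 */
static int napi_poll_sketch(int budget, int (*consume)(int max),
			    void (*rearm_irq)(void))
{
	int done = consume(budget);	/* processes up to budget pkts */

	if (done < budget)
		rearm_irq();		/* all consumed: exit polling */
	return done;
}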
2823+
2824+static struct EQ_ENTRY_AMAP *get_event(struct be_net_object *pnob)
2825+{
2826+ struct EQ_ENTRY_AMAP *eqp = &(pnob->event_q[pnob->event_q_tl]);
2827+ if (!AMAP_GET_BITS_PTR(EQ_ENTRY, Valid, eqp))
2828+ return NULL;
2829+ be_adv_eq_tl(pnob);
2830+ return eqp;
2831+}
2832+
2833+/*
2834+ * Processes all valid events in the event ring associated with given
2835+ * NetObject. Also, notifies BE the number of events processed.
2836+ */
2837+static inline u32 process_events(struct be_net_object *pnob)
2838+{
2839+ struct be_adapter *adapter = pnob->adapter;
2840+ struct EQ_ENTRY_AMAP *eqp;
2841+ u32 rid, num_events = 0;
2842+ struct net_device *netdev = pnob->netdev;
2843+
2844+ while ((eqp = get_event(pnob)) != NULL) {
2845+ adapter->be_stat.bes_events++;
2846+ rid = AMAP_GET_BITS_PTR(EQ_ENTRY, ResourceID, eqp);
2847+ if (rid == pnob->rx_cq_id) {
2848+ adapter->be_stat.bes_rx_events++;
2849+ netif_rx_schedule(netdev, &pnob->napi);
2850+ } else if (rid == pnob->tx_cq_id) {
2851+ process_nic_tx_completions(pnob);
2852+ } else if (rid == pnob->mcc_cq_id) {
2853+ be_mcc_process_cq(&pnob->mcc_q_obj, 1);
2854+ } else {
2855+ dev_info(&netdev->dev,
2856+ "Invalid EQ ResourceID %d\n", rid);
2857+ }
2858+ AMAP_SET_BITS_PTR(EQ_ENTRY, Valid, eqp, 0);
2859+ AMAP_SET_BITS_PTR(EQ_ENTRY, ResourceID, eqp, 0);
2860+ num_events++;
2861+ }
2862+ return num_events;
2863+}
2864+
2865+static void update_eqd(struct be_adapter *adapter, struct be_net_object *pnob)
2866+{
2867+ int status;
2868+ struct be_eq_object *eq_objectp;
2869+
2870+ /* update once a second */
2871+ if ((jiffies - adapter->ips_jiffies) > 1 * (HZ)) {
2872+ /* One second elapsed since last update */
2873+ u32 r, new_eqd = -1;
2874+ r = adapter->be_stat.bes_ints - adapter->be_stat.bes_prev_ints;
2875+ r = r / ((jiffies - adapter->ips_jiffies) / (HZ));
2876+ adapter->be_stat.bes_ips = r;
2877+ adapter->ips_jiffies = jiffies;
2878+ adapter->be_stat.bes_prev_ints = adapter->be_stat.bes_ints;
2879+ if (r > IPS_HI_WM && adapter->cur_eqd < adapter->max_eqd)
2880+ new_eqd = (adapter->cur_eqd + 8);
2881+ if (r < IPS_LO_WM && adapter->cur_eqd > adapter->min_eqd)
2882+ new_eqd = (adapter->cur_eqd - 8);
2883+ if (adapter->enable_aic && new_eqd != -1) {
2884+ eq_objectp = &pnob->event_q_obj;
2885+ status = be_eq_modify_delay(&pnob->fn_obj, 1,
2886+ &eq_objectp, &new_eqd, NULL,
2887+ NULL, NULL);
2888+ if (status == BE_SUCCESS)
2889+ adapter->cur_eqd = new_eqd;
2890+ }
2891+ }
2892+}
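/*
 * The adaptive interrupt coalescing decision above, isolated: once a
 * second the measured interrupt rate is compared against the watermarks
 * (IPS_HI_WM/IPS_LO_WM, defined as 18000/8000 in benet.h) and the EQ
 * delay is nudged by 8us, staying within [min_eqd, max_eqd].
 */
static int next_eq_delay(unsigned int ints_per_sec, int cur, int min, int max)
{
	if (ints_per_sec > 18000 && cur < max)
		return cur + 8;		/* too many ints: delay more */
	if (ints_per_sec < 8000 && cur > min)
		return cur - 8;		/* too few ints: delay less */
	return cur;			/* within band: leave as is */
}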
2893+
2894+/*
2895+ This function notifies BladeEngine of how many events were processed
2896+ from the event queue by ringing the corresponding door bell and
2897+ optionally re-arms the event queue.
2898+ n - number of events processed
2899+ re_arm - 1 - re-arm the EQ, 0 - do not re-arm the EQ
2900+
2901+*/
2902+static void be_notify_event(struct be_net_object *pnob, int n, int re_arm)
2903+{
2904+ struct CQ_DB_AMAP eqdb;
2905+ eqdb.dw[0] = 0;
2906+
2907+ AMAP_SET_BITS_PTR(CQ_DB, qid, &eqdb, pnob->event_q_id);
2908+ AMAP_SET_BITS_PTR(CQ_DB, rearm, &eqdb, re_arm);
2909+ AMAP_SET_BITS_PTR(CQ_DB, event, &eqdb, 1);
2910+ AMAP_SET_BITS_PTR(CQ_DB, num_popped, &eqdb, n);
2911+ /*
2912+ * Under some situations we see an interrupt and no valid
2913+ * EQ entry. To keep going, we need to ring the DB even if
2914+ * num_popped is 0.
2915+ */
2916+ PD_WRITE(&pnob->fn_obj, cq_db, eqdb.dw[0]);
2917+ return;
2918+}
2919+
2920+/*
2921+ * Called from the tasklet scheduled by the ISR. All real interrupt processing
2922+ * is done here.
2923+ */
2924+void be_process_intr(unsigned long context)
2925+{
2926+ struct be_adapter *adapter = (struct be_adapter *)context;
2927+ struct be_net_object *pnob = adapter->net_obj;
2928+ u32 isr, n;
2929+ ulong flags = 0;
2930+
2931+ isr = adapter->isr;
2932+
2933+ /*
2934+ * We create only one NIC event queue in Linux. Events are
2935+ * expected only on the first event queue
2936+ */
2937+ BUG_ON(isr & 0xfffffffe);
2938+ if ((isr & 1) == 0)
2939+ return; /* not our interrupt */
2940+ n = process_events(pnob);
2941+ /*
2942+ * Clear the event bit. adapter->isr is set by
2943+ * hard interrupt. Prevent race with lock.
2944+ */
2945+ spin_lock_irqsave(&adapter->int_lock, flags);
2946+ adapter->isr &= ~1;
2947+ spin_unlock_irqrestore(&adapter->int_lock, flags);
2948+ be_notify_event(pnob, n, 1);
2949+ /*
2950+ * If previous allocation attempts had failed and
2951+ * BE has used up all posted buffers, post RX buffers here
2952+ */
2953+ if (pnob->rxbuf_post_fail && atomic_read(&pnob->rx_q_posted) == 0)
2954+ be_post_eth_rx_buffs(pnob);
2955+ update_eqd(adapter, pnob);
2956+ return;
2957+}
2958--- /dev/null
2959+++ b/drivers/staging/benet/benet.h
2960@@ -0,0 +1,429 @@
2961+/*
2962+ * Copyright (C) 2005 - 2008 ServerEngines
2963+ * All rights reserved.
2964+ *
2965+ * This program is free software; you can redistribute it and/or
2966+ * modify it under the terms of the GNU General Public License version 2
2967+ * as published by the Free Software Foundation. The full GNU General
2968+ * Public License is included in this distribution in the file called COPYING.
2969+ *
2970+ * Contact Information:
2971+ * linux-drivers@serverengines.com
2972+ *
2973+ * ServerEngines
2974+ * 209 N. Fair Oaks Ave
2975+ * Sunnyvale, CA 94085
2976+ */
2977+#ifndef _BENET_H_
2978+#define _BENET_H_
2979+
2980+#include <linux/pci.h>
2981+#include <linux/netdevice.h>
2982+#include <linux/inet_lro.h>
2983+#include "hwlib.h"
2984+
2985+#define _SA_MODULE_NAME "net-driver"
2986+
2987+#define VLAN_VALID_BIT 0x8000
2988+#define BE_NUM_VLAN_SUPPORTED 32
2989+#define BE_PORT_LINK_DOWN 0000
2990+#define BE_PORT_LINK_UP 0001
2991+#define BE_MAX_TX_FRAG_COUNT (30)
2992+
2993+/* Flag bits for send operation */
2994+#define IPCS (1 << 0) /* Enable IP checksum offload */
2995+#define UDPCS (1 << 1) /* Enable UDP checksum offload */
2996+#define TCPCS (1 << 2) /* Enable TCP checksum offload */
2997+#define LSO (1 << 3) /* Enable Large Segment offload */
2998+#define ETHVLAN (1 << 4) /* Enable VLAN insert */
2999+#define ETHEVENT (1 << 5) /* Generate event on completion */
3000+#define ETHCOMPLETE (1 << 6) /* Generate completion when done */
3001+#define IPSEC (1 << 7) /* Enable IPSEC */
3002+#define FORWARD (1 << 8) /* Send the packet in forwarding path */
3003+#define FIN (1 << 9) /* Issue FIN segment */
3004+
3005+#define BE_MAX_MTU 8974
3006+
3007+#define BE_MAX_LRO_DESCRIPTORS 8
3008+#define BE_LRO_MAX_PKTS 64
3009+#define BE_MAX_FRAGS_PER_FRAME 6
3010+
3011+extern const char be_drvr_ver[];
3012+extern char be_fw_ver[];
3013+extern char be_driver_name[];
3014+
3015+extern struct ethtool_ops be_ethtool_ops;
3016+
3017+#define BE_DEV_STATE_NONE 0
3018+#define BE_DEV_STATE_INIT 1
3019+#define BE_DEV_STATE_OPEN 2
3020+#define BE_DEV_STATE_SUSPEND 3
3021+
3022+/* This structure is used to describe physical fragments to use
3023+ * for DMAing data from the NIC.
3024+ */
3025+struct be_recv_buffer {
3026+ struct list_head rxb_list; /* for maintaining a linked list */
3027+ void *rxb_va; /* buffer virtual address */
3028+ u32 rxb_pa_lo; /* low part of physical address */
3029+ u32 rxb_pa_hi; /* high part of physical address */
3030+ u32 rxb_len; /* length of recv buffer */
3031+ void *rxb_ctxt; /* context for OSM driver to use */
3032+};
3033+
3034+/*
3035+ * fragment list to describe scattered data.
3036+ */
3037+struct be_tx_frag_list {
3038+ u32 txb_len; /* Size of this fragment */
3039+ u32 txb_pa_lo; /* Lower 32 bits of 64 bit physical addr */
3040+ u32 txb_pa_hi; /* Higher 32 bits of 64 bit physical addr */
3041+};
3042+
3043+struct be_rx_page_info {
3044+ struct page *page;
3045+ dma_addr_t bus;
3046+ u16 page_offset;
3047+};
3048+
3049+/*
3050+ * This structure is the main tracking structure for a NIC interface.
3051+ */
3052+struct be_net_object {
3053+ /* MCC Ring - used to send fwcmds to embedded ARM processor */
3054+ struct MCC_WRB_AMAP *mcc_q; /* VA of the start of the ring */
3055+ u32 mcc_q_len; /* # of WRB entries in this ring */
3056+ u32 mcc_q_size;
3057+ u32 mcc_q_hd; /* MCC ring head */
3058+ u8 mcc_q_created; /* flag to help cleanup */
3059+ struct be_mcc_object mcc_q_obj; /* BECLIB's MCC ring Object */
3060+ dma_addr_t mcc_q_bus; /* DMA'ble bus address */
3061+
3062+ /* MCC Completion Ring - FW responses to fwcmds sent from MCC ring */
3063+ struct MCC_CQ_ENTRY_AMAP *mcc_cq; /* VA of the start of the ring */
3064+ u32 mcc_cq_len; /* # of compl. entries in this ring */
3065+ u32 mcc_cq_size;
3066+ u32 mcc_cq_tl; /* compl. ring tail */
3067+ u8 mcc_cq_created; /* flag to help cleanup */
3068+ struct be_cq_object mcc_cq_obj; /* BECLIB's MCC compl. ring object */
3069+ u32 mcc_cq_id; /* MCC ring ID */
3070+ dma_addr_t mcc_cq_bus; /* DMA'ble bus address */
3071+
3072+ struct ring_desc mb_rd; /* RD for MCC_MAIL_BOX */
3073+ void *mb_ptr; /* mailbox ptr to be freed */
3074+ dma_addr_t mb_bus; /* DMA'ble bus address */
3075+ u32 mb_size;
3076+
3077+ /* BEClib uses an array of context objects to track outstanding
3078+ * requests to the MCC. We need to allocate the same number of
3079+ * context entries as the number of entries in the MCC WRB ring
3080+ */
3081+ u32 mcc_wrb_ctxt_size;
3082+ void *mcc_wrb_ctxt; /* pointer to the context area */
3083+ u32 mcc_wrb_ctxtLen; /* Number of entries in the context */
3084+ /*
3085+ * NIC send request ring - used for xmitting raw ether frames.
3086+ */
3087+ struct ETH_WRB_AMAP *tx_q; /* VA of the start of the ring */
3088+ u32 tx_q_len; /* # of entries in the send ring */
3089+ u32 tx_q_size;
3090+ u32 tx_q_hd; /* Head index. Next req. goes here */
3091+ u32 tx_q_tl; /* Tail index; oldest outstanding req. */
3092+ u8 tx_q_created; /* flag to help cleanup */
3093+ struct be_ethsq_object tx_q_obj;/* BECLIB's send Q handle */
3094+ dma_addr_t tx_q_bus; /* DMA'ble bus address */
3095+ u32 tx_q_id; /* send queue ring ID */
3096+ u32 tx_q_port; /* 0 no binding, 1 port A, 2 port B */
3097+ atomic_t tx_q_used; /* # of WRBs used */
3098+ /* ptr to an array in which we store context info for each send req. */
3099+ void **tx_ctxt;
3100+ /*
3101+ * NIC Send compl. ring - completion status for all NIC frames xmitted.
3102+ */
3103+ struct ETH_TX_COMPL_AMAP *tx_cq;/* VA of start of the ring */
3104+ u32 txcq_len; /* # of entries in the ring */
3105+ u32 tx_cq_size;
3106+ /*
3107+ * index into compl ring where the host expects next completion entry
3108+ */
3109+ u32 tx_cq_tl;
3110+ u32 tx_cq_id; /* completion queue id */
3111+ u8 tx_cq_created; /* flag to help cleanup */
3112+ struct be_cq_object tx_cq_obj;
3113+ dma_addr_t tx_cq_bus; /* DMA'ble bus address */
3114+ /*
3115+ * Event Queue - all completion entries post events here.
3116+ */
3117+ struct EQ_ENTRY_AMAP *event_q; /* VA of start of event queue */
3118+ u32 event_q_len; /* # of entries */
3119+ u32 event_q_size;
3120+ u32 event_q_tl; /* Tail of the event queue */
3121+ u32 event_q_id; /* Event queue ID */
3122+ u8 event_q_created; /* flag to help cleanup */
3123+ struct be_eq_object event_q_obj; /* Queue handle */
3124+ dma_addr_t event_q_bus; /* DMA'ble bus address */
3125+ /*
3126+ * NIC receive queue - Data buffers to be used for receiving unicast,
3127+ * broadcast and multi-cast frames are posted here.
3128+ */
3129+ struct ETH_RX_D_AMAP *rx_q; /* VA of start of the queue */
3130+ u32 rx_q_len; /* # of entries */
3131+ u32 rx_q_size;
3132+ u32 rx_q_hd; /* Head of the queue */
3133+ atomic_t rx_q_posted; /* number of posted buffers */
3134+ u32 rx_q_id; /* queue ID */
3135+ u8 rx_q_created; /* flag to help cleanup */
3136+ struct be_ethrq_object rx_q_obj; /* NIC RX queue handle */
3137+ dma_addr_t rx_q_bus; /* DMA'ble bus address */
3138+ /*
3139+ * Pointer to an array of opaque context object for use by OSM driver
3140+ */
3141+ void **rx_ctxt;
3142+ /*
3143+ * NIC unicast RX completion queue - all unicast ether frame completion
3144+ * statuses from BE come here.
3145+ */
3146+ struct ETH_RX_COMPL_AMAP *rx_cq; /* VA of start of the queue */
3147+ u32 rx_cq_len; /* # of entries */
3148+ u32 rx_cq_size;
3149+ u32 rx_cq_tl; /* Tail of the queue */
3150+ u32 rx_cq_id; /* queue ID */
3151+ u8 rx_cq_created; /* flag to help cleanup */
3152+ struct be_cq_object rx_cq_obj; /* queue handle */
3153+ dma_addr_t rx_cq_bus; /* DMA'ble bus address */
3154+ struct be_function_object fn_obj; /* function object */
3155+ bool fn_obj_created;
3156+ u32 rx_buf_size; /* Size of the RX buffers */
3157+
3158+ struct net_device *netdev;
3159+ struct be_recv_buffer eth_rx_bufs[256]; /* to pass Rx buffer
3160+ addresses */
3161+ struct be_adapter *adapter; /* Pointer to OSM adapter */
3162+ u32 devno; /* OSM, network dev no. */
3163+ u32 use_port; /* Current active port */
3164+ struct be_rx_page_info *rx_page_info; /* Array of Rx buf pages */
3165+ u32 rx_pg_info_hd; /* Head of queue */
3166+ int rxbuf_post_fail; /* RxBuff posting fail count */
3167+ bool rx_pg_shared; /* Is an allocated page shared as two frags? */
3168+ struct vlan_group *vlan_grp;
3169+ u32 num_vlans; /* Number of vlans in BE's filter */
3170+ u16 vlan_tag[BE_NUM_VLAN_SUPPORTED]; /* vlans currently configured */
3171+ struct napi_struct napi;
3172+ struct net_lro_mgr lro_mgr;
3173+ struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
3174+};
3175+
3176+#define NET_FH(np) (&(np)->fn_obj)
3177+
3178+/*
3179+ * BE driver statistics.
3180+ */
3181+struct be_drvr_stat {
3182+ u32 bes_tx_reqs; /* number of TX requests initiated */
3183+ u32 bes_tx_fails; /* number of TX requests that failed */
3184+ u32 bes_fwd_reqs; /* number of send reqs through forwarding i/f */
3185+ u32 bes_tx_wrbs; /* number of tx WRBs used */
3186+
3187+ u32 bes_ints; /* number of interrupts */
3188+ u32 bes_polls; /* number of times NAPI called poll function */
3189+ u32 bes_events; /* total event entries processed */
3190+ u32 bes_tx_events; /* number of tx completion events */
3191+ u32 bes_rx_events; /* number of ucast rx completion events */
3192+ u32 bes_tx_compl; /* number of tx completion entries processed */
3193+ u32 bes_rx_compl; /* number of rx completion entries
3194+ processed */
3195+ u32 bes_ethrx_post_fail; /* number of ethrx buffer alloc
3196+ failures */
3197+ /*
3198+ * number of non ether type II frames dropped where
3199+ * frame len > length field of Mac Hdr
3200+ */
3201+ u32 bes_802_3_dropped_frames;
3202+ /*
3203+ * number of non ether type II frames malformed where
3204+ * in frame len < length field of Mac Hdr
3205+ */
3206+ u32 bes_802_3_malformed_frames;
3207+ u32 bes_ips; /* interrupts / sec */
3208+ u32 bes_prev_ints; /* bes_ints at last IPS calculation */
3209+ u16 bes_eth_tx_rate; /* ETH TX rate - Mb/sec */
3210+ u16 bes_eth_rx_rate; /* ETH RX rate - Mb/sec */
3211+ u32 bes_rx_coal; /* Num pkts coalesced */
3212+ u32 bes_rx_flush; /* Num times coalesced pkts were flushed */
3213+ u32 bes_link_change_physical; /*Num of times physical link changed */
3214+ u32 bes_link_change_virtual; /*Num of times virtual link changed */
3215+ u32 bes_rx_misc_pkts; /* Misc pkts received */
3216+};
3217+
3218+/* Maximum interrupt delay (in microseconds) allowed */
3219+#define MAX_EQD 120
3220+
3221+/*
3222+ * timer to prevent the system shutdown from hanging forever if h/w stops responding
3223+ */
3224+struct be_timer_ctxt {
3225+ atomic_t get_stat_flag;
3226+ struct timer_list get_stats_timer;
3227+ unsigned long get_stat_sem_addr;
3228+};
3229+
3230+/* This structure is the main BladeEngine driver context. */
3231+struct be_adapter {
3232+ struct net_device *netdevp;
3233+ struct be_drvr_stat be_stat;
3234+ struct net_device_stats benet_stats;
3235+
3236+ /* PCI BAR mapped addresses */
3237+ u8 __iomem *csr_va; /* CSR */
3238+ u8 __iomem *db_va; /* Door Bell */
3239+ u8 __iomem *pci_va; /* PCI Config */
3240+
3241+ struct tasklet_struct sts_handler;
3242+ struct timer_list cq_timer;
3243+ spinlock_t int_lock;
3244+
3245+ struct FWCMD_ETH_GET_STATISTICS *eth_statsp;
3246+ /*
3247+ * Set via ethtool to enable or disable use of the Rx checksum
3248+ * computed by BE.
3249+ * If true, the checksum status BE reports for a received pkt
3250+ * is passed on to the stack as-is.
3251+ * Else the stack will recalculate it.
3252+ */
3253+ bool rx_csum;
3254+ /*
3255+ * Set via ethtool to enable or disable Rx pkt coalescing.
3256+ * If this is greater than 0 and less than 16, coalescing
3257+ * is enabled; else it is disabled.
3259+ */
3260+ u32 max_rx_coal;
3261+ struct pci_dev *pdev; /* Pointer to OS's PCI device */
3262+
3263+ spinlock_t txq_lock;
3264+
3265+ u32 isr; /* copy of Intr status reg. */
3266+
3267+ u32 port0_link_sts; /* Port 0 link status */
3268+ u32 port1_link_sts; /* Port 1 link status */
3269+ struct BE_LINK_STATUS *be_link_sts;
3270+
3271+ /* pointer to the first netobject of this adapter */
3272+ struct be_net_object *net_obj;
3273+
3274+ /* Flags to indicate what to clean up */
3275+ bool tasklet_started;
3276+ bool isr_registered;
3277+ /*
3278+ * adaptive interrupt coalescing (AIC) related
3279+ */
3280+ bool enable_aic; /* 1 if AIC is enabled */
3281+ u16 min_eqd; /* minimum EQ delay in usec */
3282+ u16 max_eqd; /* maximum EQ delay in usec */
3283+ u16 cur_eqd; /* current EQ delay in usec */
3284+ /*
3285+ * bookkeeping for interrupts/sec and TX/RX rate calculation
3286+ */
3287+ ulong ips_jiffies; /* jiffies at last IPS calc */
3288+ u32 eth_tx_bytes;
3289+ ulong eth_tx_jiffies;
3290+ u32 eth_rx_bytes;
3291+ ulong eth_rx_jiffies;
3292+
3293+ struct semaphore get_eth_stat_sem;
3294+
3295+ /* timer ctxt to prevent shutdown hanging due to unresponsive BE */
3296+ struct be_timer_ctxt timer_ctxt;
3297+
3298+#define BE_MAX_MSIX_VECTORS 32
3299+#define BE_MAX_REQ_MSIX_VECTORS 1 /* only one EQ in Linux driver */
3300+ struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
3301+ bool msix_enabled;
3302+ bool dma_64bit_cap; /* whether the device is DAC (64-bit DMA) capable */
3303+ u8 dev_state; /* The current state of the device */
3304+ u8 dev_pm_state; /* The state of the device before suspend */
3305+};
3306+
3307+/*
3308+ * Every second we look at the ints/sec and adjust eq_delay
3309+ * between adapter->min_eqd and adapter->max_eqd to keep the ints/sec between
3310+ * IPS_HI_WM and IPS_LO_WM.
3311+ */
3312+#define IPS_HI_WM 18000
3313+#define IPS_LO_WM 8000
3314+
3315+
3316+static inline void index_adv(u32 *index, u32 val, u32 limit)
3317+{
3318+ BUG_ON(limit & (limit-1));
3319+ *index = (*index + val) & (limit - 1);
3320+}
3321+
3322+static inline void index_inc(u32 *index, u32 limit)
3323+{
3324+ BUG_ON(limit & (limit-1));
3325+ *index = (*index + 1) & (limit - 1);
3326+}
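/*
 * Why both helpers insist on a power-of-two limit: masking with
 * (limit - 1) only wraps correctly in that case. A standalone
 * demonstration of the wrap:
 */
#include <assert.h>

static unsigned int ring_next(unsigned int index, unsigned int limit)
{
	assert((limit & (limit - 1)) == 0);	/* power of two only */
	return (index + 1) & (limit - 1);	/* limit-1 wraps to 0 */
}
/* e.g. ring_next(1023, 1024) == 0 and ring_next(5, 1024) == 6 */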
3327+
3328+static inline void be_adv_eq_tl(struct be_net_object *pnob)
3329+{
3330+ index_inc(&pnob->event_q_tl, pnob->event_q_len);
3331+}
3332+
3333+static inline void be_adv_txq_hd(struct be_net_object *pnob)
3334+{
3335+ index_inc(&pnob->tx_q_hd, pnob->tx_q_len);
3336+}
3337+
3338+static inline void be_adv_txq_tl(struct be_net_object *pnob)
3339+{
3340+ index_inc(&pnob->tx_q_tl, pnob->tx_q_len);
3341+}
3342+
3343+static inline void be_adv_txcq_tl(struct be_net_object *pnob)
3344+{
3345+ index_inc(&pnob->tx_cq_tl, pnob->txcq_len);
3346+}
3347+
3348+static inline void be_adv_rxq_hd(struct be_net_object *pnob)
3349+{
3350+ index_inc(&pnob->rx_q_hd, pnob->rx_q_len);
3351+}
3352+
3353+static inline void be_adv_rxcq_tl(struct be_net_object *pnob)
3354+{
3355+ index_inc(&pnob->rx_cq_tl, pnob->rx_cq_len);
3356+}
3357+
3358+static inline u32 tx_compl_lastwrb_idx_get(struct be_net_object *pnob)
3359+{
3360+ return (pnob->tx_q_tl + *(u32 *)&pnob->tx_ctxt[pnob->tx_q_tl] - 1)
3361+ & (pnob->tx_q_len - 1);
3362+}
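/*
 * tx_compl_lastwrb_idx_get() relies on a convention set up in the
 * transmit path (not shown in this hunk): the tx_ctxt slot at the tail
 * holds the number of WRBs the request consumed, stored as an integer
 * cast into the pointer slot. Stripped of that cast, the index
 * arithmetic is just:
 */
static unsigned int last_wrb_index(unsigned int tail, unsigned int nwrbs,
				   unsigned int qlen)
{
	/* qlen is a power of two, as asserted by index_inc() above */
	return (tail + nwrbs - 1) & (qlen - 1);
}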
3363+
3364+int benet_init(struct net_device *);
3365+int be_ethtool_ioctl(struct net_device *, struct ifreq *);
3366+struct net_device_stats *benet_get_stats(struct net_device *);
3367+void be_process_intr(unsigned long context);
3368+irqreturn_t be_int(int irq, void *dev);
3369+void be_post_eth_rx_buffs(struct be_net_object *);
3370+void be_get_stat_cb(void *, int, struct MCC_WRB_AMAP *);
3371+void be_get_stats_timer_handler(unsigned long);
3372+void be_wait_nic_tx_cmplx_cmpl(struct be_net_object *);
3373+void be_print_link_info(struct BE_LINK_STATUS *);
3374+void be_update_link_status(struct be_adapter *);
3375+void be_init_procfs(struct be_adapter *);
3376+void be_cleanup_procfs(struct be_adapter *);
3377+int be_poll(struct napi_struct *, int);
3378+struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *);
3379+void be_notify_cmpl(struct be_net_object *, int, int, int);
3380+void be_enable_intr(struct be_net_object *);
3381+void be_enable_eq_intr(struct be_net_object *);
3382+void be_disable_intr(struct be_net_object *);
3383+void be_disable_eq_intr(struct be_net_object *);
3384+int be_set_uc_mac_adr(struct be_net_object *, u8, u8, u8,
3385+ u8 *, mcc_wrb_cqe_callback, void *);
3386+int be_get_flow_ctl(struct be_function_object *pFnObj, bool *, bool *);
3387+void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx);
3388+
3389+#endif /* _BENET_H_ */
3390--- /dev/null
3391+++ b/drivers/staging/benet/be_netif.c
3392@@ -0,0 +1,706 @@
3393+/*
3394+ * Copyright (C) 2005 - 2008 ServerEngines
3395+ * All rights reserved.
3396+ *
3397+ * This program is free software; you can redistribute it and/or
3398+ * modify it under the terms of the GNU General Public License version 2
3399+ * as published by the Free Software Foundation. The full GNU General
3400+ * Public License is included in this distribution in the file called COPYING.
3401+ *
3402+ * Contact Information:
3403+ * linux-drivers@serverengines.com
3404+ *
3405+ * ServerEngines
3406+ * 209 N. Fair Oaks Ave
3407+ * Sunnyvale, CA 94085
3408+ */
3409+/*
3410+ * be_netif.c
3411+ *
3412+ * This file contains the various driver entry points seen by the TCP/IP stack.
3413+ */
3414+
3415+#include <linux/if_vlan.h>
3416+#include <linux/in.h>
3417+#include "benet.h"
3418+#include <linux/ip.h>
3419+#include <linux/inet_lro.h>
3420+
3421+/* Strings to print Link properties */
3422+static const char *link_speed[] = {
3423+ "Invalid link Speed Value",
3424+ "10 Mbps",
3425+ "100 Mbps",
3426+ "1 Gbps",
3427+ "10 Gbps"
3428+};
3429+
3430+static const char *link_duplex[] = {
3431+ "Invalid Duplex Value",
3432+ "Half Duplex",
3433+ "Full Duplex"
3434+};
3435+
3436+static const char *link_state[] = {
3437+ "",
3438+ "(active)"
3439+};
3440+
3441+void be_print_link_info(struct BE_LINK_STATUS *lnk_status)
3442+{
3443+ u16 si, di, ai;
3444+
3445+ /* Port 0 */
3446+ if (lnk_status->mac0_speed && lnk_status->mac0_duplex) {
3447+ /* Port is up and running */
3448+ si = (lnk_status->mac0_speed < 5) ? lnk_status->mac0_speed : 0;
3449+ di = (lnk_status->mac0_duplex < 3) ?
3450+ lnk_status->mac0_duplex : 0;
3451+ ai = (lnk_status->active_port == 0) ? 1 : 0;
3452+ printk(KERN_INFO "PortNo. 0: Speed - %s %s %s\n",
3453+ link_speed[si], link_duplex[di], link_state[ai]);
3454+ } else
3455+ printk(KERN_INFO "PortNo. 0: Down\n");
3456+
3457+ /* Port 1 */
3458+ if (lnk_status->mac1_speed && lnk_status->mac1_duplex) {
3459+ /* Port is up and running */
3460+ si = (lnk_status->mac1_speed < 5) ? lnk_status->mac1_speed : 0;
3461+ di = (lnk_status->mac1_duplex < 3) ?
3462+ lnk_status->mac1_duplex : 0;
3463+ ai = (lnk_status->active_port == 1) ? 1 : 0;
3464+ printk(KERN_INFO "PortNo. 1: Speed - %s %s %s\n",
3465+ link_speed[si], link_duplex[di], link_state[ai]);
3466+ } else
3467+ printk(KERN_INFO "PortNo. 1: Down\n");
3468+
3469+ return;
3470+}
3471+
3472+static int
3473+be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
3474+ void **ip_hdr, void **tcpudp_hdr,
3475+ u64 *hdr_flags, void *priv)
3476+{
3477+ struct ethhdr *eh;
3478+ struct vlan_ethhdr *veh;
3479+ struct iphdr *iph;
3480+ u8 *va = page_address(frag->page) + frag->page_offset;
3481+ unsigned long ll_hlen;
3482+
3483+ /* find the mac header, abort if not IPv4 */
3484+
3485+ prefetch(va);
3486+ eh = (struct ethhdr *)va;
3487+ *mac_hdr = eh;
3488+ ll_hlen = ETH_HLEN;
3489+ if (eh->h_proto != htons(ETH_P_IP)) {
3490+ if (eh->h_proto == htons(ETH_P_8021Q)) {
3491+ veh = (struct vlan_ethhdr *)va;
3492+ if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
3493+ return -1;
3494+
3495+ ll_hlen += VLAN_HLEN;
3496+
3497+ } else {
3498+ return -1;
3499+ }
3500+ }
3501+ *hdr_flags = LRO_IPV4;
3502+
3503+ iph = (struct iphdr *)(va + ll_hlen);
3504+ *ip_hdr = iph;
3505+ if (iph->protocol != IPPROTO_TCP)
3506+ return -1;
3507+ *hdr_flags |= LRO_TCP;
3508+ *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
3509+
3510+ return 0;
3511+}
3512+
3513+static int benet_open(struct net_device *netdev)
3514+{
3515+ struct be_net_object *pnob = (struct be_net_object *)netdev->priv;
3516+ struct be_adapter *adapter = pnob->adapter;
3517+ struct net_lro_mgr *lro_mgr;
3518+
3519+ if (adapter->dev_state < BE_DEV_STATE_INIT)
3520+ return -EAGAIN;
3521+
3522+ lro_mgr = &pnob->lro_mgr;
3523+ lro_mgr->dev = netdev;
3524+
3525+ lro_mgr->features = LRO_F_NAPI;
3526+ lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
3527+ lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
3528+ lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
3529+ lro_mgr->lro_arr = pnob->lro_desc;
3530+ lro_mgr->get_frag_header = be_get_frag_header;
3531+ lro_mgr->max_aggr = adapter->max_rx_coal;
3532+ lro_mgr->frag_align_pad = 2;
3533+ if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
3534+ lro_mgr->max_aggr = MAX_SKB_FRAGS;
3535+
3536+ adapter->max_rx_coal = BE_LRO_MAX_PKTS;
3537+
3538+ be_update_link_status(adapter);
3539+
3540+ /*
3541+	 * Set carrier on only if the physical link is up.
3542+	 * Either port's link status being up signifies this.
3543+ */
3544+ if ((adapter->port0_link_sts == BE_PORT_LINK_UP) ||
3545+ (adapter->port1_link_sts == BE_PORT_LINK_UP)) {
3546+ netif_start_queue(netdev);
3547+ netif_carrier_on(netdev);
3548+ }
3549+
3550+ adapter->dev_state = BE_DEV_STATE_OPEN;
3551+ napi_enable(&pnob->napi);
3552+ be_enable_intr(pnob);
3553+ be_enable_eq_intr(pnob);
3554+ /*
3555+	 * The RX completion queue may be in a disarmed state. Arm it.
3556+ */
3557+ be_notify_cmpl(pnob, 0, pnob->rx_cq_id, 1);
3558+
3559+ return 0;
3560+}
3561+
3562+static int benet_close(struct net_device *netdev)
3563+{
3564+ struct be_net_object *pnob = (struct be_net_object *)netdev->priv;
3565+ struct be_adapter *adapter = pnob->adapter;
3566+
3567+ netif_stop_queue(netdev);
3568+ synchronize_irq(netdev->irq);
3569+
3570+ be_wait_nic_tx_cmplx_cmpl(pnob);
3571+ adapter->dev_state = BE_DEV_STATE_INIT;
3572+ netif_carrier_off(netdev);
3573+
3574+ adapter->port0_link_sts = BE_PORT_LINK_DOWN;
3575+ adapter->port1_link_sts = BE_PORT_LINK_DOWN;
3576+ be_disable_intr(pnob);
3577+ be_disable_eq_intr(pnob);
3578+ napi_disable(&pnob->napi);
3579+
3580+ return 0;
3581+}
3582+
3583+/*
3584+ * Set a MAC address for BE.
3585+ * Takes netdev and a void pointer as arguments.
3586+ * The pointer holds the new address to be used.
3587+ */
3588+static int benet_set_mac_addr(struct net_device *netdev, void *p)
3589+{
3590+ struct sockaddr *addr = p;
3591+ struct be_net_object *pnob;
3592+
3593+ pnob = (struct be_net_object *)netdev->priv;
3594+
3595+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3596+ be_rxf_mac_address_read_write(&pnob->fn_obj, 0, 0, false, true, false,
3597+ netdev->dev_addr, NULL, NULL);
3598+ /*
3599+ * Since we are doing Active-Passive failover, both
3600+	 * ports should have matching MAC addresses at all times.
3601+ */
3602+ be_rxf_mac_address_read_write(&pnob->fn_obj, 1, 0, false, true, false,
3603+ netdev->dev_addr, NULL, NULL);
3604+
3605+ return 0;
3606+}
3607+
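+/*
+ * Statistics are fetched via an asynchronous FWCMD while the caller
+ * blocks on a semaphore. Either the MCC completion callback or a
+ * 2-second timeout timer releases the waiter; the atomic get_stat_flag
+ * ensures that exactly one of the two paths ups the semaphore.
+ */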
3608+void be_get_stats_timer_handler(unsigned long context)
3609+{
3610+ struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;
3611+
3612+ if (atomic_read(&ctxt->get_stat_flag)) {
3613+ atomic_dec(&ctxt->get_stat_flag);
3614+ up((void *)ctxt->get_stat_sem_addr);
3615+ }
3616+ del_timer(&ctxt->get_stats_timer);
3617+ return;
3618+}
3619+
3620+void be_get_stat_cb(void *context, int status,
3621+ struct MCC_WRB_AMAP *optional_wrb)
3622+{
3623+ struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;
3624+ /*
3625+	 * Up the semaphore only if get_stat_flag reads 1, so that the
3626+	 * waiter can continue. If it reads 0, the timer handler has
3627+	 * already done so.
3628+ */
3629+ del_timer(&ctxt->get_stats_timer);
3630+ if (atomic_read(&ctxt->get_stat_flag)) {
3631+ atomic_dec(&ctxt->get_stat_flag);
3632+ up((void *)ctxt->get_stat_sem_addr);
3633+ }
3634+}
3635+
3636+struct net_device_stats *benet_get_stats(struct net_device *dev)
3637+{
3638+ struct be_net_object *pnob = dev->priv;
3639+ struct be_adapter *adapter = pnob->adapter;
3640+ u64 pa;
3641+ struct be_timer_ctxt *ctxt = &adapter->timer_ctxt;
3642+
3643+ if (adapter->dev_state != BE_DEV_STATE_OPEN) {
3644+ /* Return previously read stats */
3645+ return &(adapter->benet_stats);
3646+ }
3647+ /* Get Physical Addr */
3648+ pa = pci_map_single(adapter->pdev, adapter->eth_statsp,
3649+ sizeof(struct FWCMD_ETH_GET_STATISTICS),
3650+ PCI_DMA_FROMDEVICE);
3651+ ctxt->get_stat_sem_addr = (unsigned long)&adapter->get_eth_stat_sem;
3652+ atomic_inc(&ctxt->get_stat_flag);
3653+
3654+ be_rxf_query_eth_statistics(&pnob->fn_obj, adapter->eth_statsp,
3655+ cpu_to_le64(pa), be_get_stat_cb, ctxt,
3656+ NULL);
3657+
3658+ ctxt->get_stats_timer.data = (unsigned long)ctxt;
3659+ mod_timer(&ctxt->get_stats_timer, (jiffies + (HZ * 2)));
3660+ down((void *)ctxt->get_stat_sem_addr); /* callback will unblock us */
3661+
3662+ /* Adding port0 and port1 stats. */
3663+ adapter->benet_stats.rx_packets =
3664+ adapter->eth_statsp->params.response.p0recvdtotalframes +
3665+ adapter->eth_statsp->params.response.p1recvdtotalframes;
3666+ adapter->benet_stats.tx_packets =
3667+ adapter->eth_statsp->params.response.p0xmitunicastframes +
3668+ adapter->eth_statsp->params.response.p1xmitunicastframes;
3669+ adapter->benet_stats.tx_bytes =
3670+ adapter->eth_statsp->params.response.p0xmitbyteslsd +
3671+ adapter->eth_statsp->params.response.p1xmitbyteslsd;
3672+ adapter->benet_stats.rx_errors =
3673+ adapter->eth_statsp->params.response.p0crcerrors +
3674+ adapter->eth_statsp->params.response.p1crcerrors;
3675+ adapter->benet_stats.rx_errors +=
3676+ adapter->eth_statsp->params.response.p0alignmentsymerrs +
3677+ adapter->eth_statsp->params.response.p1alignmentsymerrs;
3678+ adapter->benet_stats.rx_errors +=
3679+ adapter->eth_statsp->params.response.p0inrangelenerrors +
3680+ adapter->eth_statsp->params.response.p1inrangelenerrors;
3681+ adapter->benet_stats.rx_bytes =
3682+ adapter->eth_statsp->params.response.p0recvdtotalbytesLSD +
3683+ adapter->eth_statsp->params.response.p1recvdtotalbytesLSD;
3684+ adapter->benet_stats.rx_crc_errors =
3685+ adapter->eth_statsp->params.response.p0crcerrors +
3686+ adapter->eth_statsp->params.response.p1crcerrors;
3687+
3688+ adapter->benet_stats.tx_packets +=
3689+ adapter->eth_statsp->params.response.p0xmitmulticastframes +
3690+ adapter->eth_statsp->params.response.p1xmitmulticastframes;
3691+ adapter->benet_stats.tx_packets +=
3692+ adapter->eth_statsp->params.response.p0xmitbroadcastframes +
3693+ adapter->eth_statsp->params.response.p1xmitbroadcastframes;
3694+ adapter->benet_stats.tx_errors = 0;
3695+
3696+ adapter->benet_stats.multicast =
3697+ adapter->eth_statsp->params.response.p0xmitmulticastframes +
3698+ adapter->eth_statsp->params.response.p1xmitmulticastframes;
3699+
3700+ adapter->benet_stats.rx_fifo_errors =
3701+ adapter->eth_statsp->params.response.p0rxfifooverflowdropped +
3702+ adapter->eth_statsp->params.response.p1rxfifooverflowdropped;
3703+ adapter->benet_stats.rx_frame_errors =
3704+ adapter->eth_statsp->params.response.p0alignmentsymerrs +
3705+ adapter->eth_statsp->params.response.p1alignmentsymerrs;
3706+ adapter->benet_stats.rx_length_errors =
3707+ adapter->eth_statsp->params.response.p0inrangelenerrors +
3708+ adapter->eth_statsp->params.response.p1inrangelenerrors;
3709+ adapter->benet_stats.rx_length_errors +=
3710+ adapter->eth_statsp->params.response.p0outrangeerrors +
3711+ adapter->eth_statsp->params.response.p1outrangeerrors;
3712+ adapter->benet_stats.rx_length_errors +=
3713+ adapter->eth_statsp->params.response.p0frametoolongerrors +
3714+ adapter->eth_statsp->params.response.p1frametoolongerrors;
3715+
3716+	pci_unmap_single(adapter->pdev, pa,
3717+ sizeof(struct FWCMD_ETH_GET_STATISTICS),
3718+ PCI_DMA_FROMDEVICE);
3719+ return &(adapter->benet_stats);
3720+
3721+}
3722+
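+/*
+ * Ring the TX doorbell for a batch of newly posted WRBs. A single
+ * doorbell write can post at most CSR_ETH_MAX_SQPOSTS (255) entries,
+ * so larger batches are split across several writes to the same CID.
+ */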
3723+static void be_start_tx(struct be_net_object *pnob, u32 nposted)
3724+{
3725+#define CSR_ETH_MAX_SQPOSTS 255
3726+ struct SQ_DB_AMAP sqdb;
3727+
3728+ sqdb.dw[0] = 0;
3729+
3730+ AMAP_SET_BITS_PTR(SQ_DB, cid, &sqdb, pnob->tx_q_id);
3731+ while (nposted) {
3732+ if (nposted > CSR_ETH_MAX_SQPOSTS) {
3733+ AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb,
3734+ CSR_ETH_MAX_SQPOSTS);
3735+ nposted -= CSR_ETH_MAX_SQPOSTS;
3736+ } else {
3737+ AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb, nposted);
3738+ nposted = 0;
3739+ }
3740+ PD_WRITE(&pnob->fn_obj, etx_sq_db, sqdb.dw[0]);
3741+ }
3742+
3743+ return;
3744+}
3745+
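+/*
+ * Worked example for the computation below: 500,000,000 bytes sent over
+ * a 2-second window is 250,000,000 B/s, i.e. r = 250 (MB/s), giving
+ * bes_eth_tx_rate = 250 * 8 = 2000 (Mbit/s).
+ */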
3746+static void update_tx_rate(struct be_adapter *adapter)
3747+{
3748+	/* update the rate once every two seconds */
3749+ if ((jiffies - adapter->eth_tx_jiffies) > 2 * (HZ)) {
3750+ u32 r;
3751+ r = adapter->eth_tx_bytes /
3752+ ((jiffies - adapter->eth_tx_jiffies) / (HZ));
3753+ r = (r / 1000000); /* M bytes/s */
3754+ adapter->be_stat.bes_eth_tx_rate = (r * 8); /* M bits/s */
3755+ adapter->eth_tx_jiffies = jiffies;
3756+ adapter->eth_tx_bytes = 0;
3757+ }
3758+}
3759+
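+/*
+ * Example: an skb with a non-empty linear area and two page fragments
+ * needs 1 + 2 = 3 WRBs; skbs chained on frag_list contribute to the
+ * count in the same way.
+ */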
3760+static int wrb_cnt_in_skb(struct sk_buff *skb)
3761+{
3762+ int cnt = 0;
3763+ while (skb) {
3764+ if (skb->len > skb->data_len)
3765+ cnt++;
3766+ cnt += skb_shinfo(skb)->nr_frags;
3767+ skb = skb_shinfo(skb)->frag_list;
3768+ }
3769+ BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
3770+ return cnt;
3771+}
3772+
3773+static void wrb_fill(struct ETH_WRB_AMAP *wrb, u64 addr, int len)
3774+{
3775+ AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb, addr >> 32);
3776+ AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb, addr & 0xFFFFFFFF);
3777+ AMAP_SET_BITS_PTR(ETH_WRB, frag_len, wrb, len);
3778+}
3779+
3780+static void wrb_fill_extra(struct ETH_WRB_AMAP *wrb, struct sk_buff *skb,
3781+ struct be_net_object *pnob)
3782+{
3783+ wrb->dw[2] = wrb->dw[3] = 0;
3784+ AMAP_SET_BITS_PTR(ETH_WRB, crc, wrb, 1);
3785+ if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
3786+ AMAP_SET_BITS_PTR(ETH_WRB, lso, wrb, 1);
3787+ AMAP_SET_BITS_PTR(ETH_WRB, lso_mss, wrb,
3788+ skb_shinfo(skb)->gso_size);
3789+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
3790+ u8 proto = ((struct iphdr *)ip_hdr(skb))->protocol;
3791+ if (proto == IPPROTO_TCP)
3792+ AMAP_SET_BITS_PTR(ETH_WRB, tcpcs, wrb, 1);
3793+ else if (proto == IPPROTO_UDP)
3794+ AMAP_SET_BITS_PTR(ETH_WRB, udpcs, wrb, 1);
3795+ }
3796+ if (pnob->vlan_grp && vlan_tx_tag_present(skb)) {
3797+ AMAP_SET_BITS_PTR(ETH_WRB, vlan, wrb, 1);
3798+ AMAP_SET_BITS_PTR(ETH_WRB, vlan_tag, wrb, vlan_tx_tag_get(skb));
3799+ }
3800+}
3801+
3802+static inline void wrb_copy_extra(struct ETH_WRB_AMAP *to,
3803+ struct ETH_WRB_AMAP *from)
3804+{
3805+
3806+ to->dw[2] = from->dw[2];
3807+ to->dw[3] = from->dw[3];
3808+}
3809+
3810+/* Returns the actual count of WRBs used, including a possible dummy */
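+/*
+ * An odd WRB count is padded with one zero-length dummy WRB so that
+ * every transmit consumes an even number of ring entries; this appears
+ * to be a ring-alignment requirement of the hardware.
+ */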
3811+static int copy_skb_to_txq(struct be_net_object *pnob, struct sk_buff *skb,
3812+ u32 wrb_cnt, u32 *copied)
3813+{
3814+ u64 busaddr;
3815+ struct ETH_WRB_AMAP *wrb = NULL, *first = NULL;
3816+ u32 i;
3817+ bool dummy = true;
3818+ struct pci_dev *pdev = pnob->adapter->pdev;
3819+
3820+ if (wrb_cnt & 1)
3821+ wrb_cnt++;
3822+ else
3823+ dummy = false;
3824+
3825+ atomic_add(wrb_cnt, &pnob->tx_q_used);
3826+
3827+ while (skb) {
3828+ if (skb->len > skb->data_len) {
3829+ int len = skb->len - skb->data_len;
3830+ busaddr = pci_map_single(pdev, skb->data, len,
3831+ PCI_DMA_TODEVICE);
3832+ busaddr = cpu_to_le64(busaddr);
3833+ wrb = &pnob->tx_q[pnob->tx_q_hd];
3834+ if (first == NULL) {
3835+ wrb_fill_extra(wrb, skb, pnob);
3836+ first = wrb;
3837+ } else {
3838+ wrb_copy_extra(wrb, first);
3839+ }
3840+ wrb_fill(wrb, busaddr, len);
3841+ be_adv_txq_hd(pnob);
3842+ *copied += len;
3843+ }
3844+
3845+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3846+ struct skb_frag_struct *frag =
3847+ &skb_shinfo(skb)->frags[i];
3848+ busaddr = pci_map_page(pdev, frag->page,
3849+ frag->page_offset, frag->size,
3850+ PCI_DMA_TODEVICE);
3851+ busaddr = cpu_to_le64(busaddr);
3852+ wrb = &pnob->tx_q[pnob->tx_q_hd];
3853+ if (first == NULL) {
3854+ wrb_fill_extra(wrb, skb, pnob);
3855+ first = wrb;
3856+ } else {
3857+ wrb_copy_extra(wrb, first);
3858+ }
3859+ wrb_fill(wrb, busaddr, frag->size);
3860+ be_adv_txq_hd(pnob);
3861+ *copied += frag->size;
3862+ }
3863+ skb = skb_shinfo(skb)->frag_list;
3864+ }
3865+
3866+ if (dummy) {
3867+ wrb = &pnob->tx_q[pnob->tx_q_hd];
3868+ BUG_ON(first == NULL);
3869+ wrb_copy_extra(wrb, first);
3870+ wrb_fill(wrb, 0, 0);
3871+ be_adv_txq_hd(pnob);
3872+ }
3873+ AMAP_SET_BITS_PTR(ETH_WRB, complete, wrb, 1);
3874+ AMAP_SET_BITS_PTR(ETH_WRB, last, wrb, 1);
3875+ return wrb_cnt;
3876+}
3877+
3878+/* For each skb transmitted, tx_ctxt stores the number of WRBs at the
3879+ * start index and the skb pointer at the end index.
3880+ */
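+/*
+ * Example: an skb posted at ring index 5 with 3 WRBs yields
+ * tx_ctxt[5] = 3 and tx_ctxt[7] = skb (indices wrap at tx_q_len).
+ */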
3881+static inline void be_tx_wrb_info_remember(struct be_net_object *pnob,
3882+ struct sk_buff *skb, int wrb_cnt,
3883+ u32 start)
3884+{
3885+ *(u32 *) (&pnob->tx_ctxt[start]) = wrb_cnt;
3886+ index_adv(&start, wrb_cnt - 1, pnob->tx_q_len);
3887+ pnob->tx_ctxt[start] = skb;
3888+}
3889+
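+/*
+ * Transmit entry point. The free-space check below reserves two ring
+ * entries beyond what the skb needs, presumably headroom for the dummy
+ * WRB that copy_skb_to_txq may append and to keep the head from
+ * overrunning the tail.
+ */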
3890+static int benet_xmit(struct sk_buff *skb, struct net_device *netdev)
3891+{
3892+ struct be_net_object *pnob = netdev->priv;
3893+ struct be_adapter *adapter = pnob->adapter;
3894+ u32 wrb_cnt, copied = 0;
3895+ u32 start = pnob->tx_q_hd;
3896+
3897+ adapter->be_stat.bes_tx_reqs++;
3898+
3899+ wrb_cnt = wrb_cnt_in_skb(skb);
3900+ spin_lock_bh(&adapter->txq_lock);
3901+ if ((pnob->tx_q_len - 2 - atomic_read(&pnob->tx_q_used)) <= wrb_cnt) {
3902+ netif_stop_queue(pnob->netdev);
3903+ spin_unlock_bh(&adapter->txq_lock);
3904+ adapter->be_stat.bes_tx_fails++;
3905+ return NETDEV_TX_BUSY;
3906+ }
3907+ spin_unlock_bh(&adapter->txq_lock);
3908+
3909+ wrb_cnt = copy_skb_to_txq(pnob, skb, wrb_cnt, &copied);
3910+ be_tx_wrb_info_remember(pnob, skb, wrb_cnt, start);
3911+
3912+ be_start_tx(pnob, wrb_cnt);
3913+
3914+ adapter->eth_tx_bytes += copied;
3915+ adapter->be_stat.bes_tx_wrbs += wrb_cnt;
3916+ update_tx_rate(adapter);
3917+ netdev->trans_start = jiffies;
3918+
3919+ return NETDEV_TX_OK;
3920+}
3921+
3922+/*
3923+ * This is the driver entry point to change the MTU of the device.
3924+ * Returns 0 for success and errno for failure.
3925+ */
3926+static int benet_change_mtu(struct net_device *netdev, int new_mtu)
3927+{
3928+ /*
3929+	 * BE supports jumbo frames of up to 9000 bytes including the link-layer
3930+	 * header. Allowing for the possible headers (Ethernet 14 + VLAN 4 +
3931+	 * SNAP/LLC 8 = 26 bytes), the maximum possible MTU is 8974 bytes.
3932+ */
3933+
3934+ if (new_mtu < (ETH_ZLEN + ETH_FCS_LEN) || (new_mtu > BE_MAX_MTU)) {
3935+ dev_info(&netdev->dev, "Invalid MTU requested. "
3936+ "Must be between %d and %d bytes\n",
3937+ (ETH_ZLEN + ETH_FCS_LEN), BE_MAX_MTU);
3938+ return -EINVAL;
3939+ }
3940+ dev_info(&netdev->dev, "MTU changed from %d to %d\n",
3941+ netdev->mtu, new_mtu);
3942+ netdev->mtu = new_mtu;
3943+ return 0;
3944+}
3945+
3946+/*
3947+ * This is the driver entry point to register a vlan with the device
3948+ */
3949+static void benet_vlan_register(struct net_device *netdev,
3950+ struct vlan_group *grp)
3951+{
3952+ struct be_net_object *pnob = netdev->priv;
3953+
3954+ be_disable_eq_intr(pnob);
3955+ pnob->vlan_grp = grp;
3956+ pnob->num_vlans = 0;
3957+ be_enable_eq_intr(pnob);
3958+}
3959+
3960+/*
3961+ * This is the driver entry point to add the VLAN vlan_id
3962+ * to the device netdev.
3963+ */
3964+static void benet_vlan_add_vid(struct net_device *netdev, u16 vlan_id)
3965+{
3966+ struct be_net_object *pnob = netdev->priv;
3967+
3968+ if (pnob->num_vlans == (BE_NUM_VLAN_SUPPORTED - 1)) {
3969+ /* no way to return an error */
3970+ dev_info(&netdev->dev,
3971+ "BladeEngine: Cannot configure more than %d Vlans\n",
3972+ BE_NUM_VLAN_SUPPORTED);
3973+ return;
3974+ }
3975+ /* The new vlan tag will be in the slot indicated by num_vlans. */
3976+ pnob->vlan_tag[pnob->num_vlans++] = vlan_id;
3977+ be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
3978+ pnob->vlan_tag, NULL, NULL, NULL);
3979+}
3980+
3981+/*
3982+ * This is the driver entry point to remove the VLAN vlan_id
3983+ * from the device netdev.
3984+ */
3985+static void benet_vlan_rem_vid(struct net_device *netdev, u16 vlan_id)
3986+{
3987+ struct be_net_object *pnob = netdev->priv;
3988+
3989+ u32 i, value;
3990+
3991+ /*
3992+	 * BladeEngine supports 32 VLAN tag filters across both ports.
3993+	 * To program a VLAN tag, the RXF_RTPR_CSR register is used.
3994+	 * Each 32-bit value of RXF_RTDR_CSR can address 2 VLAN tag entries,
3995+	 * and the VLAN table is 16 entries deep; thus we support 32 tags.
3996+ */
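+	/*
+	 * Example: with tags [10, 20, 30] and vlan_id = 20, the search
+	 * below stops at i = 1, every later slot is shifted down by one,
+	 * and num_vlans drops from 3 to 2, leaving [10, 30].
+	 */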
3997+
3998+ value = vlan_id | VLAN_VALID_BIT;
3999+ for (i = 0; i < BE_NUM_VLAN_SUPPORTED; i++) {
4000+ if (pnob->vlan_tag[i] == vlan_id)
4001+ break;
4002+ }
4003+
4004+ if (i == BE_NUM_VLAN_SUPPORTED)
4005+ return;
4006+	/* Now compact the VLAN tag array by removing the hole created. */
4007+ while ((i + 1) < BE_NUM_VLAN_SUPPORTED) {
4008+ pnob->vlan_tag[i] = pnob->vlan_tag[i + 1];
4009+ i++;
4010+ }
4011+ if ((i + 1) == BE_NUM_VLAN_SUPPORTED)
4012+ pnob->vlan_tag[i] = (u16) 0x0;
4013+ pnob->num_vlans--;
4014+ be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
4015+ pnob->vlan_tag, NULL, NULL, NULL);
4016+}
4017+
4018+/*
4019+ * This function is called to program multicast
4020+ * address in the multicast filter of the ASIC.
4021+ */
4022+static void be_set_multicast_filter(struct net_device *netdev)
4023+{
4024+ struct be_net_object *pnob = netdev->priv;
4025+ struct dev_mc_list *mc_ptr;
4026+ u8 mac_addr[32][ETH_ALEN];
4027+ int i;
4028+
4029+ if (netdev->flags & IFF_ALLMULTI) {
4030+ /* set BE in Multicast promiscuous */
4031+ be_rxf_multicast_config(&pnob->fn_obj, true, 0, NULL, NULL,
4032+ NULL, NULL);
4033+ return;
4034+ }
4035+
4036+	for (mc_ptr = netdev->mc_list, i = 0; mc_ptr && i < 32;
4037+	     mc_ptr = mc_ptr->next, i++) {
4038+ memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
4039+ }
4040+
4041+ /* reset the promiscuous mode also. */
4042+ be_rxf_multicast_config(&pnob->fn_obj, false, i,
4043+ &mac_addr[0][0], NULL, NULL, NULL);
4044+}
4045+
4046+/*
4047+ * This is the driver entry point to set the multicast list
4048+ * for the device netdev. This function is used to enable
4049+ * promiscuous mode, multicast promiscuous mode,
4050+ * or plain multicast filtering.
4051+ */
4052+static void benet_set_multicast_list(struct net_device *netdev)
4053+{
4054+ struct be_net_object *pnob = netdev->priv;
4055+
4056+ if (netdev->flags & IFF_PROMISC) {
4057+ be_rxf_promiscuous(&pnob->fn_obj, 1, 1, NULL, NULL, NULL);
4058+ } else {
4059+ be_rxf_promiscuous(&pnob->fn_obj, 0, 0, NULL, NULL, NULL);
4060+ be_set_multicast_filter(netdev);
4061+ }
4062+}
4063+
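+/*
+ * Register the driver's net_device entry points. This code predates the
+ * net_device_ops structure (introduced in 2.6.29), so each callback is
+ * assigned directly on the net_device.
+ */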
4064+int benet_init(struct net_device *netdev)
4065+{
4066+ struct be_net_object *pnob = netdev->priv;
4067+ struct be_adapter *adapter = pnob->adapter;
4068+
4069+ ether_setup(netdev);
4070+
4071+ netdev->open = &benet_open;
4072+ netdev->stop = &benet_close;
4073+ netdev->hard_start_xmit = &benet_xmit;
4074+
4075+ netdev->get_stats = &benet_get_stats;
4076+
4077+ netdev->set_multicast_list = &benet_set_multicast_list;
4078+
4079+ netdev->change_mtu = &benet_change_mtu;
4080+ netdev->set_mac_address = &benet_set_mac_addr;
4081+
4082+ netdev->vlan_rx_register = benet_vlan_register;
4083+ netdev->vlan_rx_add_vid = benet_vlan_add_vid;
4084+ netdev->vlan_rx_kill_vid = benet_vlan_rem_vid;
4085+
4086+ netdev->features =
4087+ NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
4088+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM;
4089+
4090+ netdev->flags |= IFF_MULTICAST;
4091+
4092+ /* If device is DAC Capable, set the HIGHDMA flag for netdevice. */
4093+ if (adapter->dma_64bit_cap)
4094+ netdev->features |= NETIF_F_HIGHDMA;
4095+
4096+ SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4097+ return 0;
4098+}
4099--- /dev/null
4100+++ b/drivers/staging/benet/bestatus.h
4101@@ -0,0 +1,103 @@
4102+/*
4103+ * Copyright (C) 2005 - 2008 ServerEngines
4104+ * All rights reserved.
4105+ *
4106+ * This program is free software; you can redistribute it and/or
4107+ * modify it under the terms of the GNU General Public License version 2
4108+ * as published by the Free Software Foundation. The full GNU General
4109+ * Public License is included in this distribution in the file called COPYING.
4110+ *
4111+ * Contact Information:
4112+ * linux-drivers@serverengines.com
4113+ *
4114+ * ServerEngines
4115+ * 209 N. Fair Oaks Ave
4116+ * Sunnyvale, CA 94085
4117+ */
4118+#ifndef _BESTATUS_H_
4119+#define _BESTATUS_H_
4120+
4121+#define BE_SUCCESS (0x00000000L)
4122+/*
4123+ * MessageId: BE_PENDING
4124+ * The BladeEngine driver call succeeded, and the operation is pending.
4125+ */
4126+#define BE_PENDING (0x20070001L)
4127+#define BE_STATUS_PENDING (BE_PENDING)
4128+/*
4129+ * MessageId: BE_NOT_OK
4130+ * An error occurred.
4131+ */
4132+#define BE_NOT_OK (0xE0070002L)
4133+/*
4134+ * MessageId: BE_STATUS_SYSTEM_RESOURCES
4135+ * Insufficient host system resources exist to complete the API.
4136+ */
4137+#define BE_STATUS_SYSTEM_RESOURCES (0xE0070003L)
4138+/*
4139+ * MessageId: BE_STATUS_CHIP_RESOURCES
4140+ * Insufficient chip resources exist to complete the API.
4141+ */
4142+#define BE_STATUS_CHIP_RESOURCES (0xE0070004L)
4143+/*
4144+ * MessageId: BE_STATUS_NO_RESOURCE
4145+ * Insufficient resources to complete request.
4146+ */
4147+#define BE_STATUS_NO_RESOURCE (0xE0070005L)
4148+/*
4149+ * MessageId: BE_STATUS_BUSY
4150+ * Resource is currently busy.
4151+ */
4152+#define BE_STATUS_BUSY (0xE0070006L)
4153+/*
4154+ * MessageId: BE_STATUS_INVALID_PARAMETER
4155+ * Invalid Parameter in request.
4156+ */
4157+#define BE_STATUS_INVALID_PARAMETER (0xE0000007L)
4158+/*
4159+ * MessageId: BE_STATUS_NOT_SUPPORTED
4160+ * Requested operation is not supported.
4161+ */
4162+#define BE_STATUS_NOT_SUPPORTED (0xE000000DL)
4163+
4164+/*
4165+ * ***************************************************************************
4166+ * E T H E R N E T S T A T U S
4167+ * ***************************************************************************
4168+ */
4169+
4170+/*
4171+ * MessageId: BE_ETH_TX_ERROR
4172+ * The Ethernet device driver failed to transmit a packet.
4173+ */
4174+#define BE_ETH_TX_ERROR (0xE0070101L)
4175+
4176+/*
4177+ * ***************************************************************************
4178+ * S H A R E D S T A T U S
4179+ * ***************************************************************************
4180+ */
4181+
4182+/*
4183+ * MessageId: BE_STATUS_VBD_INVALID_VERSION
4184+ * The device driver is not compatible with this version of the VBD.
4185+ */
4186+#define BE_STATUS_INVALID_VERSION (0xE0070402L)
4187+/*
4188+ * MessageId: BE_STATUS_DOMAIN_DENIED
4189+ * The operation failed to complete due to insufficient access
4190+ * rights for the requesting domain.
4191+ */
4192+#define BE_STATUS_DOMAIN_DENIED (0xE0070403L)
4193+/*
4194+ * MessageId: BE_STATUS_TCP_NOT_STARTED
4195+ * The embedded TCP/IP stack has not been started.
4196+ */
4197+#define BE_STATUS_TCP_NOT_STARTED (0xE0070409L)
4198+/*
4199+ * MessageId: BE_STATUS_NO_MCC_WRB
4200+ * No free MCC WRBs are available for posting the request.
4201+ */
4202+#define BE_STATUS_NO_MCC_WRB (0xE0070414L)
4203+
4204+#endif /* _BESTATUS_H_ */
4205--- /dev/null
4206+++ b/drivers/staging/benet/cev.h
4207@@ -0,0 +1,243 @@
4208+/*
4209+ * Copyright (C) 2005 - 2008 ServerEngines
4210+ * All rights reserved.
4211+ *
4212+ * This program is free software; you can redistribute it and/or
4213+ * modify it under the terms of the GNU General Public License version 2
4214+ * as published by the Free Software Foundation. The full GNU General
4215+ * Public License is included in this distribution in the file called COPYING.
4216+ *
4217+ * Contact Information:
4218+ * linux-drivers@serverengines.com
4219+ *
4220+ * ServerEngines
4221+ * 209 N. Fair Oaks Ave
4222+ * Sunnyvale, CA 94085
4223+ */
4224+/*
4225+ * Autogenerated by srcgen version: 0127
4226+ */
4227+#ifndef __cev_amap_h__
4228+#define __cev_amap_h__
4229+#include "ep.h"
4230+
4231+/*
4232+ * Host Interrupt Status Register 0. The first of four application
4233+ * interrupt status registers. This register contains the interrupts
4234+ * for Event Queues EQ0 through EQ31.
4235+ */
4236+struct BE_CEV_ISR0_CSR_AMAP {
4237+ u8 interrupt0; /* DWORD 0 */
4238+ u8 interrupt1; /* DWORD 0 */
4239+ u8 interrupt2; /* DWORD 0 */
4240+ u8 interrupt3; /* DWORD 0 */
4241+ u8 interrupt4; /* DWORD 0 */
4242+ u8 interrupt5; /* DWORD 0 */
4243+ u8 interrupt6; /* DWORD 0 */
4244+ u8 interrupt7; /* DWORD 0 */
4245+ u8 interrupt8; /* DWORD 0 */
4246+ u8 interrupt9; /* DWORD 0 */
4247+ u8 interrupt10; /* DWORD 0 */
4248+ u8 interrupt11; /* DWORD 0 */
4249+ u8 interrupt12; /* DWORD 0 */
4250+ u8 interrupt13; /* DWORD 0 */
4251+ u8 interrupt14; /* DWORD 0 */
4252+ u8 interrupt15; /* DWORD 0 */
4253+ u8 interrupt16; /* DWORD 0 */
4254+ u8 interrupt17; /* DWORD 0 */
4255+ u8 interrupt18; /* DWORD 0 */
4256+ u8 interrupt19; /* DWORD 0 */
4257+ u8 interrupt20; /* DWORD 0 */
4258+ u8 interrupt21; /* DWORD 0 */
4259+ u8 interrupt22; /* DWORD 0 */
4260+ u8 interrupt23; /* DWORD 0 */
4261+ u8 interrupt24; /* DWORD 0 */
4262+ u8 interrupt25; /* DWORD 0 */
4263+ u8 interrupt26; /* DWORD 0 */
4264+ u8 interrupt27; /* DWORD 0 */
4265+ u8 interrupt28; /* DWORD 0 */
4266+ u8 interrupt29; /* DWORD 0 */
4267+ u8 interrupt30; /* DWORD 0 */
4268+ u8 interrupt31; /* DWORD 0 */
4269+} __packed;
4270+struct CEV_ISR0_CSR_AMAP {
4271+ u32 dw[1];
4272+};
4273+
4274+/*
4275+ * Host Interrupt Status Register 1. The second of four application
4276+ * interrupt status registers. This register contains the interrupts
4277+ * for Event Queues EQ32 through EQ63.
4278+ */
4279+struct BE_CEV_ISR1_CSR_AMAP {
4280+ u8 interrupt32; /* DWORD 0 */
4281+ u8 interrupt33; /* DWORD 0 */
4282+ u8 interrupt34; /* DWORD 0 */
4283+ u8 interrupt35; /* DWORD 0 */
4284+ u8 interrupt36; /* DWORD 0 */
4285+ u8 interrupt37; /* DWORD 0 */
4286+ u8 interrupt38; /* DWORD 0 */
4287+ u8 interrupt39; /* DWORD 0 */
4288+ u8 interrupt40; /* DWORD 0 */
4289+ u8 interrupt41; /* DWORD 0 */
4290+ u8 interrupt42; /* DWORD 0 */
4291+ u8 interrupt43; /* DWORD 0 */
4292+ u8 interrupt44; /* DWORD 0 */
4293+ u8 interrupt45; /* DWORD 0 */
4294+ u8 interrupt46; /* DWORD 0 */
4295+ u8 interrupt47; /* DWORD 0 */
4296+ u8 interrupt48; /* DWORD 0 */
4297+ u8 interrupt49; /* DWORD 0 */
4298+ u8 interrupt50; /* DWORD 0 */
4299+ u8 interrupt51; /* DWORD 0 */
4300+ u8 interrupt52; /* DWORD 0 */
4301+ u8 interrupt53; /* DWORD 0 */
4302+ u8 interrupt54; /* DWORD 0 */
4303+ u8 interrupt55; /* DWORD 0 */
4304+ u8 interrupt56; /* DWORD 0 */
4305+ u8 interrupt57; /* DWORD 0 */
4306+ u8 interrupt58; /* DWORD 0 */
4307+ u8 interrupt59; /* DWORD 0 */
4308+ u8 interrupt60; /* DWORD 0 */
4309+ u8 interrupt61; /* DWORD 0 */
4310+ u8 interrupt62; /* DWORD 0 */
4311+ u8 interrupt63; /* DWORD 0 */
4312+} __packed;
4313+struct CEV_ISR1_CSR_AMAP {
4314+ u32 dw[1];
4315+};
4316+/*
4317+ * Host Interrupt Status Register 2. The third of four application
4318+ * interrupt status registers. This register contains the interrupts
4319+ * for Event Queues EQ64 through EQ95.
4320+ */
4321+struct BE_CEV_ISR2_CSR_AMAP {
4322+ u8 interrupt64; /* DWORD 0 */
4323+ u8 interrupt65; /* DWORD 0 */
4324+ u8 interrupt66; /* DWORD 0 */
4325+ u8 interrupt67; /* DWORD 0 */
4326+ u8 interrupt68; /* DWORD 0 */
4327+ u8 interrupt69; /* DWORD 0 */
4328+ u8 interrupt70; /* DWORD 0 */
4329+ u8 interrupt71; /* DWORD 0 */
4330+ u8 interrupt72; /* DWORD 0 */
4331+ u8 interrupt73; /* DWORD 0 */
4332+ u8 interrupt74; /* DWORD 0 */
4333+ u8 interrupt75; /* DWORD 0 */
4334+ u8 interrupt76; /* DWORD 0 */
4335+ u8 interrupt77; /* DWORD 0 */
4336+ u8 interrupt78; /* DWORD 0 */
4337+ u8 interrupt79; /* DWORD 0 */
4338+ u8 interrupt80; /* DWORD 0 */
4339+ u8 interrupt81; /* DWORD 0 */
4340+ u8 interrupt82; /* DWORD 0 */
4341+ u8 interrupt83; /* DWORD 0 */
4342+ u8 interrupt84; /* DWORD 0 */
4343+ u8 interrupt85; /* DWORD 0 */
4344+ u8 interrupt86; /* DWORD 0 */
4345+ u8 interrupt87; /* DWORD 0 */
4346+ u8 interrupt88; /* DWORD 0 */
4347+ u8 interrupt89; /* DWORD 0 */
4348+ u8 interrupt90; /* DWORD 0 */
4349+ u8 interrupt91; /* DWORD 0 */
4350+ u8 interrupt92; /* DWORD 0 */
4351+ u8 interrupt93; /* DWORD 0 */
4352+ u8 interrupt94; /* DWORD 0 */
4353+ u8 interrupt95; /* DWORD 0 */
4354+} __packed;
4355+struct CEV_ISR2_CSR_AMAP {
4356+ u32 dw[1];
4357+};
4358+
4359+/*
4360+ * Host Interrupt Status Register 3. The fourth of four application
4361+ * interrupt status registers. This register contains the interrupts
4362+ * for Event Queues EQ96 through EQ127.
4363+ */
4364+struct BE_CEV_ISR3_CSR_AMAP {
4365+ u8 interrupt96; /* DWORD 0 */
4366+ u8 interrupt97; /* DWORD 0 */
4367+ u8 interrupt98; /* DWORD 0 */
4368+ u8 interrupt99; /* DWORD 0 */
4369+ u8 interrupt100; /* DWORD 0 */
4370+ u8 interrupt101; /* DWORD 0 */
4371+ u8 interrupt102; /* DWORD 0 */
4372+ u8 interrupt103; /* DWORD 0 */
4373+ u8 interrupt104; /* DWORD 0 */
4374+ u8 interrupt105; /* DWORD 0 */
4375+ u8 interrupt106; /* DWORD 0 */
4376+ u8 interrupt107; /* DWORD 0 */
4377+ u8 interrupt108; /* DWORD 0 */
4378+ u8 interrupt109; /* DWORD 0 */
4379+ u8 interrupt110; /* DWORD 0 */
4380+ u8 interrupt111; /* DWORD 0 */
4381+ u8 interrupt112; /* DWORD 0 */
4382+ u8 interrupt113; /* DWORD 0 */
4383+ u8 interrupt114; /* DWORD 0 */
4384+ u8 interrupt115; /* DWORD 0 */
4385+ u8 interrupt116; /* DWORD 0 */
4386+ u8 interrupt117; /* DWORD 0 */
4387+ u8 interrupt118; /* DWORD 0 */
4388+ u8 interrupt119; /* DWORD 0 */
4389+ u8 interrupt120; /* DWORD 0 */
4390+ u8 interrupt121; /* DWORD 0 */
4391+ u8 interrupt122; /* DWORD 0 */
4392+ u8 interrupt123; /* DWORD 0 */
4393+ u8 interrupt124; /* DWORD 0 */
4394+ u8 interrupt125; /* DWORD 0 */
4395+ u8 interrupt126; /* DWORD 0 */
4396+ u8 interrupt127; /* DWORD 0 */
4397+} __packed;
4398+struct CEV_ISR3_CSR_AMAP {
4399+ u32 dw[1];
4400+};
4401+
4402+/* Completions and Events block Registers. */
4403+struct BE_CEV_CSRMAP_AMAP {
4404+ u8 rsvd0[32]; /* DWORD 0 */
4405+ u8 rsvd1[32]; /* DWORD 1 */
4406+ u8 rsvd2[32]; /* DWORD 2 */
4407+ u8 rsvd3[32]; /* DWORD 3 */
4408+ struct BE_CEV_ISR0_CSR_AMAP isr0;
4409+ struct BE_CEV_ISR1_CSR_AMAP isr1;
4410+ struct BE_CEV_ISR2_CSR_AMAP isr2;
4411+ struct BE_CEV_ISR3_CSR_AMAP isr3;
4412+ u8 rsvd4[32]; /* DWORD 8 */
4413+ u8 rsvd5[32]; /* DWORD 9 */
4414+ u8 rsvd6[32]; /* DWORD 10 */
4415+ u8 rsvd7[32]; /* DWORD 11 */
4416+ u8 rsvd8[32]; /* DWORD 12 */
4417+ u8 rsvd9[32]; /* DWORD 13 */
4418+ u8 rsvd10[32]; /* DWORD 14 */
4419+ u8 rsvd11[32]; /* DWORD 15 */
4420+ u8 rsvd12[32]; /* DWORD 16 */
4421+ u8 rsvd13[32]; /* DWORD 17 */
4422+ u8 rsvd14[32]; /* DWORD 18 */
4423+ u8 rsvd15[32]; /* DWORD 19 */
4424+ u8 rsvd16[32]; /* DWORD 20 */
4425+ u8 rsvd17[32]; /* DWORD 21 */
4426+ u8 rsvd18[32]; /* DWORD 22 */
4427+ u8 rsvd19[32]; /* DWORD 23 */
4428+ u8 rsvd20[32]; /* DWORD 24 */
4429+ u8 rsvd21[32]; /* DWORD 25 */
4430+ u8 rsvd22[32]; /* DWORD 26 */
4431+ u8 rsvd23[32]; /* DWORD 27 */
4432+ u8 rsvd24[32]; /* DWORD 28 */
4433+ u8 rsvd25[32]; /* DWORD 29 */
4434+ u8 rsvd26[32]; /* DWORD 30 */
4435+ u8 rsvd27[32]; /* DWORD 31 */
4436+ u8 rsvd28[32]; /* DWORD 32 */
4437+ u8 rsvd29[32]; /* DWORD 33 */
4438+ u8 rsvd30[192]; /* DWORD 34 */
4439+ u8 rsvd31[192]; /* DWORD 40 */
4440+ u8 rsvd32[160]; /* DWORD 46 */
4441+ u8 rsvd33[160]; /* DWORD 51 */
4442+ u8 rsvd34[160]; /* DWORD 56 */
4443+ u8 rsvd35[96]; /* DWORD 61 */
4444+ u8 rsvd36[192][32]; /* DWORD 64 */
4445+} __packed;
4446+struct CEV_CSRMAP_AMAP {
4447+ u32 dw[256];
4448+};
4449+
4450+#endif /* __cev_amap_h__ */
4451--- /dev/null
4452+++ b/drivers/staging/benet/cq.c
4453@@ -0,0 +1,211 @@
4454+/*
4455+ * Copyright (C) 2005 - 2008 ServerEngines
4456+ * All rights reserved.
4457+ *
4458+ * This program is free software; you can redistribute it and/or
4459+ * modify it under the terms of the GNU General Public License version 2
4460+ * as published by the Free Software Foundation. The full GNU General
4461+ * Public License is included in this distribution in the file called COPYING.
4462+ *
4463+ * Contact Information:
4464+ * linux-drivers@serverengines.com
4465+ *
4466+ * ServerEngines
4467+ * 209 N. Fair Oaks Ave
4468+ * Sunnyvale, CA 94085
4469+ */
4470+#include "hwlib.h"
4471+#include "bestatus.h"
4472+
4473+/*
4474+ * Completion Queue Objects
4475+ */
4476+/*
4477+ *============================================================================
4478+ * P U B L I C R O U T I N E S
4479+ *============================================================================
4480+ */
4481+
4482+/*
4483+ This routine creates a completion queue based on the client completion
4484+ queue configuration information.
4485+
4486+
4487+ FunctionObject - Handle to a function object
4488+ CqBaseVa - Base VA for the CQ ring
4489+ NumEntries - CEV_CQ_CNT_* values
4490+ solEventEnable - 0 = All CQEs can generate Events if CQ is eventable
4491+ 1 = only CQEs with solicited bit set are eventable
4492+ eventable - Eventable CQ, generates interrupts.
4493+ nodelay - 1 = Force interrupt, relevant if CQ eventable.
4494+ Interrupt is asserted immediately after EQE
4495+ write is confirmed, regardless of EQ Timer
4496+ or watermark settings.
4497+ wme - Enable watermark based coalescing
4498+ wmThresh - High watermark (CQ fullness at which event
4499+ or interrupt should be asserted). These are the
4500+ CEV_WATERMARK encoded values.
4501+ EqObject - EQ Handle to assign to this CQ
4502+ ppCqObject - Internal CQ Handle returned.
4503+
4504+ Returns BE_SUCCESS if successful, otherwise a useful error code is
4505+ returned.
4506+
4507+ IRQL < DISPATCH_LEVEL
4508+
4509+*/
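+/*
+ * Illustrative call, assuming a ring_desc rd that describes a ring of
+ * 256 MCC_CQ_ENTRY_AMAP entries (identifiers other than the API above
+ * are invented for the example):
+ *
+ *	len = 256 * sizeof(struct MCC_CQ_ENTRY_AMAP);
+ *	status = be_cq_create(pfob, &rd, len, false, true,
+ *			      0xFFFFFFFF, &eq, &cq);
+ *
+ * A wm_thresh of 0xFFFFFFFF disables watermark based coalescing below.
+ */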
4510+int be_cq_create(struct be_function_object *pfob,
4511+ struct ring_desc *rd, u32 length, bool solicited_eventable,
4512+ bool no_delay, u32 wm_thresh,
4513+ struct be_eq_object *eq_object, struct be_cq_object *cq_object)
4514+{
4515+ int status = BE_SUCCESS;
4516+ u32 num_entries_encoding;
4517+ u32 num_entries = length / sizeof(struct MCC_CQ_ENTRY_AMAP);
4518+ struct FWCMD_COMMON_CQ_CREATE *fwcmd = NULL;
4519+ struct MCC_WRB_AMAP *wrb = NULL;
4520+ u32 n;
4521+ unsigned long irql;
4522+
4523+ ASSERT(rd);
4524+ ASSERT(cq_object);
4525+ ASSERT(length % sizeof(struct MCC_CQ_ENTRY_AMAP) == 0);
4526+
4527+ switch (num_entries) {
4528+ case 256:
4529+ num_entries_encoding = CEV_CQ_CNT_256;
4530+ break;
4531+ case 512:
4532+ num_entries_encoding = CEV_CQ_CNT_512;
4533+ break;
4534+ case 1024:
4535+ num_entries_encoding = CEV_CQ_CNT_1024;
4536+ break;
4537+ default:
4538+ ASSERT(0);
4539+ return BE_STATUS_INVALID_PARAMETER;
4540+ }
4541+
4542+ /*
4543+	 * All CQ entries are the same size. Use the iSCSI version
4544+ * as a test for the proper rd length.
4545+ */
4546+ memset(cq_object, 0, sizeof(*cq_object));
4547+
4548+ atomic_set(&cq_object->ref_count, 0);
4549+ cq_object->parent_function = pfob;
4550+ cq_object->eq_object = eq_object;
4551+ cq_object->num_entries = num_entries;
4552+ /* save for MCC cq processing */
4553+ cq_object->va = rd->va;
4554+
4555+ /* map into UT. */
4556+ length = num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP);
4557+
4558+ spin_lock_irqsave(&pfob->post_lock, irql);
4559+
4560+ wrb = be_function_peek_mcc_wrb(pfob);
4561+ if (!wrb) {
4562+ ASSERT(wrb);
4563+		TRACE(DL_ERR, "No free MCC WRBs in create CQ.");
4564+ status = BE_STATUS_NO_MCC_WRB;
4565+ goto Error;
4566+ }
4567+ /* Prepares an embedded fwcmd, including request/response sizes. */
4568+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_CQ_CREATE);
4569+
4570+ fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va),
4571+ length);
4572+
4573+ AMAP_SET_BITS_PTR(CQ_CONTEXT, valid, &fwcmd->params.request.context, 1);
4574+ n = pfob->pci_function_number;
4575+ AMAP_SET_BITS_PTR(CQ_CONTEXT, Func, &fwcmd->params.request.context, n);
4576+
4577+ n = (eq_object != NULL);
4578+ AMAP_SET_BITS_PTR(CQ_CONTEXT, Eventable,
4579+ &fwcmd->params.request.context, n);
4580+ AMAP_SET_BITS_PTR(CQ_CONTEXT, Armed, &fwcmd->params.request.context, 1);
4581+
4582+ n = eq_object ? eq_object->eq_id : 0;
4583+ AMAP_SET_BITS_PTR(CQ_CONTEXT, EQID, &fwcmd->params.request.context, n);
4584+ AMAP_SET_BITS_PTR(CQ_CONTEXT, Count,
4585+ &fwcmd->params.request.context, num_entries_encoding);
4586+
4587+ n = 0; /* Protection Domain is always 0 in Linux driver */
4588+ AMAP_SET_BITS_PTR(CQ_CONTEXT, PD, &fwcmd->params.request.context, n);
4589+ AMAP_SET_BITS_PTR(CQ_CONTEXT, NoDelay,
4590+ &fwcmd->params.request.context, no_delay);
4591+ AMAP_SET_BITS_PTR(CQ_CONTEXT, SolEvent,
4592+ &fwcmd->params.request.context, solicited_eventable);
4593+
4594+ n = (wm_thresh != 0xFFFFFFFF);
4595+ AMAP_SET_BITS_PTR(CQ_CONTEXT, WME, &fwcmd->params.request.context, n);
4596+
4597+ n = (n ? wm_thresh : 0);
4598+ AMAP_SET_BITS_PTR(CQ_CONTEXT, Watermark,
4599+ &fwcmd->params.request.context, n);
4600+ /* Create a page list for the FWCMD. */
4601+ be_rd_to_pa_list(rd, fwcmd->params.request.pages,
4602+ ARRAY_SIZE(fwcmd->params.request.pages));
4603+
4604+ /* Post the f/w command */
4605+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
4606+ NULL, NULL, fwcmd, NULL);
4607+ if (status != BE_SUCCESS) {
4608+ TRACE(DL_ERR, "MCC to create CQ failed.");
4609+ goto Error;
4610+ }
4611+ /* Remember the CQ id. */
4612+ cq_object->cq_id = fwcmd->params.response.cq_id;
4613+
4614+ /* insert this cq into eq_object reference */
4615+ if (eq_object) {
4616+ atomic_inc(&eq_object->ref_count);
4617+ list_add_tail(&cq_object->cqlist_for_eq,
4618+ &eq_object->cq_list_head);
4619+ }
4620+
4621+Error:
4622+ spin_unlock_irqrestore(&pfob->post_lock, irql);
4623+
4624+ if (pfob->pend_queue_driving && pfob->mcc) {
4625+ pfob->pend_queue_driving = 0;
4626+ be_drive_mcc_wrb_queue(pfob->mcc);
4627+ }
4628+ return status;
4629+}
4630+
4631+/*
4632+
4633+ Dereferences the given object. Once the object's reference count drops to
4634+ zero, the object is destroyed and all resources that are held by this object
4635+ are released. The on-chip context is also destroyed along with the queue
4636+ ID, and any mappings made into the UT.
4637+
4638+ cq_object - CQ handle returned from cq_object_create.
4639+
4640+ Returns BE_SUCCESS.
4641+
4642+ IRQL: IRQL < DISPATCH_LEVEL
4643+*/
4644+int be_cq_destroy(struct be_cq_object *cq_object)
4645+{
4646+ int status = 0;
4647+
4648+ /* Nothing should reference this CQ at this point. */
4649+ ASSERT(atomic_read(&cq_object->ref_count) == 0);
4650+
4651+ /* Send fwcmd to destroy the CQ. */
4652+ status = be_function_ring_destroy(cq_object->parent_function,
4653+ cq_object->cq_id, FWCMD_RING_TYPE_CQ,
4654+ NULL, NULL, NULL, NULL);
4655+ ASSERT(status == 0);
4656+
4657+ /* Remove reference if this is an eventable CQ. */
4658+ if (cq_object->eq_object) {
4659+ atomic_dec(&cq_object->eq_object->ref_count);
4660+ list_del(&cq_object->cqlist_for_eq);
4661+ }
4662+ return BE_SUCCESS;
4663+}
4664+
4665--- /dev/null
4666+++ b/drivers/staging/benet/descriptors.h
4667@@ -0,0 +1,71 @@
4668+/*
4669+ * Copyright (C) 2005 - 2008 ServerEngines
4670+ * All rights reserved.
4671+ *
4672+ * This program is free software; you can redistribute it and/or
4673+ * modify it under the terms of the GNU General Public License version 2
4674+ * as published by the Free Software Foundation. The full GNU General
4675+ * Public License is included in this distribution in the file called COPYING.
4676+ *
4677+ * Contact Information:
4678+ * linux-drivers@serverengines.com
4679+ *
4680+ * ServerEngines
4681+ * 209 N. Fair Oaks Ave
4682+ * Sunnyvale, CA 94085
4683+ */
4684+/*
4685+ * Autogenerated by srcgen version: 0127
4686+ */
4687+#ifndef __descriptors_amap_h__
4688+#define __descriptors_amap_h__
4689+
4690+/*
4691+ * --- IPC_NODE_ID_ENUM ---
4692+ * IPC processor id values
4693+ */
4694+#define TPOST_NODE_ID (0) /* TPOST ID */
4695+#define TPRE_NODE_ID (1) /* TPRE ID */
4696+#define TXULP0_NODE_ID (2) /* TXULP0 ID */
4697+#define TXULP1_NODE_ID (3) /* TXULP1 ID */
4698+#define TXULP2_NODE_ID (4) /* TXULP2 ID */
4699+#define RXULP0_NODE_ID (5) /* RXULP0 ID */
4700+#define RXULP1_NODE_ID (6) /* RXULP1 ID */
4701+#define RXULP2_NODE_ID (7) /* RXULP2 ID */
4702+#define MPU_NODE_ID (15) /* MPU ID */
4703+
4704+/*
4705+ * --- MAC_ID_ENUM ---
4706+ * Meaning of the mac_id field in rxpp_eth_d
4707+ */
4708+#define PORT0_HOST_MAC0 (0) /* PD 0, Port 0, host networking, MAC 0. */
4709+#define PORT0_HOST_MAC1 (1) /* PD 0, Port 0, host networking, MAC 1. */
4710+#define PORT0_STORAGE_MAC0 (2) /* PD 0, Port 0, host storage, MAC 0. */
4711+#define PORT0_STORAGE_MAC1 (3) /* PD 0, Port 0, host storage, MAC 1. */
4712+#define PORT1_HOST_MAC0 (4) /* PD 0, Port 1 host networking, MAC 0. */
4713+#define PORT1_HOST_MAC1 (5) /* PD 0, Port 1 host networking, MAC 1. */
4714+#define PORT1_STORAGE_MAC0 (6) /* PD 0, Port 1 host storage, MAC 0. */
4715+#define PORT1_STORAGE_MAC1 (7) /* PD 0, Port 1 host storage, MAC 1. */
4716+#define FIRST_VM_MAC (8) /* PD 1 MAC. Protection domains have IDs */
4717+ /* from 0x8-0x26, one per PD. */
4718+#define LAST_VM_MAC (38) /* PD 31 MAC. */
4719+#define MGMT_MAC (39) /* Management port MAC. */
4720+#define MARBLE_MAC0 (59) /* Used for flushing function 0 receive */
4721+ /*
4722+ * queues before re-using a torn-down
4723+ * receive ring. the DA =
4724+ * 00-00-00-00-00-00, and the MSB of the
4725+ * SA = 00
4726+ */
4727+#define MARBLE_MAC1 (60) /* Used for flushing function 1 receive */
4728+ /*
4729+ * queues before re-using a torn-down
4730+ * receive ring. the DA =
4731+ * 00-00-00-00-00-00, and the MSB of the
4732+ * SA != 00
4733+ */
4734+#define NULL_MAC (61) /* Promiscuous mode, indicates no match */
4735+#define MCAST_MAC (62) /* Multicast match. */
4736+#define BCAST_MATCH (63) /* Broadcast match. */
4737+
4738+#endif /* __descriptors_amap_h__ */
4739--- /dev/null
4740+++ b/drivers/staging/benet/doorbells.h
4741@@ -0,0 +1,179 @@
4742+/*
4743+ * Copyright (C) 2005 - 2008 ServerEngines
4744+ * All rights reserved.
4745+ *
4746+ * This program is free software; you can redistribute it and/or
4747+ * modify it under the terms of the GNU General Public License version 2
4748+ * as published by the Free Software Foundation. The full GNU General
4749+ * Public License is included in this distribution in the file called COPYING.
4750+ *
4751+ * Contact Information:
4752+ * linux-drivers@serverengines.com
4753+ *
4754+ * ServerEngines
4755+ * 209 N. Fair Oaks Ave
4756+ * Sunnyvale, CA 94085
4757+ */
4758+/*
4759+ * Autogenerated by srcgen version: 0127
4760+ */
4761+#ifndef __doorbells_amap_h__
4762+#define __doorbells_amap_h__
4763+
4764+/* The TX/RDMA send queue doorbell. */
4765+struct BE_SQ_DB_AMAP {
4766+ u8 cid[11]; /* DWORD 0 */
4767+ u8 rsvd0[5]; /* DWORD 0 */
4768+ u8 numPosted[14]; /* DWORD 0 */
4769+ u8 rsvd1[2]; /* DWORD 0 */
4770+} __packed;
4771+struct SQ_DB_AMAP {
4772+ u32 dw[1];
4773+};
4774+
4775+/* The receive queue doorbell. */
4776+struct BE_RQ_DB_AMAP {
4777+ u8 rq[10]; /* DWORD 0 */
4778+ u8 rsvd0[13]; /* DWORD 0 */
4779+ u8 Invalidate; /* DWORD 0 */
4780+ u8 numPosted[8]; /* DWORD 0 */
4781+} __packed;
4782+struct RQ_DB_AMAP {
4783+ u32 dw[1];
4784+};
4785+
4786+/*
4787+ * The CQ/EQ doorbell. Software MUST set reserved fields in this
4788+ * descriptor to zero, otherwise (CEV) hardware will not execute the
4789+ * doorbell (flagging a bad_db_qid error instead).
4790+ */
4791+struct BE_CQ_DB_AMAP {
4792+ u8 qid[10]; /* DWORD 0 */
4793+ u8 rsvd0[4]; /* DWORD 0 */
4794+ u8 rearm; /* DWORD 0 */
4795+ u8 event; /* DWORD 0 */
4796+ u8 num_popped[13]; /* DWORD 0 */
4797+ u8 rsvd1[3]; /* DWORD 0 */
4798+} __packed;
4799+struct CQ_DB_AMAP {
4800+ u32 dw[1];
4801+};
4802+
4803+struct BE_TPM_RQ_DB_AMAP {
4804+ u8 qid[10]; /* DWORD 0 */
4805+ u8 rsvd0[6]; /* DWORD 0 */
4806+ u8 numPosted[11]; /* DWORD 0 */
4807+ u8 mss_cnt[5]; /* DWORD 0 */
4808+} __packed;
4809+struct TPM_RQ_DB_AMAP {
4810+ u32 dw[1];
4811+};
4812+
4813+/*
4814+ * Post WRB Queue Doorbell Register used by the host Storage stack
4815+ * to notify the controller of a posted Work Request Block
4816+ */
4817+struct BE_WRB_POST_DB_AMAP {
4818+ u8 wrb_cid[10]; /* DWORD 0 */
4819+ u8 rsvd0[6]; /* DWORD 0 */
4820+ u8 wrb_index[8]; /* DWORD 0 */
4821+ u8 numberPosted[8]; /* DWORD 0 */
4822+} __packed;
4823+struct WRB_POST_DB_AMAP {
4824+ u32 dw[1];
4825+};
4826+
4827+/*
4828+ * Update Default PDU Queue Doorbell Register used to communicate
4829+ * to the controller that the driver has stopped processing the queue
4830+ * and where in the queue it stopped, this is
4831+ * a CQ Entry Type. Used by storage driver.
4832+ */
4833+struct BE_DEFAULT_PDU_DB_AMAP {
4834+ u8 qid[10]; /* DWORD 0 */
4835+ u8 rsvd0[4]; /* DWORD 0 */
4836+ u8 rearm; /* DWORD 0 */
4837+ u8 event; /* DWORD 0 */
4838+ u8 cqproc[14]; /* DWORD 0 */
4839+ u8 rsvd1[2]; /* DWORD 0 */
4840+} __packed;
4841+struct DEFAULT_PDU_DB_AMAP {
4842+ u32 dw[1];
4843+};
4844+
4845+/* Management Command and Controller default fragment ring */
4846+struct BE_MCC_DB_AMAP {
4847+ u8 rid[11]; /* DWORD 0 */
4848+ u8 rsvd0[5]; /* DWORD 0 */
4849+ u8 numPosted[14]; /* DWORD 0 */
4850+ u8 rsvd1[2]; /* DWORD 0 */
4851+} __packed;
4852+struct MCC_DB_AMAP {
4853+ u32 dw[1];
4854+};
4855+
4856+/*
4857+ * Used for bootstrapping the Host interface. This register is
4858+ * used for driver communication with the MPU when no MCC Rings exist.
4859+ * The software must write this register twice to post any MCC
4860+ * command. First, it writes the register with hi=1 and the upper bits of
4861+ * the physical address for the MCC_MAILBOX structure. Software must poll
4862+ * the ready bit until this is acknowledged. Then, software writes the
4863+ * register with hi=0 with the lower bits in the address. It must
4864+ * poll the ready bit until the MCC command is complete. Upon completion,
4865+ * the MCC_MAILBOX will contain a valid completion queue entry.
4866+ */
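+/*
+ * Bootstrap handshake sketched from the description above (pseudo-code,
+ * not a literal driver excerpt):
+ *
+ *	write(db: hi = 1, address = upper bits of mailbox PA); poll ready;
+ *	write(db: hi = 0, address = lower bits of mailbox PA); poll ready;
+ *	read the completion entry from the MCC_MAILBOX.
+ */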
4867+struct BE_MPU_MAILBOX_DB_AMAP {
4868+ u8 ready; /* DWORD 0 */
4869+ u8 hi; /* DWORD 0 */
4870+ u8 address[30]; /* DWORD 0 */
4871+} __packed;
4872+struct MPU_MAILBOX_DB_AMAP {
4873+ u32 dw[1];
4874+};
4875+
4876+/*
4877+ * This is the protection domain doorbell register map. Note that
4878+ * while this map shows doorbells for all Blade Engine supported
4879+ * protocols, not all of these may be valid in a given function or
4880+ * protection domain. It is the responsibility of the application
4881+ * accessing the doorbells to know which are valid. Each doorbell
4882+ * occupies 32 bytes of space, but unless otherwise specified,
4883+ * only the first 4 bytes should be written. There are 32 instances
4884+ * of these doorbells, one each for the host and the 31 virtual machines.
4885+ * The host and each VM map only the doorbell pages belonging to their
4886+ * own protection domain; they cannot touch the doorbells of
4887+ * another VM. The doorbells are the only registers directly accessible
4888+ * by a virtual machine. Similarly, there are 511 additional
4889+ * doorbells for RDMA protection domains. PD 0 for RDMA shares
4890+ * the same physical protection domain doorbell page as ETH/iSCSI.
4891+ *
4892+ */
4893+struct BE_PROTECTION_DOMAIN_DBMAP_AMAP {
4894+ u8 rsvd0[512]; /* DWORD 0 */
4895+ struct BE_SQ_DB_AMAP rdma_sq_db;
4896+ u8 rsvd1[7][32]; /* DWORD 17 */
4897+ struct BE_WRB_POST_DB_AMAP iscsi_wrb_post_db;
4898+ u8 rsvd2[7][32]; /* DWORD 25 */
4899+ struct BE_SQ_DB_AMAP etx_sq_db;
4900+ u8 rsvd3[7][32]; /* DWORD 33 */
4901+ struct BE_RQ_DB_AMAP rdma_rq_db;
4902+ u8 rsvd4[7][32]; /* DWORD 41 */
4903+ struct BE_DEFAULT_PDU_DB_AMAP iscsi_default_pdu_db;
4904+ u8 rsvd5[7][32]; /* DWORD 49 */
4905+ struct BE_TPM_RQ_DB_AMAP tpm_rq_db;
4906+ u8 rsvd6[7][32]; /* DWORD 57 */
4907+ struct BE_RQ_DB_AMAP erx_rq_db;
4908+ u8 rsvd7[7][32]; /* DWORD 65 */
4909+ struct BE_CQ_DB_AMAP cq_db;
4910+ u8 rsvd8[7][32]; /* DWORD 73 */
4911+ struct BE_MCC_DB_AMAP mpu_mcc_db;
4912+ u8 rsvd9[7][32]; /* DWORD 81 */
4913+ struct BE_MPU_MAILBOX_DB_AMAP mcc_bootstrap_db;
4914+ u8 rsvd10[935][32]; /* DWORD 89 */
4915+} __packed;
4916+struct PROTECTION_DOMAIN_DBMAP_AMAP {
4917+ u32 dw[1024];
4918+};
4919+
4920+#endif /* __doorbells_amap_h__ */
4921--- /dev/null
4922+++ b/drivers/staging/benet/ep.h
4923@@ -0,0 +1,66 @@
4924+/*
4925+ * Copyright (C) 2005 - 2008 ServerEngines
4926+ * All rights reserved.
4927+ *
4928+ * This program is free software; you can redistribute it and/or
4929+ * modify it under the terms of the GNU General Public License version 2
4930+ * as published by the Free Software Foundation. The full GNU General
4931+ * Public License is included in this distribution in the file called COPYING.
4932+ *
4933+ * Contact Information:
4934+ * linux-drivers@serverengines.com
4935+ *
4936+ * ServerEngines
4937+ * 209 N. Fair Oaks Ave
4938+ * Sunnyvale, CA 94085
4939+ */
4940+/*
4941+ * Autogenerated by srcgen version: 0127
4942+ */
4943+#ifndef __ep_amap_h__
4944+#define __ep_amap_h__
4945+
4946+/* General Control and Status Register. */
4947+struct BE_EP_CONTROL_CSR_AMAP {
4948+ u8 m0_RxPbuf; /* DWORD 0 */
4949+ u8 m1_RxPbuf; /* DWORD 0 */
4950+ u8 m2_RxPbuf; /* DWORD 0 */
4951+ u8 ff_en; /* DWORD 0 */
4952+ u8 rsvd0[27]; /* DWORD 0 */
4953+ u8 CPU_reset; /* DWORD 0 */
4954+} __packed;
4955+struct EP_CONTROL_CSR_AMAP {
4956+ u32 dw[1];
4957+};
4958+
4959+/* Semaphore Register. */
4960+struct BE_EP_SEMAPHORE_CSR_AMAP {
4961+ u8 value[32]; /* DWORD 0 */
4962+} __packed;
4963+struct EP_SEMAPHORE_CSR_AMAP {
4964+ u32 dw[1];
4965+};
4966+
4967+/* Embedded Processor Specific Registers. */
4968+struct BE_EP_CSRMAP_AMAP {
4969+ struct BE_EP_CONTROL_CSR_AMAP ep_control;
4970+ u8 rsvd0[32]; /* DWORD 1 */
4971+ u8 rsvd1[32]; /* DWORD 2 */
4972+ u8 rsvd2[32]; /* DWORD 3 */
4973+ u8 rsvd3[32]; /* DWORD 4 */
4974+ u8 rsvd4[32]; /* DWORD 5 */
4975+ u8 rsvd5[8][128]; /* DWORD 6 */
4976+ u8 rsvd6[32]; /* DWORD 38 */
4977+ u8 rsvd7[32]; /* DWORD 39 */
4978+ u8 rsvd8[32]; /* DWORD 40 */
4979+ u8 rsvd9[32]; /* DWORD 41 */
4980+ u8 rsvd10[32]; /* DWORD 42 */
4981+ struct BE_EP_SEMAPHORE_CSR_AMAP ep_semaphore;
4982+ u8 rsvd11[32]; /* DWORD 44 */
4983+ u8 rsvd12[19][32]; /* DWORD 45 */
4984+} __packed;
4985+struct EP_CSRMAP_AMAP {
4986+ u32 dw[64];
4987+};
4988+
4989+#endif /* __ep_amap_h__ */
4990--- /dev/null
4991+++ b/drivers/staging/benet/eq.c
4992@@ -0,0 +1,299 @@
4993+/*
4994+ * Copyright (C) 2005 - 2008 ServerEngines
4995+ * All rights reserved.
4996+ *
4997+ * This program is free software; you can redistribute it and/or
4998+ * modify it under the terms of the GNU General Public License version 2
4999+ * as published by the Free Software Foundation. The full GNU General
5000+ * Public License is included in this distribution in the file called COPYING.
5001+ *
5002+ * Contact Information:
5003+ * linux-drivers@serverengines.com
5004+ *
5005+ * ServerEngines
5006+ * 209 N. Fair Oaks Ave
5007+ * Sunnyvale, CA 94085
5008+ */
5009+#include "hwlib.h"
5010+#include "bestatus.h"
5011+/*
5012+ This routine creates an event queue based on the client event
5013+ queue configuration information.
5014+
5015+ FunctionObject - Handle to a function object
5016+ EqBaseVa - Base VA for the EQ ring
5017+ SizeEncoding - The encoded size for the EQ entries. This value is
5018+ either CEV_EQ_SIZE_4 or CEV_EQ_SIZE_16
5019+ NumEntries - CEV_EQ_CNT_* values.
5020+ Watermark - Enables watermark based coalescing. This parameter
5021+ must be of the type CEV_WMARK_* if watermarks
5022+ are enabled. If watermarks are to be disabled,
5023+ this value should be -1.
5024+ TimerDelay - If a timer delay is enabled this value should be the
5025+ time of the delay in 8 microsecond units. If
5026+ delays are not used this parameter should be
5027+ set to -1.
5028+ ppEqObject - Internal EQ Handle returned.
5029+
5030+ Returns BE_SUCCESS if successful, otherwise a useful error code
5031+ is returned.
5032+
5033+ IRQL < DISPATCH_LEVEL
5034+*/
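+/*
+ * Illustrative call (identifiers other than the API above are invented):
+ * create a 1024-entry EQ of 4-byte EQEs with both watermarking and the
+ * delay timer disabled:
+ *
+ *	status = be_eq_create(pfob, &rd, 4, 1024, -1, -1, &eq);
+ */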
5035+int
5036+be_eq_create(struct be_function_object *pfob,
5037+ struct ring_desc *rd, u32 eqe_size, u32 num_entries,
5038+ u32 watermark, /* CEV_WMARK_* or -1 */
5039+ u32 timer_delay, /* in 8us units, or -1 */
5040+ struct be_eq_object *eq_object)
5041+{
5042+ int status = BE_SUCCESS;
5043+ u32 num_entries_encoding, eqe_size_encoding, length;
5044+ struct FWCMD_COMMON_EQ_CREATE *fwcmd = NULL;
5045+ struct MCC_WRB_AMAP *wrb = NULL;
5046+ u32 n;
5047+ unsigned long irql;
5048+
5049+ ASSERT(rd);
5050+ ASSERT(eq_object);
5051+
5052+ switch (num_entries) {
5053+ case 256:
5054+ num_entries_encoding = CEV_EQ_CNT_256;
5055+ break;
5056+ case 512:
5057+ num_entries_encoding = CEV_EQ_CNT_512;
5058+ break;
5059+ case 1024:
5060+ num_entries_encoding = CEV_EQ_CNT_1024;
5061+ break;
5062+ case 2048:
5063+ num_entries_encoding = CEV_EQ_CNT_2048;
5064+ break;
5065+ case 4096:
5066+ num_entries_encoding = CEV_EQ_CNT_4096;
5067+ break;
5068+ default:
5069+ ASSERT(0);
5070+ return BE_STATUS_INVALID_PARAMETER;
5071+ }
5072+
5073+ switch (eqe_size) {
5074+ case 4:
5075+ eqe_size_encoding = CEV_EQ_SIZE_4;
5076+ break;
5077+ case 16:
5078+ eqe_size_encoding = CEV_EQ_SIZE_16;
5079+ break;
5080+ default:
5081+ ASSERT(0);
5082+ return BE_STATUS_INVALID_PARAMETER;
5083+ }
5084+
5085+ if ((eqe_size == 4 && num_entries < 1024) ||
5086+ (eqe_size == 16 && num_entries == 4096)) {
5087+ TRACE(DL_ERR, "Bad EQ size. eqe_size:%d num_entries:%d",
5088+ eqe_size, num_entries);
5089+ ASSERT(0);
5090+ return BE_STATUS_INVALID_PARAMETER;
5091+ }
5092+
5093+ memset(eq_object, 0, sizeof(*eq_object));
5094+
5095+ atomic_set(&eq_object->ref_count, 0);
5096+ eq_object->parent_function = pfob;
5097+ eq_object->eq_id = 0xFFFFFFFF;
5098+
5099+ INIT_LIST_HEAD(&eq_object->cq_list_head);
5100+
5101+ length = num_entries * eqe_size;
5102+
5103+ spin_lock_irqsave(&pfob->post_lock, irql);
5104+
5105+ wrb = be_function_peek_mcc_wrb(pfob);
5106+ if (!wrb) {
5107+ ASSERT(wrb);
5108+ TRACE(DL_ERR, "No free MCC WRBs in create EQ.");
5109+ status = BE_STATUS_NO_MCC_WRB;
5110+ goto Error;
5111+ }
5112+ /* Prepares an embedded fwcmd, including request/response sizes. */
5113+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_EQ_CREATE);
5114+
5115+ fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va),
5116+ length);
5117+ n = pfob->pci_function_number;
5118+ AMAP_SET_BITS_PTR(EQ_CONTEXT, Func, &fwcmd->params.request.context, n);
5119+
5120+ AMAP_SET_BITS_PTR(EQ_CONTEXT, valid, &fwcmd->params.request.context, 1);
5121+
5122+ AMAP_SET_BITS_PTR(EQ_CONTEXT, Size,
5123+ &fwcmd->params.request.context, eqe_size_encoding);
5124+
5125+ n = 0; /* Protection Domain is always 0 in Linux driver */
5126+ AMAP_SET_BITS_PTR(EQ_CONTEXT, PD, &fwcmd->params.request.context, n);
5127+
5128+ /* Let the caller ARM the EQ with the doorbell. */
5129+ AMAP_SET_BITS_PTR(EQ_CONTEXT, Armed, &fwcmd->params.request.context, 0);
5130+
5131+ AMAP_SET_BITS_PTR(EQ_CONTEXT, Count, &fwcmd->params.request.context,
5132+ num_entries_encoding);
5133+
5134+ n = pfob->pci_function_number * 32;
5135+ AMAP_SET_BITS_PTR(EQ_CONTEXT, EventVect,
5136+ &fwcmd->params.request.context, n);
5137+ if (watermark != -1) {
5138+ AMAP_SET_BITS_PTR(EQ_CONTEXT, WME,
5139+ &fwcmd->params.request.context, 1);
5140+ AMAP_SET_BITS_PTR(EQ_CONTEXT, Watermark,
5141+ &fwcmd->params.request.context, watermark);
5142+ ASSERT(watermark <= CEV_WMARK_240);
5143+ } else
5144+ AMAP_SET_BITS_PTR(EQ_CONTEXT, WME,
5145+ &fwcmd->params.request.context, 0);
5146+ if (timer_delay != -1) {
5147+ AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR,
5148+ &fwcmd->params.request.context, 1);
5149+
5150+ ASSERT(timer_delay <= 250); /* max value according to EAS */
5151+ timer_delay = min(timer_delay, (u32)250);
5152+
5153+ AMAP_SET_BITS_PTR(EQ_CONTEXT, Delay,
5154+ &fwcmd->params.request.context, timer_delay);
5155+ } else {
5156+ AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR,
5157+ &fwcmd->params.request.context, 0);
5158+ }
5159+ /* Create a page list for the FWCMD. */
5160+ be_rd_to_pa_list(rd, fwcmd->params.request.pages,
5161+ ARRAY_SIZE(fwcmd->params.request.pages));
5162+
5163+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
5164+ NULL, NULL, fwcmd, NULL);
5165+ if (status != BE_SUCCESS) {
5166+ TRACE(DL_ERR, "MCC to create EQ failed.");
5167+ goto Error;
5168+ }
5169+ /* Get the EQ id. The MPU allocates the IDs. */
5170+ eq_object->eq_id = fwcmd->params.response.eq_id;
5171+
5172+Error:
5173+ spin_unlock_irqrestore(&pfob->post_lock, irql);
5174+
5175+ if (pfob->pend_queue_driving && pfob->mcc) {
5176+ pfob->pend_queue_driving = 0;
5177+ be_drive_mcc_wrb_queue(pfob->mcc);
5178+ }
5179+ return status;
5180+}
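For illustration (this standalone sketch is not part of the patch), the EQ
geometry rules enforced above can be collapsed into one predicate. Entry
counts below 512 are handled in switch cases truncated above this excerpt
and are omitted here:

#include <stdbool.h>

/* Mirrors the checks in be_eq_create(): only encodable entry counts are
 * accepted, 4-byte EQEs need at least 1024 entries, and 16-byte EQEs may
 * not use 4096 entries.
 */
static bool be_eq_geometry_ok(unsigned int eqe_size, unsigned int num_entries)
{
	if (eqe_size != 4 && eqe_size != 16)
		return false;
	if (num_entries != 512 && num_entries != 1024 &&
	    num_entries != 2048 && num_entries != 4096)
		return false;	/* smaller encodable counts omitted */
	if (eqe_size == 4 && num_entries < 1024)
		return false;
	if (eqe_size == 16 && num_entries == 4096)
		return false;
	return true;
}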
5181+
5182+/*
5183+ Dereferences the given object. Once the object's reference count drops to
5184+ zero, the object is destroyed and all resources that are held by this
5185+ object are released. The on-chip context is also destroyed along with
5186+ the queue ID, and any mappings made into the UT.
5187+
5188+ eq_object - EQ handle returned from eq_object_create.
5189+
5190+ Returns BE_SUCCESS if successful, otherwise a useful error code
5191+ is returned.
5192+
5193+ IRQL: IRQL < DISPATCH_LEVEL
5194+*/
5195+int be_eq_destroy(struct be_eq_object *eq_object)
5196+{
5197+ int status = 0;
5198+
5199+ ASSERT(atomic_read(&eq_object->ref_count) == 0);
5200+ /* no CQs should reference this EQ now */
5201+ ASSERT(list_empty(&eq_object->cq_list_head));
5202+
5203+ /* Send fwcmd to destroy the EQ. */
5204+ status = be_function_ring_destroy(eq_object->parent_function,
5205+ eq_object->eq_id, FWCMD_RING_TYPE_EQ,
5206+ NULL, NULL, NULL, NULL);
5207+ ASSERT(status == 0);
5208+
5209+ return BE_SUCCESS;
5210+}
5211+/*
5212+ *---------------------------------------------------------------------------
5213+ * Function: be_eq_modify_delay
5214+ * Changes the EQ delay for a group of EQs.
5215+ * num_eq - The number of EQs in the eq_array to adjust.
5216+ * This also is the number of delay values in
5217+ * the eq_delay_array.
5218+ * eq_array - Array of struct be_eq_object pointers to adjust.
5219+ * eq_delay_array - Array of "num_eq" timer delays in units
5220+ * of microseconds. The be_eq_query_delay_range
5221+ * fwcmd returns the resolution and range of
5222+ * legal EQ delays.
5223+ * cb -
5224+ * cb_context -
5225+ * q_ctxt - Optional. Pointer to a previously allocated
5226+ * struct. If the MCC WRB ring is full, this
5227+ * structure is used to queue the operation. It
5228+ * will be posted to the MCC ring when space
5229+ * becomes available. All queued commands will
5230+ * be posted to the ring in the order they are
5231+ * received. It is always valid to pass a pointer to
5232+ * a generic be_generic_q_cntxt. However,
5233+ * the specific context structs
5234+ * are generally smaller than the generic struct.
5235+ * return pend_status - BE_SUCCESS (0) on success.
5236+ * BE_PENDING (positive value) if the FWCMD
5237+ * completion is pending. Negative error code on failure.
5238+ *-------------------------------------------------------------------------
5239+ */
5240+int
5241+be_eq_modify_delay(struct be_function_object *pfob,
5242+ u32 num_eq, struct be_eq_object **eq_array,
5243+ u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
5244+ void *cb_context, struct be_eq_modify_delay_q_ctxt *q_ctxt)
5245+{
5246+ struct FWCMD_COMMON_MODIFY_EQ_DELAY *fwcmd = NULL;
5247+ struct MCC_WRB_AMAP *wrb = NULL;
5248+ int status = 0;
5249+ struct be_generic_q_ctxt *gen_ctxt = NULL;
5250+ u32 i;
5251+ unsigned long irql;
5252+
5253+ spin_lock_irqsave(&pfob->post_lock, irql);
5254+
5255+ wrb = be_function_peek_mcc_wrb(pfob);
5256+ if (!wrb) {
5257+ if (q_ctxt && cb) {
5258+ wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
5259+ gen_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
5260+ gen_ctxt->context.bytes = sizeof(*q_ctxt);
5261+ } else {
5262+ status = BE_STATUS_NO_MCC_WRB;
5263+ goto Error;
5264+ }
5265+ }
5266+ /* Prepares an embedded fwcmd, including request/response sizes. */
5267+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MODIFY_EQ_DELAY);
5268+
5269+ ASSERT(num_eq > 0);
5270+ ASSERT(num_eq <= ARRAY_SIZE(fwcmd->params.request.delay));
5271+ fwcmd->params.request.num_eq = num_eq;
5272+ for (i = 0; i < num_eq; i++) {
5273+ fwcmd->params.request.delay[i].eq_id = eq_array[i]->eq_id;
5274+ fwcmd->params.request.delay[i].delay_in_microseconds =
5275+ eq_delay_array[i];
5276+ }
5277+
5278+ /* Post the f/w command */
5279+ status = be_function_post_mcc_wrb(pfob, wrb, gen_ctxt,
5280+ cb, cb_context, NULL, NULL, fwcmd, NULL);
5281+
5282+Error:
5283+ spin_unlock_irqrestore(&pfob->post_lock, irql);
5284+
5285+ if (pfob->pend_queue_driving && pfob->mcc) {
5286+ pfob->pend_queue_driving = 0;
5287+ be_drive_mcc_wrb_queue(pfob->mcc);
5288+ }
5289+ return status;
5290+}
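As a caller-side sketch (not part of the patch; the EQ objects and the
96/48 microsecond values are placeholders), adaptive interrupt moderation
could retune two EQs with a single fwcmd:

static void example_retune_eq_delays(struct be_function_object *pfob,
				     struct be_eq_object *rx_eq,
				     struct be_eq_object *tx_eq)
{
	struct be_eq_object *eqs[2] = { rx_eq, tx_eq };
	u32 delays_us[2] = { 96, 48 };	/* arbitrary example values */
	int status;

	/* No callback and no queue context: the call simply fails with
	 * BE_STATUS_NO_MCC_WRB if the WRB ring is full.
	 */
	status = be_eq_modify_delay(pfob, 2, eqs, delays_us,
				    NULL, NULL, NULL);
	if (status < 0)
		TRACE(DL_ERR, "EQ delay update failed.");
}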
5291+
5292--- /dev/null
5293+++ b/drivers/staging/benet/eth.c
5294@@ -0,0 +1,1273 @@
5295+/*
5296+ * Copyright (C) 2005 - 2008 ServerEngines
5297+ * All rights reserved.
5298+ *
5299+ * This program is free software; you can redistribute it and/or
5300+ * modify it under the terms of the GNU General Public License version 2
5301+ * as published by the Free Software Foundation. The full GNU General
5302+ * Public License is included in this distribution in the file called COPYING.
5303+ *
5304+ * Contact Information:
5305+ * linux-drivers@serverengines.com
5306+ *
5307+ * ServerEngines
5308+ * 209 N. Fair Oaks Ave
5309+ * Sunnyvale, CA 94085
5310+ */
5311+#include <linux/if_ether.h>
5312+#include "hwlib.h"
5313+#include "bestatus.h"
5314+
5315+/*
5316+ *---------------------------------------------------------
5317+ * Function: be_eth_sq_create_ex
5318+ * Creates an ethernet send ring - extended version with
5319+ * additional parameters.
5320+ * pfob -
5321+ * rd - ring address
5322+ * length_in_bytes -
5323+ * type - The type of ring to create.
5324+ * ulp - The requested ULP number for the ring.
5325+ * This should be zero based, i.e. 0,1,2. This must
5326+ * be a valid NIC ULP based on the firmware config.
5327+ * All doorbells for this ring must be sent to
5328+ * this ULP. The first network ring allocated for
5329+ * each ULP is higher performance than subsequent rings.
5330+ * cq_object - cq object for completions
5331+ * ex_parameters - Additional parameters (that may increase in
5332+ * future revisions). These parameters are only used
5333+ * for certain ring types -- see
5334+ * struct be_eth_sq_parameters for details.
5335+ * eth_sq -
5336+ * return status - BE_SUCCESS (0) on success. Negative error code on failure.
5337+ *---------------------------------------------------------
5338+ */
5339+int
5340+be_eth_sq_create_ex(struct be_function_object *pfob, struct ring_desc *rd,
5341+ u32 length, u32 type, u32 ulp, struct be_cq_object *cq_object,
5342+ struct be_eth_sq_parameters *ex_parameters,
5343+ struct be_ethsq_object *eth_sq)
5344+{
5345+ struct FWCMD_COMMON_ETH_TX_CREATE *fwcmd = NULL;
5346+ struct MCC_WRB_AMAP *wrb = NULL;
5347+ int status = 0;
5348+ u32 n;
5349+ unsigned long irql;
5350+
5351+ ASSERT(rd);
5352+ ASSERT(eth_sq);
5353+ ASSERT(ex_parameters);
5354+
5355+ spin_lock_irqsave(&pfob->post_lock, irql);
5356+
5357+ memset(eth_sq, 0, sizeof(*eth_sq));
5358+
5359+ eth_sq->parent_function = pfob;
5360+ eth_sq->bid = 0xFFFFFFFF;
5361+ eth_sq->cq_object = cq_object;
5362+
5363+ /* Translate hwlib interface to arm interface. */
5364+ switch (type) {
5365+ case BE_ETH_TX_RING_TYPE_FORWARDING:
5366+ type = ETH_TX_RING_TYPE_FORWARDING;
5367+ break;
5368+ case BE_ETH_TX_RING_TYPE_STANDARD:
5369+ type = ETH_TX_RING_TYPE_STANDARD;
5370+ break;
5371+ case BE_ETH_TX_RING_TYPE_BOUND:
5372+ ASSERT(ex_parameters->port < 2);
5373+ type = ETH_TX_RING_TYPE_BOUND;
5374+ break;
5375+ default:
5376+ TRACE(DL_ERR, "Invalid eth tx ring type:%d", type);
5377+ return BE_NOT_OK;
5378+ break;
5379+ }
5380+
5381+ wrb = be_function_peek_mcc_wrb(pfob);
5382+ if (!wrb) {
5383+ ASSERT(wrb);
5384+ TRACE(DL_ERR, "No free MCC WRBs in create eth tx ring.");
5385+ status = BE_STATUS_NO_MCC_WRB;
5386+ goto Error;
5387+ }
5388+ /* NIC must be supported by the current config. */
5389+ ASSERT(pfob->fw_config.nic_ulp_mask);
5390+
5391+ /*
5392+ * The ulp parameter must select a valid NIC ULP
5393+ * for the current config.
5394+ */
5395+ ASSERT((1 << ulp) & pfob->fw_config.nic_ulp_mask);
5396+
5397+ /* Prepares an embedded fwcmd, including request/response sizes. */
5398+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_ETH_TX_CREATE);
5399+ fwcmd->header.request.port_number = ex_parameters->port;
5400+
5401+ AMAP_SET_BITS_PTR(ETX_CONTEXT, pd_id,
5402+ &fwcmd->params.request.context, 0);
5403+
5404+ n = be_ring_length_to_encoding(length, sizeof(struct ETH_WRB_AMAP));
5405+ AMAP_SET_BITS_PTR(ETX_CONTEXT, tx_ring_size,
5406+ &fwcmd->params.request.context, n);
5407+
5408+ AMAP_SET_BITS_PTR(ETX_CONTEXT, cq_id_send,
5409+ &fwcmd->params.request.context, cq_object->cq_id);
5410+
5411+ n = pfob->pci_function_number;
5412+ AMAP_SET_BITS_PTR(ETX_CONTEXT, func, &fwcmd->params.request.context, n);
5413+
5414+ fwcmd->params.request.type = type;
5415+ fwcmd->params.request.ulp_num = (1 << ulp);
5416+ fwcmd->params.request.num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
5417+ ASSERT(PAGES_SPANNED(rd->va, rd->length) >=
5418+ fwcmd->params.request.num_pages);
5419+
5420+ /* Create a page list for the FWCMD. */
5421+ be_rd_to_pa_list(rd, fwcmd->params.request.pages,
5422+ ARRAY_SIZE(fwcmd->params.request.pages));
5423+
5424+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
5425+ NULL, NULL, fwcmd, NULL);
5426+ if (status != BE_SUCCESS) {
5427+ TRACE(DL_ERR, "MCC to create etx queue failed.");
5428+ goto Error;
5429+ }
5430+ /* save the butler ID */
5431+ eth_sq->bid = fwcmd->params.response.cid;
5432+
5433+ /* add a reference to the corresponding CQ */
5434+ atomic_inc(&cq_object->ref_count);
5435+
5436+Error:
5437+ spin_unlock_irqrestore(&pfob->post_lock, irql);
5438+
5439+ if (pfob->pend_queue_driving && pfob->mcc) {
5440+ pfob->pend_queue_driving = 0;
5441+ be_drive_mcc_wrb_queue(pfob->mcc);
5442+ }
5443+ return status;
5444+}
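A hypothetical caller (not part of the patch; tx_rd, tx_cq and tx_sq are
placeholder objects prepared earlier) creating a standard send ring on
ULP 0 might look like:

static int example_create_tx_ring(struct be_function_object *pfob,
				  struct ring_desc *tx_rd, u32 ring_bytes,
				  struct be_cq_object *tx_cq,
				  struct be_ethsq_object *tx_sq)
{
	struct be_eth_sq_parameters params = { 0 };

	params.port = 0;	/* only meaningful for BOUND rings */
	return be_eth_sq_create_ex(pfob, tx_rd, ring_bytes,
				   BE_ETH_TX_RING_TYPE_STANDARD,
				   0 /* ulp */, tx_cq, &params, tx_sq);
}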
5445+
5446+
5447+/*
5448+ This routine destroys an ethernet send queue
5449+
5450+ EthSq - EthSq Handle returned from EthSqCreate
5451+
5452+ This function always returns BE_SUCCESS.
5453+
5454+ This function frees memory allocated by EthSqCreate for the EthSq Object.
5455+
5456+*/
5457+int be_eth_sq_destroy(struct be_ethsq_object *eth_sq)
5458+{
5459+ int status = 0;
5460+
5461+ /* Send fwcmd to destroy the queue. */
5462+ status = be_function_ring_destroy(eth_sq->parent_function, eth_sq->bid,
5463+ FWCMD_RING_TYPE_ETH_TX, NULL, NULL, NULL, NULL);
5464+ ASSERT(status == 0);
5465+
5466+ /* Dereference any associated CQs. */
5467+ atomic_dec(&eth_sq->cq_object->ref_count);
5468+ return status;
5469+}
5470+/*
5471+ This routine attempts to set the transmit flow control parameters.
5472+
5473+ FunctionObject - Handle to a function object
5474+
5475+ txfc_enable - transmit flow control enable - true for
5476+ enable, false for disable
5477+
5478+ rxfc_enable - receive flow control enable - true for
5479+ enable, false for disable
5480+
5481+ Returns BE_SUCCESS if successful, otherwise a useful int error
5482+ code is returned.
5483+
5484+ IRQL: < DISPATCH_LEVEL
5485+
5486+ This function always fails in non-privileged machine context.
5487+*/
5488+int
5489+be_eth_set_flow_control(struct be_function_object *pfob,
5490+ bool txfc_enable, bool rxfc_enable)
5491+{
5492+ struct FWCMD_COMMON_SET_FLOW_CONTROL *fwcmd = NULL;
5493+ struct MCC_WRB_AMAP *wrb = NULL;
5494+ int status = 0;
5495+ unsigned long irql;
5496+
5497+ spin_lock_irqsave(&pfob->post_lock, irql);
5498+
5499+ wrb = be_function_peek_mcc_wrb(pfob);
5500+ if (!wrb) {
5501+ TRACE(DL_ERR, "MCC wrb peek failed.");
5502+ status = BE_STATUS_NO_MCC_WRB;
5503+ goto error;
5504+ }
5505+ /* Prepares an embedded fwcmd, including request/response sizes. */
5506+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_FLOW_CONTROL);
5507+
5508+ fwcmd->params.request.rx_flow_control = rxfc_enable;
5509+ fwcmd->params.request.tx_flow_control = txfc_enable;
5510+
5511+ /* Post the f/w command */
5512+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
5513+ NULL, NULL, fwcmd, NULL);
5514+
5515+ if (status != 0) {
5516+ TRACE(DL_ERR, "set flow control fwcmd failed.");
5517+ goto error;
5518+ }
5519+
5520+error:
5521+ spin_unlock_irqrestore(&pfob->post_lock, irql);
5522+
5523+ if (pfob->pend_queue_driving && pfob->mcc) {
5524+ pfob->pend_queue_driving = 0;
5525+ be_drive_mcc_wrb_queue(pfob->mcc);
5526+ }
5527+ return status;
5528+}
5529+
5530+/*
5531+ This routine attempts to get the transmit flow control parameters.
5532+
5533+ pfob - Handle to a function object
5534+
5535+ txfc_enable - transmit flow control enable - true for
5536+ enable, false for disable
5537+
5538+ rxfc_enable - receive flow control enable - true for enable,
5539+ false for disable
5540+
5541+ Returns BE_SUCCESS if successful, otherwise a useful int error code
5542+ is returned.
5543+
5544+ IRQL: < DISPATCH_LEVEL
5545+
5546+ This function always fails in non-privileged machine context.
5547+*/
5548+int
5549+be_eth_get_flow_control(struct be_function_object *pfob,
5550+ bool *txfc_enable, bool *rxfc_enable)
5551+{
5552+ struct FWCMD_COMMON_GET_FLOW_CONTROL *fwcmd = NULL;
5553+ struct MCC_WRB_AMAP *wrb = NULL;
5554+ int status = 0;
5555+ unsigned long irql;
5556+
5557+ spin_lock_irqsave(&pfob->post_lock, irql);
5558+
5559+ wrb = be_function_peek_mcc_wrb(pfob);
5560+ if (!wrb) {
5561+ TRACE(DL_ERR, "MCC wrb peek failed.");
5562+ status = BE_STATUS_NO_MCC_WRB;
5563+ goto error;
5564+ }
5565+ /* Prepares an embedded fwcmd, including request/response sizes. */
5566+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FLOW_CONTROL);
5567+
5568+ /* Post the f/w command */
5569+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
5570+ NULL, NULL, fwcmd, NULL);
5571+
5572+ if (status != 0) {
5573+ TRACE(DL_ERR, "get flow control fwcmd failed.");
5574+ goto error;
5575+ }
5576+
5577+ *txfc_enable = fwcmd->params.response.tx_flow_control;
5578+ *rxfc_enable = fwcmd->params.response.rx_flow_control;
5579+
5580+error:
5581+ spin_unlock_irqrestore(&pfob->post_lock, irql);
5582+
5583+ if (pfob->pend_queue_driving && pfob->mcc) {
5584+ pfob->pend_queue_driving = 0;
5585+ be_drive_mcc_wrb_queue(pfob->mcc);
5586+ }
5587+ return status;
5588+}
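A set-then-verify sketch (not part of the patch) tying the two flow
control calls together:

static void example_flow_control(struct be_function_object *pfob)
{
	bool tx_on = false, rx_on = false;

	if (be_eth_set_flow_control(pfob, true, true) != BE_SUCCESS)
		return;
	/* Read back what the hardware actually accepted. */
	if (be_eth_get_flow_control(pfob, &tx_on, &rx_on) == BE_SUCCESS)
		TRACE(DL_INFO, "flow control tx:%d rx:%d", tx_on, rx_on);
}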
5589+
5590+/*
5591+ *---------------------------------------------------------
5592+ * Function: be_eth_set_qos
5593+ * This function sets the ethernet transmit Quality of Service (QoS)
5594+ * characteristics of BladeEngine for the domain. All ethernet
5595+ * transmit rings of the domain will evenly share the bandwidth.
5596+ * The exception to sharing is the host primary (super) ethernet
5597+ * transmit ring as well as the host ethernet forwarding ring
5598+ * for missed offload data.
5599+ * pfob -
5600+ * max_bps - the maximum bits per second in units of
5601+ * 10 Mbps (valid 0-100)
5602+ * max_pps - the maximum packets per second in units
5603+ * of 1 Kpps (0 indicates no limit)
5604+ * return status - BE_SUCCESS (0) on success. Negative error code on failure.
5605+ *---------------------------------------------------------
5606+ */
5607+int
5608+be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps)
5609+{
5610+ struct FWCMD_COMMON_SET_QOS *fwcmd = NULL;
5611+ struct MCC_WRB_AMAP *wrb = NULL;
5612+ int status = 0;
5613+ unsigned long irql;
5614+
5615+ spin_lock_irqsave(&pfob->post_lock, irql);
5616+
5617+ wrb = be_function_peek_mcc_wrb(pfob);
5618+ if (!wrb) {
5619+ TRACE(DL_ERR, "MCC wrb peek failed.");
5620+ status = BE_STATUS_NO_MCC_WRB;
5621+ goto error;
5622+ }
5623+ /* Prepares an embedded fwcmd, including request/response sizes. */
5624+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_QOS);
5625+
5626+ /* Set fields in fwcmd */
5627+ fwcmd->params.request.max_bits_per_second_NIC = max_bps;
5628+ fwcmd->params.request.max_packets_per_second_NIC = max_pps;
5629+ fwcmd->params.request.valid_flags = QOS_BITS_NIC | QOS_PKTS_NIC;
5630+
5631+ /* Post the f/w command */
5632+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
5633+ NULL, NULL, fwcmd, NULL);
5634+
5635+ if (status != 0)
5636+ TRACE(DL_ERR, "network set qos fwcmd failed.");
5637+
5638+error:
5639+ spin_unlock_irqrestore(&pfob->post_lock, irql);
5640+ if (pfob->pend_queue_driving && pfob->mcc) {
5641+ pfob->pend_queue_driving = 0;
5642+ be_drive_mcc_wrb_queue(pfob->mcc);
5643+ }
5644+ return status;
5645+}
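To make the units concrete (sketch, not part of the patch): capping a
domain at 500 Mbit/s and 200,000 packets/s means max_bps = 50 (units of
10 Mbps) and max_pps = 200 (units of 1 Kpps):

static int example_cap_domain_qos(struct be_function_object *pfob)
{
	u32 max_mbps = 500, max_pps = 200000;

	return be_eth_set_qos(pfob, max_mbps / 10, max_pps / 1000);
}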
5646+
5647+/*
5648+ *---------------------------------------------------------
5649+ * Function: be_eth_get_qos
5650+ * This function retrieves the ethernet transmit Quality of Service (QoS)
5651+ * characteristics for the domain.
5652+ * max_bps - the maximum bits per second in units of
5653+ * 10 Mbps (valid 0-100)
5654+ * max_pps - the maximum packets per second in units of
5655+ * 1 Kpps (0 indicates no limit)
5656+ * return status - BE_SUCCESS (0) on success. Negative error code on failure.
5657+ *---------------------------------------------------------
5658+ */
5659+int
5660+be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps)
5661+{
5662+ struct FWCMD_COMMON_GET_QOS *fwcmd = NULL;
5663+ struct MCC_WRB_AMAP *wrb = NULL;
5664+ int status = 0;
5665+ unsigned long irql;
5666+
5667+ spin_lock_irqsave(&pfob->post_lock, irql);
5668+
5669+ wrb = be_function_peek_mcc_wrb(pfob);
5670+ if (!wrb) {
5671+ TRACE(DL_ERR, "MCC wrb peek failed.");
5672+ status = BE_STATUS_NO_MCC_WRB;
5673+ goto error;
5674+ }
5675+ /* Prepares an embedded fwcmd, including request/response sizes. */
5676+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_QOS);
5677+
5678+ /* Post the f/w command */
5679+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
5680+ NULL, NULL, fwcmd, NULL);
5681+
5682+ if (status != 0) {
5683+ TRACE(DL_ERR, "network get qos fwcmd failed.");
5684+ goto error;
5685+ }
5686+
5687+ *max_bps = fwcmd->params.response.max_bits_per_second_NIC;
5688+ *max_pps = fwcmd->params.response.max_packets_per_second_NIC;
5689+
5690+error:
5691+ spin_unlock_irqrestore(&pfob->post_lock, irql);
5692+ if (pfob->pend_queue_driving && pfob->mcc) {
5693+ pfob->pend_queue_driving = 0;
5694+ be_drive_mcc_wrb_queue(pfob->mcc);
5695+ }
5696+ return status;
5697+}
5698+
5699+/*
5700+ *---------------------------------------------------------
5701+ * Function: be_eth_set_frame_size
5702+ * This function sets the ethernet maximum frame size. The previous
5703+ * values are returned.
5704+ * pfob -
5705+ * tx_frame_size - maximum transmit frame size in bytes
5706+ * rx_frame_size - maximum receive frame size in bytes
5707+ * return status - BE_SUCCESS (0) on success. Negative error code on failure.
5708+ *---------------------------------------------------------
5709+ */
5710+int
5711+be_eth_set_frame_size(struct be_function_object *pfob,
5712+ u32 *tx_frame_size, u32 *rx_frame_size)
5713+{
5714+ struct FWCMD_COMMON_SET_FRAME_SIZE *fwcmd = NULL;
5715+ struct MCC_WRB_AMAP *wrb = NULL;
5716+ int status = 0;
5717+ unsigned long irql;
5718+
5719+ spin_lock_irqsave(&pfob->post_lock, irql);
5720+
5721+ wrb = be_function_peek_mcc_wrb(pfob);
5722+ if (!wrb) {
5723+ TRACE(DL_ERR, "MCC wrb peek failed.");
5724+ status = BE_STATUS_NO_MCC_WRB;
5725+ goto error;
5726+ }
5727+ /* Prepares an embedded fwcmd, including request/response sizes. */
5728+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_FRAME_SIZE);
5729+ fwcmd->params.request.max_tx_frame_size = *tx_frame_size;
5730+ fwcmd->params.request.max_rx_frame_size = *rx_frame_size;
5731+
5732+ /* Post the f/w command */
5733+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
5734+ NULL, NULL, fwcmd, NULL);
5735+
5736+ if (status != 0) {
5737+ TRACE(DL_ERR, "network set frame size fwcmd failed.");
5738+ goto error;
5739+ }
5740+
5741+ *tx_frame_size = fwcmd->params.response.chip_max_tx_frame_size;
5742+ *rx_frame_size = fwcmd->params.response.chip_max_rx_frame_size;
5743+
5744+error:
5745+ spin_unlock_irqrestore(&pfob->post_lock, irql);
5746+ if (pfob->pend_queue_driving && pfob->mcc) {
5747+ pfob->pend_queue_driving = 0;
5748+ be_drive_mcc_wrb_queue(pfob->mcc);
5749+ }
5750+ return status;
5751+}
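Usage sketch (not part of the patch): request 9000-byte jumbo frames;
per the response fields above, the out-parameters come back holding the
chip maximums, which may differ from the requested values:

static void example_set_jumbo_frames(struct be_function_object *pfob)
{
	u32 tx = 9000, rx = 9000;

	if (be_eth_set_frame_size(pfob, &tx, &rx) == BE_SUCCESS)
		TRACE(DL_INFO, "chip max frame size tx:%u rx:%u", tx, rx);
}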
5752+
5753+
5754+/*
5755+ This routine creates a Ethernet receive ring.
5756+
5757+ pfob - handle to a function object
5758+ rq_base_va - base VA for the default receive ring. this must be
5759+ exactly 8K in length and contiguous physical memory.
5760+ cq_object - handle to a previously created CQ to be associated
5761+ with the RQ.
5762+ pp_eth_rq - pointer to an opaque handle where an eth
5763+ receive object is returned.
5764+ Returns BE_SUCCESS if successful, otherwise a useful
5765+ int error code is returned.
5766+
5767+ IRQL: < DISPATCH_LEVEL
5768+ this function allocates a struct be_ethrq_object *object.
5769+ there must be no more than 1 of these per function object, unless the
5770+ function object supports RSS (is networking and on the host).
5771+ the rq_base_va must point to a buffer of exactly 8K.
5772+ the erx::host_cqid (or host_stor_cqid) register and erx::ring_page registers
5773+ will be updated as appropriate on return
5774+*/
5775+int
5776+be_eth_rq_create(struct be_function_object *pfob,
5777+ struct ring_desc *rd, struct be_cq_object *cq_object,
5778+ struct be_cq_object *bcmc_cq_object,
5779+ struct be_ethrq_object *eth_rq)
5780+{
5781+ int status = 0;
5782+ struct MCC_WRB_AMAP *wrb = NULL;
5783+ struct FWCMD_COMMON_ETH_RX_CREATE *fwcmd = NULL;
5784+ unsigned long irql;
5785+
5786+ /* MPU will set the */
5787+ ASSERT(rd);
5788+ ASSERT(eth_rq);
5789+
5790+ spin_lock_irqsave(&pfob->post_lock, irql);
5791+
5792+ eth_rq->parent_function = pfob;
5793+ eth_rq->cq_object = cq_object;
5794+
5795+ wrb = be_function_peek_mcc_wrb(pfob);
5796+ if (!wrb) {
5797+ TRACE(DL_ERR, "MCC wrb peek failed.");
5798+ status = BE_STATUS_NO_MCC_WRB;
5799+ goto Error;
5800+ }
5801+ /* Prepares an embedded fwcmd, including request/response sizes. */
5802+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_ETH_RX_CREATE);
5803+
5804+ fwcmd->params.request.num_pages = 2; /* required length */
5805+ fwcmd->params.request.cq_id = cq_object->cq_id;
5806+
5807+ if (bcmc_cq_object)
5808+ fwcmd->params.request.bcmc_cq_id = bcmc_cq_object->cq_id;
5809+ else
5810+ fwcmd->params.request.bcmc_cq_id = 0xFFFF;
5811+
5812+ /* Create a page list for the FWCMD. */
5813+ be_rd_to_pa_list(rd, fwcmd->params.request.pages,
5814+ ARRAY_SIZE(fwcmd->params.request.pages));
5815+
5816+ /* Post the f/w command */
5817+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
5818+ NULL, NULL, fwcmd, NULL);
5819+ if (status != BE_SUCCESS) {
5820+ TRACE(DL_ERR, "fwcmd to map eth rxq frags failed.");
5821+ goto Error;
5822+ }
5823+ /* Save the ring ID for cleanup. */
5824+ eth_rq->rid = fwcmd->params.response.id;
5825+
5826+ atomic_inc(&cq_object->ref_count);
5827+
5828+Error:
5829+ spin_unlock_irqrestore(&pfob->post_lock, irql);
5830+
5831+ if (pfob->pend_queue_driving && pfob->mcc) {
5832+ pfob->pend_queue_driving = 0;
5833+ be_drive_mcc_wrb_queue(pfob->mcc);
5834+ }
5835+ return status;
5836+}
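Caller sketch (not part of the patch; rx_rd, rx_cq and rx_rq are
placeholders): passing NULL for the broadcast/multicast CQ selects the
0xFFFF "no bcmc CQ" encoding above, and rx_rd must describe exactly the
8K (two pages) hard-coded into num_pages:

static int example_create_rx_ring(struct be_function_object *pfob,
				  struct ring_desc *rx_rd,
				  struct be_cq_object *rx_cq,
				  struct be_ethrq_object *rx_rq)
{
	return be_eth_rq_create(pfob, rx_rd, rx_cq, NULL, rx_rq);
}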
5837+
5838+/*
5839+ This routine destroys an Ethernet receive queue
5840+
5841+ eth_rq - ethernet receive queue handle returned from eth_rq_create
5842+
5843+ Returns BE_SUCCESS on success and an appropriate int on failure.
5844+
5845+ This function frees resources allocated by EthRqCreate.
5846+ The erx::host_cqid (or host_stor_cqid) register and erx::ring_page
5847+ registers will be updated as appropriate on return
5848+ IRQL: < DISPATCH_LEVEL
5849+*/
5850+
5851+static void be_eth_rq_destroy_internal_cb(void *context, int status,
5852+ struct MCC_WRB_AMAP *wrb)
5853+{
5854+ struct be_ethrq_object *eth_rq = (struct be_ethrq_object *) context;
5855+
5856+ if (status != BE_SUCCESS) {
5857+ TRACE(DL_ERR, "Destroy eth rq failed in internal callback.\n");
5858+ } else {
5859+ /* Dereference any CQs associated with this queue. */
5860+ atomic_dec(&eth_rq->cq_object->ref_count);
5861+ }
5862+
5863+ return;
5864+}
5865+
5866+int be_eth_rq_destroy(struct be_ethrq_object *eth_rq)
5867+{
5868+ int status = BE_SUCCESS;
5869+
5870+ /* Send fwcmd to destroy the RQ. */
5871+ status = be_function_ring_destroy(eth_rq->parent_function,
5872+ eth_rq->rid, FWCMD_RING_TYPE_ETH_RX, NULL, NULL,
5873+ be_eth_rq_destroy_internal_cb, eth_rq);
5874+
5875+ return status;
5876+}
5877+
5878+/*
5879+ *---------------------------------------------------------------------------
5880+ * Function: be_eth_rq_destroy_options
5881+ * Destroys an ethernet receive ring with finer granularity options
5882+ * than the standard be_eth_rq_destroy() API function.
5883+ * eth_rq -
5884+ * flush - Set to 1 to flush the ring, set to 0 to bypass the flush
5885+ * cb - Callback function on completion
5886+ * cb_context - Callback context
5887+ * return status - BE_SUCCESS (0) on success. Negative error code on failure.
5888+ *----------------------------------------------------------------------------
5889+ */
5890+int
5891+be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush,
5892+ mcc_wrb_cqe_callback cb, void *cb_context)
5893+{
5894+ struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL;
5895+ struct MCC_WRB_AMAP *wrb = NULL;
5896+ int status = BE_SUCCESS;
5897+ struct be_function_object *pfob = NULL;
5898+ unsigned long irql;
5899+
5900+ pfob = eth_rq->parent_function;
5901+
5902+ spin_lock_irqsave(&pfob->post_lock, irql);
5903+
5904+ TRACE(DL_INFO, "Destroy eth_rq ring id:%d, flush:%d", eth_rq->rid,
5905+ flush);
5906+
5907+ wrb = be_function_peek_mcc_wrb(pfob);
5908+ if (!wrb) {
5909+ ASSERT(wrb);
5910+ TRACE(DL_ERR, "No free MCC WRBs in destroy eth_rq ring.");
5911+ status = BE_STATUS_NO_MCC_WRB;
5912+ goto Error;
5913+ }
5914+ /* Prepares an embedded fwcmd, including request/response sizes. */
5915+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY);
5916+
5917+ fwcmd->params.request.id = eth_rq->rid;
5918+ fwcmd->params.request.ring_type = FWCMD_RING_TYPE_ETH_RX;
5919+ fwcmd->params.request.bypass_flush = ((0 == flush) ? 1 : 0);
5920+
5921+ /* Post the f/w command */
5922+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context,
5923+ be_eth_rq_destroy_internal_cb, eth_rq, fwcmd, NULL);
5924+
5925+ if (status != BE_SUCCESS && status != BE_PENDING) {
5926+ TRACE(DL_ERR, "eth_rq ring destroy failed. id:%d, flush:%d",
5927+ eth_rq->rid, flush);
5928+ goto Error;
5929+ }
5930+
5931+Error:
5932+ spin_unlock_irqrestore(&pfob->post_lock, irql);
5933+
5934+ if (pfob->pend_queue_driving && pfob->mcc) {
5935+ pfob->pend_queue_driving = 0;
5936+ be_drive_mcc_wrb_queue(pfob->mcc);
5937+ }
5938+ return status;
5939+}
5940+
5941+/*
5942+ This routine queries the frag size for erx.
5943+
5944+ pfob - handle to a function object
5945+
5946+ frag_size_bytes - erx frag size in bytes that is/was set.
5947+
5948+ Returns BE_SUCCESS if successful, otherwise a useful int error
5949+ code is returned.
5950+
5951+ IRQL: < DISPATCH_LEVEL
5952+
5953+*/
5954+int
5955+be_eth_rq_get_frag_size(struct be_function_object *pfob, u32 *frag_size_bytes)
5956+{
5957+ struct FWCMD_ETH_GET_RX_FRAG_SIZE *fwcmd = NULL;
5958+ struct MCC_WRB_AMAP *wrb = NULL;
5959+ int status = 0;
5960+ unsigned long irql;
5961+
5962+ ASSERT(frag_size_bytes);
5963+
5964+ spin_lock_irqsave(&pfob->post_lock, irql);
5965+
5966+ wrb = be_function_peek_mcc_wrb(pfob);
5967+ if (!wrb) {
5968+ TRACE(DL_ERR, "MCC wrb peek failed.");
5969+ return BE_STATUS_NO_MCC_WRB;
5970+ }
5971+ /* Prepares an embedded fwcmd, including request/response sizes. */
5972+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_GET_RX_FRAG_SIZE);
5973+
5974+ /* Post the f/w command */
5975+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
5976+ NULL, NULL, fwcmd, NULL);
5977+
5978+ if (status != 0) {
5979+ TRACE(DL_ERR, "get frag size fwcmd failed.");
5980+ goto error;
5981+ }
5982+
5983+ *frag_size_bytes = 1 << fwcmd->params.response.actual_fragsize_log2;
5984+
5985+error:
5986+ spin_unlock_irqrestore(&pfob->post_lock, irql);
5987+
5988+ if (pfob->pend_queue_driving && pfob->mcc) {
5989+ pfob->pend_queue_driving = 0;
5990+ be_drive_mcc_wrb_queue(pfob->mcc);
5991+ }
5992+ return status;
5993+}
5994+
5995+/*
5996+ This routine attempts to set the frag size for erx. If the frag size is
5997+ already set, the attempt fails and the current frag size is returned.
5998+
5999+ pfob - Handle to a function object
6000+
6001+ frag_size - Erx frag size in bytes that is/was set.
6002+
6003+ current_frag_size_bytes - Pointer to location where the current frag
6004+ size is to be returned.
6005+
6006+ Returns BE_SUCCESS if successful, otherwise a useful int error
6007+ code is returned.
6008+
6009+ IRQL: < DISPATCH_LEVEL
6010+
6011+ This function always fails in non-privileged machine context.
6012+*/
6013+int
6014+be_eth_rq_set_frag_size(struct be_function_object *pfob,
6015+ u32 frag_size, u32 *frag_size_bytes)
6016+{
6017+ struct FWCMD_ETH_SET_RX_FRAG_SIZE *fwcmd = NULL;
6018+ struct MCC_WRB_AMAP *wrb = NULL;
6019+ int status = 0;
6020+ unsigned long irql;
6021+
6022+ ASSERT(frag_size_bytes);
6023+
6024+ spin_lock_irqsave(&pfob->post_lock, irql);
6025+
6026+ wrb = be_function_peek_mcc_wrb(pfob);
6027+ if (!wrb) {
6028+ TRACE(DL_ERR, "MCC wrb peek failed.");
6029+ status = BE_STATUS_NO_MCC_WRB;
6030+ goto error;
6031+ }
6032+ /* Prepares an embedded fwcmd, including request/response sizes. */
6033+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_SET_RX_FRAG_SIZE);
6034+
6035+ ASSERT(frag_size >= 128 && frag_size <= 16 * 1024);
6036+
6037+ /* This is the log2 of the fragsize. This is not the exact
6038+ * ERX encoding. */
6039+ fwcmd->params.request.new_fragsize_log2 = __ilog2_u32(frag_size);
6040+
6041+ /* Post the f/w command */
6042+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
6043+ NULL, NULL, fwcmd, NULL);
6044+
6045+ if (status != 0) {
6046+ TRACE(DL_ERR, "set frag size fwcmd failed.");
6047+ goto error;
6048+ }
6049+
6050+ *frag_size_bytes = 1 << fwcmd->params.response.actual_fragsize_log2;
6051+error:
6052+ spin_unlock_irqrestore(&pfob->post_lock, irql);
6053+
6054+ if (pfob->pend_queue_driving && pfob->mcc) {
6055+ pfob->pend_queue_driving = 0;
6056+ be_drive_mcc_wrb_queue(pfob->mcc);
6057+ }
6058+ return status;
6059+}
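Usage sketch (not part of the patch): request 2048-byte ERX fragments
(__ilog2_u32(2048) == 11) and detect when another function got there
first:

static void example_set_frag_size(struct be_function_object *pfob)
{
	u32 actual = 0;
	int status;

	status = be_eth_rq_set_frag_size(pfob, 2048, &actual);
	if (status != BE_SUCCESS)
		/* Per the comment above, a prior setting makes the
		 * attempt fail and reports the size in force.
		 */
		TRACE(DL_WARN, "frag size already %u bytes", actual);
}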
6060+
6061+
6062+/*
6063+ This routine gets or sets a mac address for a domain
6064+ given the port and mac.
6065+
6066+ FunctionObject - Function object handle.
6067+ port1 - Set to TRUE if this function will set/get the Port 1
6068+ address. Only the host may set this to TRUE.
6069+ mac1 - Set to TRUE if this function will set/get the
6070+ MAC 1 address. Only the host may set this to TRUE.
6071+ write - Set to TRUE if this function should write the mac address.
6072+ mac_address - Buffer of the mac address to read or write.
6073+
6074+ Returns BE_SUCCESS if successful, otherwise a useful int is returned.
6075+
6076+ IRQL: < DISPATCH_LEVEL
6077+*/
6078+int be_rxf_mac_address_read_write(struct be_function_object *pfob,
6079+ bool port1, /* VM must always set to false */
6080+ bool mac1, /* VM must always set to false */
6081+ bool mgmt, bool write,
6082+ bool permanent, u8 *mac_address,
6083+ mcc_wrb_cqe_callback cb, /* optional */
6084+ void *cb_context) /* optional */
6085+{
6086+ int status = BE_SUCCESS;
6087+ union {
6088+ struct FWCMD_COMMON_NTWK_MAC_QUERY *query;
6089+ struct FWCMD_COMMON_NTWK_MAC_SET *set;
6090+ } fwcmd = {NULL};
6091+ struct MCC_WRB_AMAP *wrb = NULL;
6092+ u32 type = 0;
6093+ unsigned long irql;
6094+ struct be_mcc_wrb_response_copy rc;
6095+
6096+ spin_lock_irqsave(&pfob->post_lock, irql);
6097+
6098+ ASSERT(mac_address);
6099+
6100+ ASSERT(port1 == false);
6101+ ASSERT(mac1 == false);
6102+
6103+ wrb = be_function_peek_mcc_wrb(pfob);
6104+ if (!wrb) {
6105+ TRACE(DL_ERR, "MCC wrb peek failed.");
6106+ status = BE_STATUS_NO_MCC_WRB;
6107+ goto Error;
6108+ }
6109+
6110+ if (mgmt) {
6111+ type = MAC_ADDRESS_TYPE_MANAGEMENT;
6112+ } else {
6113+ if (pfob->type == BE_FUNCTION_TYPE_NETWORK)
6114+ type = MAC_ADDRESS_TYPE_NETWORK;
6115+ else
6116+ type = MAC_ADDRESS_TYPE_STORAGE;
6117+ }
6118+
6119+ if (write) {
6120+ /* Prepares an embedded fwcmd, including
6121+ * request/response sizes.
6122+ */
6123+ fwcmd.set = BE_PREPARE_EMBEDDED_FWCMD(pfob,
6124+ wrb, COMMON_NTWK_MAC_SET);
6125+
6126+ fwcmd.set->params.request.invalidate = 0;
6127+ fwcmd.set->params.request.mac1 = (mac1 ? 1 : 0);
6128+ fwcmd.set->params.request.port = (port1 ? 1 : 0);
6129+ fwcmd.set->params.request.type = type;
6130+
6131+ /* Copy the mac address to set. */
6132+ fwcmd.set->params.request.mac.SizeOfStructure =
6133+ sizeof(fwcmd.set->params.request.mac);
6134+ memcpy(fwcmd.set->params.request.mac.MACAddress,
6135+ mac_address, ETH_ALEN);
6136+
6137+ /* Post the f/w command */
6138+ status = be_function_post_mcc_wrb(pfob, wrb, NULL,
6139+ cb, cb_context, NULL, NULL, fwcmd.set, NULL);
6140+
6141+ } else {
6142+
6143+ /*
6144+ * Prepares an embedded fwcmd, including
6145+ * request/response sizes.
6146+ */
6147+ fwcmd.query = BE_PREPARE_EMBEDDED_FWCMD(pfob,
6148+ wrb, COMMON_NTWK_MAC_QUERY);
6149+
6150+ fwcmd.query->params.request.mac1 = (mac1 ? 1 : 0);
6151+ fwcmd.query->params.request.port = (port1 ? 1 : 0);
6152+ fwcmd.query->params.request.type = type;
6153+ fwcmd.query->params.request.permanent = permanent;
6154+
6155+ rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_MAC_QUERY,
6156+ params.response.mac.MACAddress);
6157+ rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_MAC_QUERY,
6158+ params.response.mac.MACAddress);
6159+ rc.va = mac_address;
6160+ /* Post the f/w command (with a copy for the response) */
6161+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb,
6162+ cb_context, NULL, NULL, fwcmd.query, &rc);
6163+ }
6164+
6165+ if (status < 0) {
6166+ TRACE(DL_ERR, "mac set/query failed.");
6167+ goto Error;
6168+ }
6169+
6170+Error:
6171+ spin_unlock_irqrestore(&pfob->post_lock, irql);
6172+ if (pfob->pend_queue_driving && pfob->mcc) {
6173+ pfob->pend_queue_driving = 0;
6174+ be_drive_mcc_wrb_queue(pfob->mcc);
6175+ }
6176+ return status;
6177+}
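Read-side sketch (not part of the patch): synchronously fetch the
permanent factory MAC for this function. port1 and mac1 must be false
per the ASSERTs above; mgmt = false selects the network or storage
address type:

static int example_read_perm_mac(struct be_function_object *pfob,
				 u8 mac[ETH_ALEN])
{
	return be_rxf_mac_address_read_write(pfob, false /* port1 */,
					     false /* mac1 */,
					     false /* mgmt */,
					     false /* write */,
					     true /* permanent */,
					     mac, NULL, NULL);
}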
6178+
6179+/*
6180+ This routine configures multicast address filtering.
6181+
6182+ pfob - Function object handle.
6183+ mac_table - Set to the 128-bit multicast address hash table.
6184+
6185+ Returns BE_SUCCESS if successful, otherwise a useful int is returned.
6186+
6187+ IRQL: < DISPATCH_LEVEL
6188+*/
6189+
6190+int be_rxf_multicast_config(struct be_function_object *pfob,
6191+ bool promiscuous, u32 num, u8 *mac_table,
6192+ mcc_wrb_cqe_callback cb, /* optional */
6193+ void *cb_context,
6194+ struct be_multicast_q_ctxt *q_ctxt)
6195+{
6196+ int status = BE_SUCCESS;
6197+ struct FWCMD_COMMON_NTWK_MULTICAST_SET *fwcmd = NULL;
6198+ struct MCC_WRB_AMAP *wrb = NULL;
6199+ struct be_generic_q_ctxt *generic_ctxt = NULL;
6200+ unsigned long irql;
6201+
6202+ ASSERT(num <= ARRAY_SIZE(fwcmd->params.request.mac));
6203+
6204+ if (num > ARRAY_SIZE(fwcmd->params.request.mac)) {
6205+ TRACE(DL_ERR, "Too many multicast addresses. BE supports %d.",
6206+ (int) ARRAY_SIZE(fwcmd->params.request.mac));
6207+ return BE_NOT_OK;
6208+ }
6209+
6210+ spin_lock_irqsave(&pfob->post_lock, irql);
6211+
6212+ wrb = be_function_peek_mcc_wrb(pfob);
6213+ if (!wrb) {
6214+ if (q_ctxt && cb) {
6215+ wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
6216+ generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
6217+ generic_ctxt->context.bytes = sizeof(*q_ctxt);
6218+ } else {
6219+ status = BE_STATUS_NO_MCC_WRB;
6220+ goto Error;
6221+ }
6222+ }
6223+ /* Prepares an embedded fwcmd, including request/response sizes. */
6224+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_MULTICAST_SET);
6225+
6226+ fwcmd->params.request.promiscuous = promiscuous;
6227+ if (!promiscuous) {
6228+ fwcmd->params.request.num_mac = num;
6229+ if (num > 0) {
6230+ ASSERT(mac_table);
6231+ memcpy(fwcmd->params.request.mac,
6232+ mac_table, ETH_ALEN * num);
6233+ }
6234+ }
6235+
6236+ /* Post the f/w command */
6237+ status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
6238+ cb, cb_context, NULL, NULL, fwcmd, NULL);
6239+ if (status < 0) {
6240+ TRACE(DL_ERR, "multicast fwcmd failed.");
6241+ goto Error;
6242+ }
6243+
6244+Error:
6245+ spin_unlock_irqrestore(&pfob->post_lock, irql);
6246+ if (pfob->pend_queue_driving && pfob->mcc) {
6247+ pfob->pend_queue_driving = 0;
6248+ be_drive_mcc_wrb_queue(pfob->mcc);
6249+ }
6250+ return status;
6251+}
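Caller sketch (not part of the patch): the mac_table argument is a flat
array of 6-byte addresses packed back-to-back, exactly as the memcpy
above expects:

static int example_set_multicast(struct be_function_object *pfob)
{
	u8 table[2 * ETH_ALEN] = {
		0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,	/* 224.0.0.1 */
		0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb,	/* 224.0.0.251 */
	};

	return be_rxf_multicast_config(pfob, false /* promiscuous */,
				       2, table, NULL, NULL, NULL);
}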
6252+
6253+/*
6254+ This routine adds or removes a vlan tag from the rxf table.
6255+
6256+ FunctionObject - Function object handle.
6257+ VLanTag - VLan tag to add or remove.
6258+ Add - Set to TRUE if this will add a vlan tag
6259+
6260+ Returns BE_SUCCESS if successful, otherwise a useful int is returned.
6261+
6262+ IRQL: < DISPATCH_LEVEL
6263+*/
6264+int be_rxf_vlan_config(struct be_function_object *pfob,
6265+ bool promiscuous, u32 num, u16 *vlan_tag_array,
6266+ mcc_wrb_cqe_callback cb, /* optional */
6267+ void *cb_context,
6268+ struct be_vlan_q_ctxt *q_ctxt) /* optional */
6269+{
6270+ int status = BE_SUCCESS;
6271+ struct FWCMD_COMMON_NTWK_VLAN_CONFIG *fwcmd = NULL;
6272+ struct MCC_WRB_AMAP *wrb = NULL;
6273+ struct be_generic_q_ctxt *generic_ctxt = NULL;
6274+ unsigned long irql;
6275+
6276+ if (num > ARRAY_SIZE(fwcmd->params.request.vlan_tag)) {
6277+ TRACE(DL_ERR, "Too many VLAN tags.");
6278+ return BE_NOT_OK;
6279+ }
6280+
6281+ spin_lock_irqsave(&pfob->post_lock, irql);
6282+
6283+ wrb = be_function_peek_mcc_wrb(pfob);
6284+ if (!wrb) {
6285+ if (q_ctxt && cb) {
6286+ wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
6287+ generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
6288+ generic_ctxt->context.bytes = sizeof(*q_ctxt);
6289+ } else {
6290+ status = BE_STATUS_NO_MCC_WRB;
6291+ goto Error;
6292+ }
6293+ }
6294+ /* Prepares an embedded fwcmd, including request/response sizes. */
6295+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_VLAN_CONFIG);
6296+
6297+ fwcmd->params.request.promiscuous = promiscuous;
6298+ if (!promiscuous) {
6299+ fwcmd->params.request.num_vlan = num;
6300+
6301+ if (num > 0) {
6302+ ASSERT(vlan_tag_array);
6303+ memcpy(fwcmd->params.request.vlan_tag, vlan_tag_array,
6304+ num * sizeof(vlan_tag_array[0]));
6305+ }
6306+ }
6307+
6308+ /* Post the command */
6309+ status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
6310+ cb, cb_context, NULL, NULL, fwcmd, NULL);
6311+ if (status < 0) {
6312+ TRACE(DL_ERR, "vlan fwcmd failed.");
6313+ goto Error;
6314+ }
6315+
6316+Error:
6317+ spin_unlock_irqrestore(&pfob->post_lock, irql);
6318+ if (pfob->pend_queue_driving && pfob->mcc) {
6319+ pfob->pend_queue_driving = 0;
6320+ be_drive_mcc_wrb_queue(pfob->mcc);
6321+ }
6322+ return status;
6323+}
6324+
6325+
6326+int be_rxf_link_status(struct be_function_object *pfob,
6327+ struct BE_LINK_STATUS *link_status,
6328+ mcc_wrb_cqe_callback cb,
6329+ void *cb_context,
6330+ struct be_link_status_q_ctxt *q_ctxt)
6331+{
6332+ struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY *fwcmd = NULL;
6333+ struct MCC_WRB_AMAP *wrb = NULL;
6334+ int status = 0;
6335+ struct be_generic_q_ctxt *generic_ctxt = NULL;
6336+ unsigned long irql;
6337+ struct be_mcc_wrb_response_copy rc;
6338+
6339+ ASSERT(link_status);
6340+
6341+ spin_lock_irqsave(&pfob->post_lock, irql);
6342+
6343+ wrb = be_function_peek_mcc_wrb(pfob);
6344+
6345+ if (!wrb) {
6346+ if (q_ctxt && cb) {
6347+ wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
6348+ generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
6349+ generic_ctxt->context.bytes = sizeof(*q_ctxt);
6350+ } else {
6351+ status = BE_STATUS_NO_MCC_WRB;
6352+ goto Error;
6353+ }
6354+ }
6355+ /* Prepares an embedded fwcmd, including request/response sizes. */
6356+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb,
6357+ COMMON_NTWK_LINK_STATUS_QUERY);
6358+
6359+ rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY,
6360+ params.response);
6361+ rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY,
6362+ params.response);
6363+ rc.va = link_status;
6364+ /* Post or queue the f/w command */
6365+ status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
6366+ cb, cb_context, NULL, NULL, fwcmd, &rc);
6367+
6368+ if (status < 0) {
6369+ TRACE(DL_ERR, "link status fwcmd failed.");
6370+ goto Error;
6371+ }
6372+
6373+Error:
6374+ spin_unlock_irqrestore(&pfob->post_lock, irql);
6375+ if (pfob->pend_queue_driving && pfob->mcc) {
6376+ pfob->pend_queue_driving = 0;
6377+ be_drive_mcc_wrb_queue(pfob->mcc);
6378+ }
6379+ return status;
6380+}
6381+
6382+int
6383+be_rxf_query_eth_statistics(struct be_function_object *pfob,
6384+ struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd,
6385+ u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb,
6386+ void *cb_context,
6387+ struct be_nonembedded_q_ctxt *q_ctxt)
6388+{
6389+ struct MCC_WRB_AMAP *wrb = NULL;
6390+ int status = 0;
6391+ struct be_generic_q_ctxt *generic_ctxt = NULL;
6392+ unsigned long irql;
6393+
6394+ ASSERT(va_for_fwcmd);
6395+ ASSERT(pa_for_fwcmd);
6396+
6397+ spin_lock_irqsave(&pfob->post_lock, irql);
6398+
6399+ wrb = be_function_peek_mcc_wrb(pfob);
6400+
6401+ if (!wrb) {
6402+ if (q_ctxt && cb) {
6403+ wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
6404+ generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
6405+ generic_ctxt->context.bytes = sizeof(*q_ctxt);
6406+ } else {
6407+ status = BE_STATUS_NO_MCC_WRB;
6408+ goto Error;
6409+ }
6410+ }
6411+
6412+ TRACE(DL_INFO, "Query eth stats. fwcmd va:%p pa:0x%08x_%08x",
6413+ va_for_fwcmd, upper_32_bits(pa_for_fwcmd), (u32)pa_for_fwcmd);
6414+
6415+ /* Prepares an embedded fwcmd, including request/response sizes. */
6416+ va_for_fwcmd = BE_PREPARE_NONEMBEDDED_FWCMD(pfob, wrb,
6417+ va_for_fwcmd, pa_for_fwcmd, ETH_GET_STATISTICS);
6418+
6419+ /* Post the f/w command */
6420+ status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
6421+ cb, cb_context, NULL, NULL, va_for_fwcmd, NULL);
6422+ if (status < 0) {
6423+ TRACE(DL_ERR, "eth stats fwcmd failed.");
6424+ goto Error;
6425+ }
6426+
6427+Error:
6428+ spin_unlock_irqrestore(&pfob->post_lock, irql);
6429+ if (pfob->pend_queue_driving && pfob->mcc) {
6430+ pfob->pend_queue_driving = 0;
6431+ be_drive_mcc_wrb_queue(pfob->mcc);
6432+ }
6433+ return status;
6434+}
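Usage sketch (not part of the patch): unlike the embedded fwcmds above,
statistics are a non-embedded command, so the caller supplies both the
kernel virtual address and the bus address of a DMA-coherent buffer
(e.g. one obtained from pci_alloc_consistent()):

static int example_query_stats(struct be_function_object *pfob,
			       struct FWCMD_ETH_GET_STATISTICS *stats_va,
			       u64 stats_pa)
{
	return be_rxf_query_eth_statistics(pfob, stats_va, stats_pa,
					   NULL, NULL, NULL);
}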
6435+
6436+int
6437+be_rxf_promiscuous(struct be_function_object *pfob,
6438+ bool enable_port0, bool enable_port1,
6439+ mcc_wrb_cqe_callback cb, void *cb_context,
6440+ struct be_promiscuous_q_ctxt *q_ctxt)
6441+{
6442+ struct FWCMD_ETH_PROMISCUOUS *fwcmd = NULL;
6443+ struct MCC_WRB_AMAP *wrb = NULL;
6444+ int status = 0;
6445+ struct be_generic_q_ctxt *generic_ctxt = NULL;
6446+ unsigned long irql;
6447+
6448+
6449+ spin_lock_irqsave(&pfob->post_lock, irql);
6450+
6451+ wrb = be_function_peek_mcc_wrb(pfob);
6452+
6453+ if (!wrb) {
6454+ if (q_ctxt && cb) {
6455+ wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
6456+ generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
6457+ generic_ctxt->context.bytes = sizeof(*q_ctxt);
6458+ } else {
6459+ status = BE_STATUS_NO_MCC_WRB;
6460+ goto Error;
6461+ }
6462+ }
6463+ /* Prepares an embedded fwcmd, including request/response sizes. */
6464+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_PROMISCUOUS);
6465+
6466+ fwcmd->params.request.port0_promiscuous = enable_port0;
6467+ fwcmd->params.request.port1_promiscuous = enable_port1;
6468+
6469+ /* Post the f/w command */
6470+ status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
6471+ cb, cb_context, NULL, NULL, fwcmd, NULL);
6472+
6473+ if (status < 0) {
6474+ TRACE(DL_ERR, "promiscuous fwcmd failed.");
6475+ goto Error;
6476+ }
6477+
6478+Error:
6479+ spin_unlock_irqrestore(&pfob->post_lock, irql);
6480+ if (pfob->pend_queue_driving && pfob->mcc) {
6481+ pfob->pend_queue_driving = 0;
6482+ be_drive_mcc_wrb_queue(pfob->mcc);
6483+ }
6484+ return status;
6485+}
6486+
6487+
6488+/*
6489+ *-------------------------------------------------------------------------
6490+ * Function: be_rxf_filter_config
6491+ * Configures BladeEngine ethernet receive filter settings.
6492+ * pfob -
6493+ * settings - Pointer to the requested filter settings.
6494+ * The response from BladeEngine will be placed back
6495+ * in this structure.
6496+ * cb - optional
6497+ * cb_context - optional
6498+ * q_ctxt - Optional. Pointer to a previously allocated struct.
6499+ * If the MCC WRB ring is full, this structure is
6500+ * used to queue the operation. It will be posted
6501+ * to the MCC ring when space becomes available. All
6502+ * queued commands will be posted to the ring in
6503+ * the order they are received. It is always valid
6504+ * to pass a pointer to a generic
6505+ * be_generic_q_ctxt. However, the specific
6506+ * context structs are generally smaller than
6507+ * the generic struct.
6508+ * return pend_status - BE_SUCCESS (0) on success.
6509+ * BE_PENDING (positive value) if the FWCMD
6510+ * completion is pending. Negative error code on failure.
6511+ *---------------------------------------------------------------------------
6512+ */
6513+int
6514+be_rxf_filter_config(struct be_function_object *pfob,
6515+ struct NTWK_RX_FILTER_SETTINGS *settings,
6516+ mcc_wrb_cqe_callback cb, void *cb_context,
6517+ struct be_rxf_filter_q_ctxt *q_ctxt)
6518+{
6519+ struct FWCMD_COMMON_NTWK_RX_FILTER *fwcmd = NULL;
6520+ struct MCC_WRB_AMAP *wrb = NULL;
6521+ int status = 0;
6522+ struct be_generic_q_ctxt *generic_ctxt = NULL;
6523+ unsigned long irql;
6524+ struct be_mcc_wrb_response_copy rc;
6525+
6526+ ASSERT(settings);
6527+
6528+ spin_lock_irqsave(&pfob->post_lock, irql);
6529+
6530+ wrb = be_function_peek_mcc_wrb(pfob);
6531+
6532+ if (!wrb) {
6533+ if (q_ctxt && cb) {
6534+ wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
6535+ generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
6536+ generic_ctxt->context.bytes = sizeof(*q_ctxt);
6537+ } else {
6538+ status = BE_STATUS_NO_MCC_WRB;
6539+ goto Error;
6540+ }
6541+ }
6542+ /* Prepares an embedded fwcmd, including request/response sizes. */
6543+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_RX_FILTER);
6544+ memcpy(&fwcmd->params.request, settings, sizeof(*settings));
6545+
6546+ rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_RX_FILTER,
6547+ params.response);
6548+ rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_RX_FILTER,
6549+ params.response);
6550+ rc.va = settings;
6551+ /* Post or queue the f/w command */
6552+ status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
6553+ cb, cb_context, NULL, NULL, fwcmd, &rc);
6554+
6555+ if (status < 0) {
6556+ TRACE(DL_ERR, "RXF/ERX filter config fwcmd failed.");
6557+ goto Error;
6558+ }
6559+
6560+Error:
6561+ spin_unlock_irqrestore(&pfob->post_lock, irql);
6562+ if (pfob->pend_queue_driving && pfob->mcc) {
6563+ pfob->pend_queue_driving = 0;
6564+ be_drive_mcc_wrb_queue(pfob->mcc);
6565+ }
6566+ return status;
6567+}
6568--- /dev/null
6569+++ b/drivers/staging/benet/etx_context.h
6570@@ -0,0 +1,55 @@
6571+/*
6572+ * Copyright (C) 2005 - 2008 ServerEngines
6573+ * All rights reserved.
6574+ *
6575+ * This program is free software; you can redistribute it and/or
6576+ * modify it under the terms of the GNU General Public License version 2
6577+ * as published by the Free Software Foundation. The full GNU General
6578+ * Public License is included in this distribution in the file called COPYING.
6579+ *
6580+ * Contact Information:
6581+ * linux-drivers@serverengines.com
6582+ *
6583+ * ServerEngines
6584+ * 209 N. Fair Oaks Ave
6585+ * Sunnyvale, CA 94085
6586+ */
6587+/*
6588+ * Autogenerated by srcgen version: 0127
6589+ */
6590+#ifndef __etx_context_amap_h__
6591+#define __etx_context_amap_h__
6592+
6593+/* ETX ring context structure. */
6594+struct BE_ETX_CONTEXT_AMAP {
6595+ u8 tx_cidx[11]; /* DWORD 0 */
6596+ u8 rsvd0[5]; /* DWORD 0 */
6597+ u8 rsvd1[16]; /* DWORD 0 */
6598+ u8 tx_pidx[11]; /* DWORD 1 */
6599+ u8 rsvd2; /* DWORD 1 */
6600+ u8 tx_ring_size[4]; /* DWORD 1 */
6601+ u8 pd_id[5]; /* DWORD 1 */
6602+ u8 pd_id_not_valid; /* DWORD 1 */
6603+ u8 cq_id_send[10]; /* DWORD 1 */
6604+ u8 rsvd3[32]; /* DWORD 2 */
6605+ u8 rsvd4[32]; /* DWORD 3 */
6606+ u8 cur_bytes[32]; /* DWORD 4 */
6607+ u8 max_bytes[32]; /* DWORD 5 */
6608+ u8 time_stamp[32]; /* DWORD 6 */
6609+ u8 rsvd5[11]; /* DWORD 7 */
6610+ u8 func; /* DWORD 7 */
6611+ u8 rsvd6[20]; /* DWORD 7 */
6612+ u8 cur_txd_count[32]; /* DWORD 8 */
6613+ u8 max_txd_count[32]; /* DWORD 9 */
6614+ u8 rsvd7[32]; /* DWORD 10 */
6615+ u8 rsvd8[32]; /* DWORD 11 */
6616+ u8 rsvd9[32]; /* DWORD 12 */
6617+ u8 rsvd10[32]; /* DWORD 13 */
6618+ u8 rsvd11[32]; /* DWORD 14 */
6619+ u8 rsvd12[32]; /* DWORD 15 */
6620+} __packed;
6621+struct ETX_CONTEXT_AMAP {
6622+ u32 dw[16];
6623+};
6624+
6625+#endif /* __etx_context_amap_h__ */
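A note on the convention (not part of the patch): in these autogenerated
*_AMAP maps each u8 array element appears to stand for one bit, so the
field widths within every DWORD sum to 32, and the hwlib AMAP accessor
macros derive bit offsets from the struct layout. A compile-time sanity
sketch:

#include <assert.h>

static_assert(11 + 5 + 16 == 32, "ETX_CONTEXT DWORD 0 is 32 bits");
static_assert(11 + 1 + 4 + 5 + 1 + 10 == 32, "ETX_CONTEXT DWORD 1 is 32 bits");
static_assert(11 + 1 + 20 == 32, "ETX_CONTEXT DWORD 7 is 32 bits");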
6626--- /dev/null
6627+++ b/drivers/staging/benet/funcobj.c
6628@@ -0,0 +1,565 @@
6629+/*
6630+ * Copyright (C) 2005 - 2008 ServerEngines
6631+ * All rights reserved.
6632+ *
6633+ * This program is free software; you can redistribute it and/or
6634+ * modify it under the terms of the GNU General Public License version 2
6635+ * as published by the Free Software Foundation. The full GNU General
6636+ * Public License is included in this distribution in the file called COPYING.
6637+ *
6638+ * Contact Information:
6639+ * linux-drivers@serverengines.com
6640+ *
6641+ * ServerEngines
6642+ * 209 N. Fair Oaks Ave
6643+ * Sunnyvale, CA 94085
6644+ */
6645+#include "hwlib.h"
6646+#include "bestatus.h"
6647+
6648+
6649+int
6650+be_function_internal_query_firmware_config(struct be_function_object *pfob,
6651+ struct BE_FIRMWARE_CONFIG *config)
6652+{
6653+ struct FWCMD_COMMON_FIRMWARE_CONFIG *fwcmd = NULL;
6654+ struct MCC_WRB_AMAP *wrb = NULL;
6655+ int status = 0;
6656+ unsigned long irql;
6657+ struct be_mcc_wrb_response_copy rc;
6658+
6659+ spin_lock_irqsave(&pfob->post_lock, irql);
6660+
6661+ wrb = be_function_peek_mcc_wrb(pfob);
6662+ if (!wrb) {
6663+ TRACE(DL_ERR, "MCC wrb peek failed.");
6664+ status = BE_STATUS_NO_MCC_WRB;
6665+ goto error;
6666+ }
6667+ /* Prepares an embedded fwcmd, including request/response sizes. */
6668+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_FIRMWARE_CONFIG);
6669+
6670+ rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_FIRMWARE_CONFIG,
6671+ params.response);
6672+ rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_FIRMWARE_CONFIG,
6673+ params.response);
6674+ rc.va = config;
6675+
6676+ /* Post the f/w command */
6677+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL,
6678+ NULL, NULL, NULL, fwcmd, &rc);
6679+error:
6680+ spin_unlock_irqrestore(&pfob->post_lock, irql);
6681+ if (pfob->pend_queue_driving && pfob->mcc) {
6682+ pfob->pend_queue_driving = 0;
6683+ be_drive_mcc_wrb_queue(pfob->mcc);
6684+ }
6685+ return status;
6686+}
6687+
6688+/*
6689+ This allocates and initializes a function object based on the information
6690+ provided by upper layer drivers.
6691+
6692+ Returns BE_SUCCESS on success and an appropriate int on failure.
6693+
6694+ A function object represents a single BladeEngine (logical) PCI function.
6695+ That is a function object either represents
6696+ the networking side of BladeEngine or the iSCSI side of BladeEngine.
6697+
6698+ This routine will also detect and create an appropriate PD object for the
6699+ PCI function as needed.
6700+*/
6701+int
6702+be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
6703+ u8 __iomem *pci_va, u32 function_type,
6704+ struct ring_desc *mailbox, struct be_function_object *pfob)
6705+{
6706+ int status;
6707+
6708+ ASSERT(pfob); /* not a magic assert */
6709+ ASSERT(function_type <= 2);
6710+
6711+ TRACE(DL_INFO, "Create function object. type:%s object:0x%p",
6712+ (function_type == BE_FUNCTION_TYPE_ISCSI ? "iSCSI" :
6713+ (function_type == BE_FUNCTION_TYPE_NETWORK ? "Network" :
6714+ "Arm")), pfob);
6715+
6716+ memset(pfob, 0, sizeof(*pfob));
6717+
6718+ pfob->type = function_type;
6719+ pfob->csr_va = csr_va;
6720+ pfob->db_va = db_va;
6721+ pfob->pci_va = pci_va;
6722+
6723+ spin_lock_init(&pfob->cq_lock);
6724+ spin_lock_init(&pfob->post_lock);
6725+ spin_lock_init(&pfob->mcc_context_lock);
6726+
6727+
6728+ pfob->pci_function_number = 1;
6729+
6730+
6731+ pfob->emulate = false;
6732+ TRACE(DL_NOTE, "Non-emulation mode");
6733+ status = be_drive_POST(pfob);
6734+ if (status != BE_SUCCESS) {
6735+ TRACE(DL_ERR, "BladeEngine POST failed.");
6736+ goto error;
6737+ }
6738+
6739+ /* Initialize the mailbox */
6740+ status = be_mpu_init_mailbox(pfob, mailbox);
6741+ if (status != BE_SUCCESS) {
6742+ TRACE(DL_ERR, "Failed to initialize mailbox.");
6743+ goto error;
6744+ }
6745+ /*
6746+ * Cache the firmware config for ASSERTs in hwlib and later
6747+ * driver queries.
6748+ */
6749+ status = be_function_internal_query_firmware_config(pfob,
6750+ &pfob->fw_config);
6751+ if (status != BE_SUCCESS) {
6752+ TRACE(DL_ERR, "Failed to query firmware config.");
6753+ goto error;
6754+ }
6755+
6756+error:
6757+ if (status != BE_SUCCESS) {
6758+ /* No cleanup necessary */
6759+ TRACE(DL_ERR, "Failed to create function.");
6760+ memset(pfob, 0, sizeof(*pfob));
6761+ }
6762+ return status;
6763+}
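Probe-path sketch (not part of the patch; the BAR mappings and mailbox
descriptor are placeholders the PCI probe code would have prepared):

static int example_bring_up_function(u8 __iomem *csr_va, u8 __iomem *db_va,
				     u8 __iomem *pci_va,
				     struct ring_desc *mailbox_rd,
				     struct be_function_object *pfob)
{
	return be_function_object_create(csr_va, db_va, pci_va,
					 BE_FUNCTION_TYPE_NETWORK,
					 mailbox_rd, pfob);
}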
6764+
6765+/*
6766+ This routine drops the reference count on a given function object. Once
6767+ the reference count falls to zero, the function object is destroyed and all
6768+ resources held are freed.
6769+
6770+ FunctionObject - The function object to drop the reference to.
6771+*/
6772+int be_function_object_destroy(struct be_function_object *pfob)
6773+{
6774+ TRACE(DL_INFO, "Destroy pfob. Object:0x%p",
6775+ pfob);
6776+
6777+
6778+ ASSERT(pfob->mcc == NULL);
6779+
6780+ return BE_SUCCESS;
6781+}
6782+
6783+int be_function_cleanup(struct be_function_object *pfob)
6784+{
6785+ int status = 0;
6786+ u32 isr;
6787+ u32 host_intr;
6788+ struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
6789+
6790+
6791+ if (pfob->type == BE_FUNCTION_TYPE_NETWORK) {
6792+ status = be_rxf_multicast_config(pfob, false, 0,
6793+ NULL, NULL, NULL, NULL);
6794+ ASSERT(status == BE_SUCCESS);
6795+ }
6796+ /* VLAN */
6797+ status = be_rxf_vlan_config(pfob, false, 0, NULL, NULL, NULL, NULL);
6798+ ASSERT(status == BE_SUCCESS);
6799+ /*
6800+ * MCC Queue -- Switches to mailbox mode. May want to destroy
6801+ * all but the MCC CQ before this call if polling CQ is much better
6802+ * performance than polling mailbox register.
6803+ */
6804+ if (pfob->mcc)
6805+ status = be_mcc_ring_destroy(pfob->mcc);
6806+ /*
6807+ * If interrupts are disabled, clear any CEV interrupt assertions that
6808+ * fired after we stopped processing EQs.
6809+ */
6810+ ctrl.dw[0] = PCICFG1_READ(pfob, host_timer_int_ctrl);
6811+ host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
6812+ hostintr, ctrl.dw);
6813+ if (!host_intr)
6814+ if (pfob->type == BE_FUNCTION_TYPE_NETWORK)
6815+ isr = CSR_READ(pfob, cev.isr1);
6816+ else
6817+ isr = CSR_READ(pfob, cev.isr0);
6818+ else
6819+ /* This should never happen... */
6820+ TRACE(DL_ERR, "function_cleanup called with interrupt enabled");
6821+ /* Function object destroy */
6822+ status = be_function_object_destroy(pfob);
6823+ ASSERT(status == BE_SUCCESS);
6824+
6825+ return status;
6826+}
6827+
6828+
6829+void *
6830+be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
6831+ struct MCC_WRB_AMAP *wrb, u32 payld_len, u32 request_length,
6832+ u32 response_length, u32 opcode, u32 subsystem)
6833+{
6834+ struct FWCMD_REQUEST_HEADER *header = NULL;
6835+ u32 n;
6836+
6837+ ASSERT(wrb);
6838+
6839+ n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
6840+ AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 1);
6841+ AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, min(payld_len, n));
6842+ header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
6843+
6844+ header->timeout = 0;
6845+ header->domain = 0;
6846+ header->request_length = max(request_length, response_length);
6847+ header->opcode = opcode;
6848+ header->subsystem = subsystem;
6849+
6850+ return header;
6851+}
6852+
6853+void *
6854+be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
6855+ struct MCC_WRB_AMAP *wrb,
6856+ void *fwcmd_va, u64 fwcmd_pa,
6857+ u32 payld_len,
6858+ u32 request_length,
6859+ u32 response_length,
6860+ u32 opcode, u32 subsystem)
6861+{
6862+ struct FWCMD_REQUEST_HEADER *header = NULL;
6863+ u32 n;
6864+ struct MCC_WRB_PAYLOAD_AMAP *plp;
6865+
6866+ ASSERT(wrb);
6867+ ASSERT(fwcmd_va);
6868+
6869+ header = (struct FWCMD_REQUEST_HEADER *) fwcmd_va;
6870+
6871+ AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 0);
6872+ AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, payld_len);
6873+
6874+ /*
6875+ * Assume one fragment. The caller may override the SGL by
6876+ * rewriting the 0th length and adding more entries. They
6877+ * will also need to update the sge_count.
6878+ */
6879+ AMAP_SET_BITS_PTR(MCC_WRB, sge_count, wrb, 1);
6880+
6881+ n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
6882+ plp = (struct MCC_WRB_PAYLOAD_AMAP *)((u8 *)wrb + n);
6883+ AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].length, plp, payld_len);
6884+ AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_lo, plp, (u32)fwcmd_pa);
6885+ AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_hi, plp,
6886+ upper_32_bits(fwcmd_pa));
6887+
6888+ header->timeout = 0;
6889+ header->domain = 0;
6890+ header->request_length = max(request_length, response_length);
6891+ header->opcode = opcode;
6892+ header->subsystem = subsystem;
6893+
6894+ return header;
6895+}
6896+
6897+struct MCC_WRB_AMAP *
6898+be_function_peek_mcc_wrb(struct be_function_object *pfob)
6899+{
6900+ struct MCC_WRB_AMAP *wrb = NULL;
6901+ u32 offset;
6902+
6903+ if (pfob->mcc)
6904+ wrb = _be_mpu_peek_ring_wrb(pfob->mcc, false);
6905+ else {
6906+ offset = offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8;
6907+ wrb = (struct MCC_WRB_AMAP *) ((u8 *) pfob->mailbox.va +
6908+ offset);
6909+ }
6910+
6911+ if (wrb)
6912+ memset(wrb, 0, sizeof(struct MCC_WRB_AMAP));
6913+
6914+ return wrb;
6915+}
6916+
6917+#if defined(BE_DEBUG)
6918+void be_function_debug_print_wrb(struct be_function_object *pfob,
6919+ struct MCC_WRB_AMAP *wrb, void *optional_fwcmd_va,
6920+ struct be_mcc_wrb_context *wrb_context)
6921+{
6922+
6923+ struct FWCMD_REQUEST_HEADER *header = NULL;
6924+ u8 embedded;
6925+ u32 n;
6926+
6927+ embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, wrb);
6928+
6929+ if (embedded) {
6930+ n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
6931+ header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
6932+ } else {
6933+ header = (struct FWCMD_REQUEST_HEADER *) optional_fwcmd_va;
6934+ }
6935+
6936+ /* Save the completed count before posting for a debug assert. */
6937+
6938+ if (header) {
6939+ wrb_context->opcode = header->opcode;
6940+ wrb_context->subsystem = header->subsystem;
6941+
6942+ } else {
6943+ wrb_context->opcode = 0;
6944+ wrb_context->subsystem = 0;
6945+ }
6946+}
6947+#else
6948+#define be_function_debug_print_wrb(a_, b_, c_, d_)
6949+#endif
6950+
6951+int
6952+be_function_post_mcc_wrb(struct be_function_object *pfob,
6953+ struct MCC_WRB_AMAP *wrb,
6954+ struct be_generic_q_ctxt *q_ctxt,
6955+ mcc_wrb_cqe_callback cb, void *cb_context,
6956+ mcc_wrb_cqe_callback internal_cb,
6957+ void *internal_cb_context, void *optional_fwcmd_va,
6958+ struct be_mcc_wrb_response_copy *rc)
6959+{
6960+ int status;
6961+ struct be_mcc_wrb_context *wrb_context = NULL;
6962+ u64 *p;
6963+
6964+ if (q_ctxt) {
6965+ /* Initialize context. */
6966+ q_ctxt->context.internal_cb = internal_cb;
6967+ q_ctxt->context.internal_cb_context = internal_cb_context;
6968+ q_ctxt->context.cb = cb;
6969+ q_ctxt->context.cb_context = cb_context;
6970+ if (rc) {
6971+ q_ctxt->context.copy.length = rc->length;
6972+ q_ctxt->context.copy.fwcmd_offset = rc->fwcmd_offset;
6973+ q_ctxt->context.copy.va = rc->va;
6974+ } else
6975+ q_ctxt->context.copy.length = 0;
6976+
6977+ q_ctxt->context.optional_fwcmd_va = optional_fwcmd_va;
6978+
6979+ /* Queue this request */
6980+ status = be_function_queue_mcc_wrb(pfob, q_ctxt);
6981+
6982+ goto Error;
6983+ }
6984+ /*
6985+ * Allocate a WRB context struct to hold the callback pointers,
6986+ * status, etc. This is required if commands complete out of order.
6987+ */
6988+ wrb_context = _be_mcc_allocate_wrb_context(pfob);
6989+ if (!wrb_context) {
6990+ TRACE(DL_WARN, "Failed to allocate MCC WRB context.");
6991+ status = BE_STATUS_SYSTEM_RESOURCES;
6992+ goto Error;
6993+ }
6994+ /* Initialize context. */
6995+ memset(wrb_context, 0, sizeof(*wrb_context));
6996+ wrb_context->internal_cb = internal_cb;
6997+ wrb_context->internal_cb_context = internal_cb_context;
6998+ wrb_context->cb = cb;
6999+ wrb_context->cb_context = cb_context;
7000+ if (rc) {
7001+ wrb_context->copy.length = rc->length;
7002+ wrb_context->copy.fwcmd_offset = rc->fwcmd_offset;
7003+ wrb_context->copy.va = rc->va;
7004+ } else
7005+ wrb_context->copy.length = 0;
7006+ wrb_context->wrb = wrb;
7007+
7008+ /*
7009+ * Copy the context pointer into the WRB opaque tag field.
7010+ * Verify assumption of 64-bit tag with a compile time assert.
7011+ */
7012+ p = (u64 *) ((u8 *)wrb + offsetof(struct BE_MCC_WRB_AMAP, tag)/8);
7013+ *p = (u64)(size_t)wrb_context;
7014+
7015+ /* Print info about this FWCMD for debug builds. */
7016+ be_function_debug_print_wrb(pfob, wrb, optional_fwcmd_va, wrb_context);
7017+
7018+ /*
7019+ * issue the WRB to the MPU as appropriate
7020+ */
7021+ if (pfob->mcc) {
7022+ /*
7023+ * we're in WRB mode, pass to the mcc layer
7024+ */
7025+ status = _be_mpu_post_wrb_ring(pfob->mcc, wrb, wrb_context);
7026+ } else {
7027+ /*
7028+ * we're in mailbox mode
7029+ */
7030+ status = _be_mpu_post_wrb_mailbox(pfob, wrb, wrb_context);
7031+
7032+ /* mailbox mode always completes synchronously */
7033+ ASSERT(status != BE_STATUS_PENDING);
7034+ }
7035+
7036+Error:
7037+
7038+ return status;
7039+}
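
The comment above promises a compile-time assertion for the 64-bit tag, but none appears in this hunk. A minimal sketch of such a check, assuming the kernel's BUILD_BUG_ON is in scope, could sit next to the tag write:

    /* Illustrative only: the tag field spans two 32-bit words (see
     * BE_MCC_WRB_AMAP in fwcmd_mcc.h), so a context pointer stored there
     * must fit in 64 bits on any supported architecture. */
    BUILD_BUG_ON(sizeof(void *) > sizeof(u64));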
7040+
7041+int
7042+be_function_ring_destroy(struct be_function_object *pfob,
7043+ u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
7044+ void *cb_context, mcc_wrb_cqe_callback internal_cb,
7045+ void *internal_cb_context)
7046+{
7047+
7048+ struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL;
7049+ struct MCC_WRB_AMAP *wrb = NULL;
7050+ int status = 0;
7051+ unsigned long irql;
7052+
7053+ spin_lock_irqsave(&pfob->post_lock, irql);
7054+
7055+ TRACE(DL_INFO, "Destroy ring id:%d type:%d", id, ring_type);
7056+
7057+ wrb = be_function_peek_mcc_wrb(pfob);
7058+ if (!wrb) {
7059+ ASSERT(wrb);
7060+ TRACE(DL_ERR, "No free MCC WRBs in destroy ring.");
7061+ status = BE_STATUS_NO_MCC_WRB;
7062+ goto Error;
7063+ }
7064+ /* Prepares an embedded fwcmd, including request/response sizes. */
7065+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY);
7066+
7067+ fwcmd->params.request.id = id;
7068+ fwcmd->params.request.ring_type = ring_type;
7069+
7070+ /* Post the f/w command */
7071+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context,
7072+ internal_cb, internal_cb_context, fwcmd, NULL);
7073+ if (status != BE_SUCCESS && status != BE_PENDING) {
7074+ TRACE(DL_ERR, "Ring destroy fwcmd failed. id:%d ring_type:%d",
7075+ id, ring_type);
7076+ goto Error;
7077+ }
7078+
7079+Error:
7080+ spin_unlock_irqrestore(&pfob->post_lock, irql);
7081+ if (pfob->pend_queue_driving && pfob->mcc) {
7082+ pfob->pend_queue_driving = 0;
7083+ be_drive_mcc_wrb_queue(pfob->mcc);
7084+ }
7085+ return status;
7086+}
7087+
7088+void
7089+be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list, u32 max_num)
7090+{
7091+ u32 num_pages = PAGES_SPANNED(rd->va, rd->length);
7092+ u32 i = 0;
7093+ u64 pa = rd->pa;
7094+ __le64 lepa;
7095+
7096+ ASSERT(pa_list);
7097+ ASSERT(pa);
7098+
7099+ for (i = 0; i < min(num_pages, max_num); i++) {
7100+ lepa = cpu_to_le64(pa);
7101+ pa_list[i].lo = (u32)lepa;
7102+ pa_list[i].hi = upper_32_bits(lepa);
7103+ pa += PAGE_SIZE;
7104+ }
7105+}
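
PAGES_SPANNED is defined elsewhere in the driver; a plausible definition, assuming the usual convention of counting every PAGE_SIZE page touched by a possibly unaligned buffer, is:

    /* Assumed definition, for illustration only. */
    #define PAGES_SPANNED(va, len)                          \
    	((u32)((((size_t)(va) & (PAGE_SIZE - 1)) +      \
    		(len) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))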
7106+
7107+
7108+
7109+/*-----------------------------------------------------------------------------
7110+ * Function: be_function_get_fw_version
7111+ * Retrieves the firmware version on the adapter. If the callback is
7112+ * NULL this call executes synchronously. If the callback is not NULL,
7113+ * the returned status will be BE_PENDING if the command was issued
7114+ * successfully.
7115+ * pfob -
7116+ * fwv - Pointer to response buffer if callback is NULL.
7117+ * cb - Callback function invoked when the FWCMD completes.
7118+ * cb_context - Passed to the callback function.
7119+ * return pend_status - BE_SUCCESS (0) on success.
7120+ * BE_PENDING (positive value) if the FWCMD
7121+ * completion is pending. Negative error code on failure.
7122+ *---------------------------------------------------------------------------
7123+ */
7124+int
7125+be_function_get_fw_version(struct be_function_object *pfob,
7126+ struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fwv,
7127+ mcc_wrb_cqe_callback cb, void *cb_context)
7128+{
7129+ int status = BE_SUCCESS;
7130+ struct MCC_WRB_AMAP *wrb = NULL;
7131+ struct FWCMD_COMMON_GET_FW_VERSION *fwcmd = NULL;
7132+ unsigned long irql;
7133+ struct be_mcc_wrb_response_copy rc;
7134+
7135+ spin_lock_irqsave(&pfob->post_lock, irql);
7136+
7137+ wrb = be_function_peek_mcc_wrb(pfob);
7138+ if (!wrb) {
7139+ TRACE(DL_ERR, "MCC wrb peek failed.");
7140+ status = BE_STATUS_NO_MCC_WRB;
7141+ goto Error;
7142+ }
7143+
7144+ if (!cb && !fwv) {
7145+ TRACE(DL_ERR, "callback and response buffer NULL!");
7146+ status = BE_NOT_OK;
7147+ goto Error;
7148+ }
7149+ /* Prepares an embedded fwcmd, including request/response sizes. */
7150+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FW_VERSION);
7151+
7152+ rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_GET_FW_VERSION,
7153+ params.response);
7154+ rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_GET_FW_VERSION,
7155+ params.response);
7156+ rc.va = fwv;
7157+
7158+ /* Post the f/w command */
7159+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb,
7160+ cb_context, NULL, NULL, fwcmd, &rc);
7161+
7162+Error:
7163+ spin_unlock_irqrestore(&pfob->post_lock, irql);
7164+ if (pfob->pend_queue_driving && pfob->mcc) {
7165+ pfob->pend_queue_driving = 0;
7166+ be_drive_mcc_wrb_queue(pfob->mcc);
7167+ }
7168+ return status;
7169+}
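
A minimal synchronous usage sketch (hypothetical caller, not part of this patch): with a NULL callback, the response payload is copied into the caller's buffer via the rc descriptor before the call returns.

    struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD fwv;
    int status;

    status = be_function_get_fw_version(pfob, &fwv, NULL, NULL);
    if (status == BE_SUCCESS)
    	TRACE(DL_INFO, "running f/w: %.32s", fwv.firmware_version_string);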
7170+
7171+int
7172+be_function_queue_mcc_wrb(struct be_function_object *pfob,
7173+ struct be_generic_q_ctxt *q_ctxt)
7174+{
7175+ int status;
7176+
7177+ ASSERT(q_ctxt);
7178+
7179+ /*
7180+ * issue the WRB to the MPU as appropriate
7181+ */
7182+ if (pfob->mcc) {
7183+
7184+ /* We're in ring mode. Queue this item. */
7185+ pfob->mcc->backlog_length++;
7186+ list_add_tail(&q_ctxt->context.list, &pfob->mcc->backlog);
7187+ status = BE_PENDING;
7188+ } else {
7189+ status = BE_NOT_OK;
7190+ }
7191+ return status;
7192+}
7193+
7194--- /dev/null
7195+++ b/drivers/staging/benet/fwcmd_common_bmap.h
7196@@ -0,0 +1,717 @@
7197+/*
7198+ * Copyright (C) 2005 - 2008 ServerEngines
7199+ * All rights reserved.
7200+ *
7201+ * This program is free software; you can redistribute it and/or
7202+ * modify it under the terms of the GNU General Public License version 2
7203+ * as published by the Free Software Foundation. The full GNU General
7204+ * Public License is included in this distribution in the file called COPYING.
7205+ *
7206+ * Contact Information:
7207+ * linux-drivers@serverengines.com
7208+ *
7209+ * ServerEngines
7210+ * 209 N. Fair Oaks Ave
7211+ * Sunnyvale, CA 94085
7212+ */
7213+/*
7214+ * Autogenerated by srcgen version: 0127
7215+ */
7216+#ifndef __fwcmd_common_bmap_h__
7217+#define __fwcmd_common_bmap_h__
7218+#include "fwcmd_types_bmap.h"
7219+#include "fwcmd_hdr_bmap.h"
7220+
7221+#if defined(__BIG_ENDIAN)
7222+ /* Physical Address. */
7223+struct PHYS_ADDR {
7224+ union {
7225+ struct {
7226+ u32 lo; /* DWORD 0 */
7227+ u32 hi; /* DWORD 1 */
7228+ } __packed; /* unnamed struct */
7229+ u32 dw[2]; /* dword union */
7230+ }; /* unnamed union */
7231+} __packed ;
7232+
7233+
7234+#else
7235+ /* Physical Address. */
7236+struct PHYS_ADDR {
7237+ union {
7238+ struct {
7239+ u32 lo; /* DWORD 0 */
7240+ u32 hi; /* DWORD 1 */
7241+ } __packed; /* unnamed struct */
7242+ u32 dw[2]; /* dword union */
7243+ }; /* unnamed union */
7244+} __packed ;
7245+
7246+struct BE_LINK_STATUS {
7247+ u8 mac0_duplex;
7248+ u8 mac0_speed;
7249+ u8 mac1_duplex;
7250+ u8 mac1_speed;
7251+ u8 mgmt_mac_duplex;
7252+ u8 mgmt_mac_speed;
7253+ u8 active_port;
7254+ u8 rsvd0;
7255+ u8 mac0_fault;
7256+ u8 mac1_fault;
7257+ u16 rsvd1;
7258+} __packed;
7259+#endif
7260+
7261+struct FWCMD_COMMON_ANON_170_REQUEST {
7262+ u32 rsvd0;
7263+} __packed;
7264+
7265+union LINK_STATUS_QUERY_PARAMS {
7266+ struct BE_LINK_STATUS response;
7267+ struct FWCMD_COMMON_ANON_170_REQUEST request;
7268+} __packed;
7269+
7270+/*
7271+ * Queries the link status for all ports. The valid values below
7272+ * DO NOT indicate that a particular duplex or speed is supported by
7273+ * BladeEngine. These enumerations simply list all possible duplexes
7274+ * and speeds for any port. Consult BladeEngine product documentation
7275+ * for the supported parameters.
7276+ */
7277+struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY {
7278+ union FWCMD_HEADER header;
7279+ union LINK_STATUS_QUERY_PARAMS params;
7280+} __packed;
7281+
7282+struct FWCMD_COMMON_ANON_171_REQUEST {
7283+ u8 type;
7284+ u8 port;
7285+ u8 mac1;
7286+ u8 permanent;
7287+} __packed;
7288+
7289+struct FWCMD_COMMON_ANON_172_RESPONSE {
7290+ struct MAC_ADDRESS_FORMAT mac;
7291+} __packed;
7292+
7293+union NTWK_MAC_QUERY_PARAMS {
7294+ struct FWCMD_COMMON_ANON_171_REQUEST request;
7295+ struct FWCMD_COMMON_ANON_172_RESPONSE response;
7296+} __packed;
7297+
7298+/* Queries one MAC address. */
7299+struct FWCMD_COMMON_NTWK_MAC_QUERY {
7300+ union FWCMD_HEADER header;
7301+ union NTWK_MAC_QUERY_PARAMS params;
7302+} __packed;
7303+
7304+struct MAC_SET_PARAMS_IN {
7305+ u8 type;
7306+ u8 port;
7307+ u8 mac1;
7308+ u8 invalidate;
7309+ struct MAC_ADDRESS_FORMAT mac;
7310+} __packed;
7311+
7312+struct MAC_SET_PARAMS_OUT {
7313+ u32 rsvd0;
7314+} __packed;
7315+
7316+union MAC_SET_PARAMS {
7317+ struct MAC_SET_PARAMS_IN request;
7318+ struct MAC_SET_PARAMS_OUT response;
7319+} __packed;
7320+
7321+/* Sets a MAC address. */
7322+struct FWCMD_COMMON_NTWK_MAC_SET {
7323+ union FWCMD_HEADER header;
7324+ union MAC_SET_PARAMS params;
7325+} __packed;
7326+
7327+/* MAC address list. */
7328+struct NTWK_MULTICAST_MAC_LIST {
7329+ u8 byte[6];
7330+} __packed;
7331+
7332+struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD {
7333+ u16 num_mac;
7334+ u8 promiscuous;
7335+ u8 rsvd0;
7336+ struct NTWK_MULTICAST_MAC_LIST mac[32];
7337+} __packed;
7338+
7339+struct FWCMD_COMMON_ANON_174_RESPONSE {
7340+ u32 rsvd0;
7341+} __packed;
7342+
7343+union FWCMD_COMMON_ANON_173_PARAMS {
7344+ struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD request;
7345+ struct FWCMD_COMMON_ANON_174_RESPONSE response;
7346+} __packed;
7347+
7348+/*
7349+ * Sets multicast address hash. The MPU will merge the MAC address lists
7350+ * from all clients, including the networking and storage functions.
7351+ * This command may fail if the final merged list of MAC addresses exceeds
7352+ * 32 entries.
7353+ */
7354+struct FWCMD_COMMON_NTWK_MULTICAST_SET {
7355+ union FWCMD_HEADER header;
7356+ union FWCMD_COMMON_ANON_173_PARAMS params;
7357+} __packed;
7358+
7359+struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD {
7360+ u16 num_vlan;
7361+ u8 promiscuous;
7362+ u8 rsvd0;
7363+ u16 vlan_tag[32];
7364+} __packed;
7365+
7366+struct FWCMD_COMMON_ANON_176_RESPONSE {
7367+ u32 rsvd0;
7368+} __packed;
7369+
7370+union FWCMD_COMMON_ANON_175_PARAMS {
7371+ struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD request;
7372+ struct FWCMD_COMMON_ANON_176_RESPONSE response;
7373+} __packed;
7374+
7375+/*
7376+ * Sets VLAN tag filter. The MPU will merge the VLAN tag list from all
7377+ * clients, including the networking and storage functions. This command
7378+ * may fail if the final vlan_tag array (from all functions) is longer
7379+ * than 32 entries.
7380+ */
7381+struct FWCMD_COMMON_NTWK_VLAN_CONFIG {
7382+ union FWCMD_HEADER header;
7383+ union FWCMD_COMMON_ANON_175_PARAMS params;
7384+} __packed;
7385+
7386+struct RING_DESTROY_REQUEST {
7387+ u16 ring_type;
7388+ u16 id;
7389+ u8 bypass_flush;
7390+ u8 rsvd0;
7391+ u16 rsvd1;
7392+} __packed;
7393+
7394+struct FWCMD_COMMON_ANON_190_RESPONSE {
7395+ u32 rsvd0;
7396+} __packed;
7397+
7398+union FWCMD_COMMON_ANON_189_PARAMS {
7399+ struct RING_DESTROY_REQUEST request;
7400+ struct FWCMD_COMMON_ANON_190_RESPONSE response;
7401+} __packed;
7402+/*
7403+ * Command for destroying any ring. The connection(s) using the ring should
7404+ * be quiesced before destroying the ring.
7405+ */
7406+struct FWCMD_COMMON_RING_DESTROY {
7407+ union FWCMD_HEADER header;
7408+ union FWCMD_COMMON_ANON_189_PARAMS params;
7409+} __packed;
7410+
7411+struct FWCMD_COMMON_ANON_192_REQUEST {
7412+ u16 num_pages;
7413+ u16 rsvd0;
7414+ struct CQ_CONTEXT_AMAP context;
7415+ struct PHYS_ADDR pages[4];
7416+} __packed ;
7417+
7418+struct FWCMD_COMMON_ANON_193_RESPONSE {
7419+ u16 cq_id;
7420+} __packed ;
7421+
7422+union FWCMD_COMMON_ANON_191_PARAMS {
7423+ struct FWCMD_COMMON_ANON_192_REQUEST request;
7424+ struct FWCMD_COMMON_ANON_193_RESPONSE response;
7425+} __packed ;
7426+
7427+/*
7428+ * Command for creating a completion queue. A Completion Queue must span
7429+ * at least 1 page and at most 4 pages. Each completion queue entry
7430+ * is 16 bytes regardless of CQ entry format. Thus the ring must be
7431+ * at least 256 entries deep (corresponding to 1 page) and can be at
7432+ * most 1024 entries deep (corresponding to 4 pages). The number of
7433+ * pages posted must contain the CQ ring size as encoded in the context.
7434+ *
7435+ */
7436+struct FWCMD_COMMON_CQ_CREATE {
7437+ union FWCMD_HEADER header;
7438+ union FWCMD_COMMON_ANON_191_PARAMS params;
7439+} __packed ;
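
As a worked example of the sizing rule above (illustrative helper, not part of the patch): with 16-byte CQEs, the ring depth implied by the page count is num_pages * PAGE_SIZE / 16, i.e. 256 entries for one 4 KB page and 1024 entries for four.

    /* Illustrative only: CQ ring depth implied by the posted page count. */
    static inline u32 be_cq_entries(u32 num_pages)
    {
    	return num_pages * PAGE_SIZE / 16;	/* 16 bytes per CQE */
    }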
7440+
7441+struct FWCMD_COMMON_ANON_198_REQUEST {
7442+ u16 num_pages;
7443+ u16 rsvd0;
7444+ struct EQ_CONTEXT_AMAP context;
7445+ struct PHYS_ADDR pages[8];
7446+} __packed ;
7447+
7448+struct FWCMD_COMMON_ANON_199_RESPONSE {
7449+ u16 eq_id;
7450+} __packed ;
7451+
7452+union FWCMD_COMMON_ANON_197_PARAMS {
7453+ struct FWCMD_COMMON_ANON_198_REQUEST request;
7454+ struct FWCMD_COMMON_ANON_199_RESPONSE response;
7455+} __packed ;
7456+
7457+/*
7458+ * Command for creating an event queue. An Event Queue must span at least
7459+ * 1 page and at most 8 pages. The number of pages posted must contain
7460+ * the EQ ring. The ring is defined by the size of the EQ entries (encoded
7461+ * in the context) and the number of EQ entries (also encoded in the
7462+ * context).
7463+ */
7464+struct FWCMD_COMMON_EQ_CREATE {
7465+ union FWCMD_HEADER header;
7466+ union FWCMD_COMMON_ANON_197_PARAMS params;
7467+} __packed ;
7468+
7469+struct FWCMD_COMMON_ANON_201_REQUEST {
7470+ u16 cq_id;
7471+ u16 bcmc_cq_id;
7472+ u16 num_pages;
7473+ u16 rsvd0;
7474+ struct PHYS_ADDR pages[2];
7475+} __packed;
7476+
7477+struct FWCMD_COMMON_ANON_202_RESPONSE {
7478+ u16 id;
7479+} __packed;
7480+
7481+union FWCMD_COMMON_ANON_200_PARAMS {
7482+ struct FWCMD_COMMON_ANON_201_REQUEST request;
7483+ struct FWCMD_COMMON_ANON_202_RESPONSE response;
7484+} __packed;
7485+
7486+/*
7487+ * Command for creating an Ethernet receive ring. An ERX ring contains ETH_RX_D
7488+ * entries (8 bytes each). An ERX ring must be 1024 entries deep
7489+ * (corresponding to 2 pages).
7490+ */
7491+struct FWCMD_COMMON_ETH_RX_CREATE {
7492+ union FWCMD_HEADER header;
7493+ union FWCMD_COMMON_ANON_200_PARAMS params;
7494+} __packed;
7495+
7496+struct FWCMD_COMMON_ANON_204_REQUEST {
7497+ u16 num_pages;
7498+ u8 ulp_num;
7499+ u8 type;
7500+ struct ETX_CONTEXT_AMAP context;
7501+ struct PHYS_ADDR pages[8];
7502+} __packed ;
7503+
7504+struct FWCMD_COMMON_ANON_205_RESPONSE {
7505+ u16 cid;
7506+ u8 ulp_num;
7507+ u8 rsvd0;
7508+} __packed ;
7509+
7510+union FWCMD_COMMON_ANON_203_PARAMS {
7511+ struct FWCMD_COMMON_ANON_204_REQUEST request;
7512+ struct FWCMD_COMMON_ANON_205_RESPONSE response;
7513+} __packed ;
7514+
7515+/*
7516+ * Command for creating an Ethernet transmit ring. An ETX ring contains
7517+ * ETH_WRB entries (16 bytes each). An ETX ring must be at least 256
7518+ * entries deep (corresponding to 1 page) and at most 2k entries deep
7519+ * (corresponding to 8 pages).
7520+ */
7521+struct FWCMD_COMMON_ETH_TX_CREATE {
7522+ union FWCMD_HEADER header;
7523+ union FWCMD_COMMON_ANON_203_PARAMS params;
7524+} __packed ;
7525+
7526+struct FWCMD_COMMON_ANON_222_REQUEST {
7527+ u16 num_pages;
7528+ u16 rsvd0;
7529+ struct MCC_RING_CONTEXT_AMAP context;
7530+ struct PHYS_ADDR pages[8];
7531+} __packed ;
7532+
7533+struct FWCMD_COMMON_ANON_223_RESPONSE {
7534+ u16 id;
7535+} __packed ;
7536+
7537+union FWCMD_COMMON_ANON_221_PARAMS {
7538+ struct FWCMD_COMMON_ANON_222_REQUEST request;
7539+ struct FWCMD_COMMON_ANON_223_RESPONSE response;
7540+} __packed ;
7541+
7542+/*
7543+ * Command for creating the MCC ring. An MCC ring must be at least 16
7544+ * entries deep (corresponding to 1 page) and at most 128 entries deep
7545+ * (corresponding to 8 pages).
7546+ */
7547+struct FWCMD_COMMON_MCC_CREATE {
7548+ union FWCMD_HEADER header;
7549+ union FWCMD_COMMON_ANON_221_PARAMS params;
7550+} __packed ;
7551+
7552+struct GET_QOS_IN {
7553+ u32 qos_params_rsvd;
7554+} __packed;
7555+
7556+struct GET_QOS_OUT {
7557+ u32 max_bits_per_second_NIC;
7558+ u32 max_packets_per_second_NIC;
7559+ u32 max_ios_per_second_iSCSI;
7560+ u32 max_bytes_per_second_iSCSI;
7561+ u16 domain_VLAN_tag;
7562+ u16 fabric_domain_ID;
7563+ u32 qos_params_oem[4];
7564+} __packed;
7565+
7566+union GET_QOS_PARAMS {
7567+ struct GET_QOS_IN request;
7568+ struct GET_QOS_OUT response;
7569+} __packed;
7570+
7571+/* QOS/Bandwidth settings per domain. Applicable only in VMs. */
7572+struct FWCMD_COMMON_GET_QOS {
7573+ union FWCMD_HEADER header;
7574+ union GET_QOS_PARAMS params;
7575+} __packed;
7576+
7577+struct SET_QOS_IN {
7578+ u32 valid_flags;
7579+ u32 max_bits_per_second_NIC;
7580+ u32 max_packets_per_second_NIC;
7581+ u32 max_ios_per_second_iSCSI;
7582+ u32 max_bytes_per_second_iSCSI;
7583+ u16 domain_VLAN_tag;
7584+ u16 fabric_domain_ID;
7585+ u32 qos_params_oem[4];
7586+} __packed;
7587+
7588+struct SET_QOS_OUT {
7589+ u32 qos_params_rsvd;
7590+} __packed;
7591+
7592+union SET_QOS_PARAMS {
7593+ struct SET_QOS_IN request;
7594+ struct SET_QOS_OUT response;
7595+} __packed;
7596+
7597+/* QOS/Bandwidth settings per domain. Applicable only in VMs. */
7598+struct FWCMD_COMMON_SET_QOS {
7599+ union FWCMD_HEADER header;
7600+ union SET_QOS_PARAMS params;
7601+} __packed;
7602+
7603+struct SET_FRAME_SIZE_IN {
7604+ u32 max_tx_frame_size;
7605+ u32 max_rx_frame_size;
7606+} __packed;
7607+
7608+struct SET_FRAME_SIZE_OUT {
7609+ u32 chip_max_tx_frame_size;
7610+ u32 chip_max_rx_frame_size;
7611+} __packed;
7612+
7613+union SET_FRAME_SIZE_PARAMS {
7614+ struct SET_FRAME_SIZE_IN request;
7615+ struct SET_FRAME_SIZE_OUT response;
7616+} __packed;
7617+
7618+/* Set frame size command. Only host domain may issue this command. */
7619+struct FWCMD_COMMON_SET_FRAME_SIZE {
7620+ union FWCMD_HEADER header;
7621+ union SET_FRAME_SIZE_PARAMS params;
7622+} __packed;
7623+
7624+struct FORCE_FAILOVER_IN {
7625+ u32 move_to_port;
7626+ u32 failover_config;
7627+} __packed;
7628+
7629+struct FWCMD_COMMON_ANON_231_RESPONSE {
7630+ u32 rsvd0;
7631+} __packed;
7632+
7633+union FWCMD_COMMON_ANON_230_PARAMS {
7634+ struct FORCE_FAILOVER_IN request;
7635+ struct FWCMD_COMMON_ANON_231_RESPONSE response;
7636+} __packed;
7637+
7638+/*
7639+ * Use this command to control failover in BladeEngine. It may be used
7640+ * to failback to a restored port or to forcibly move traffic from
7641+ * one port to another. It may also be used to enable or disable the
7642+ * automatic failover feature. This command can only be issued by domain
7643+ * 0.
7644+ */
7645+struct FWCMD_COMMON_FORCE_FAILOVER {
7646+ union FWCMD_HEADER header;
7647+ union FWCMD_COMMON_ANON_230_PARAMS params;
7648+} __packed;
7649+
7650+struct FWCMD_COMMON_ANON_240_REQUEST {
7651+ u64 context;
7652+} __packed;
7653+
7654+struct FWCMD_COMMON_ANON_241_RESPONSE {
7655+ u64 context;
7656+} __packed;
7657+
7658+union FWCMD_COMMON_ANON_239_PARAMS {
7659+ struct FWCMD_COMMON_ANON_240_REQUEST request;
7660+ struct FWCMD_COMMON_ANON_241_RESPONSE response;
7661+} __packed;
7662+
7663+/*
7664+ * This command can be used by clients as a no-operation request. Typical
7665+ * uses for drivers are as a heartbeat mechanism, or deferred processing
7666+ * catalyst. The ARM will always complete this command with a good completion.
7667+ * The 64-bit parameter is not touched by the ARM processor.
7668+ */
7669+struct FWCMD_COMMON_NOP {
7670+ union FWCMD_HEADER header;
7671+ union FWCMD_COMMON_ANON_239_PARAMS params;
7672+} __packed;
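
Since the ARM echoes the 64-bit context untouched, a driver can round-trip a cookie through a NOP, for example as a heartbeat. A hypothetical sketch (fwcmd is an already prepared FWCMD_COMMON_NOP):

    u64 cookie = (u64)jiffies;		/* any driver-chosen value */

    fwcmd->params.request.context = cookie;
    /* ... post the FWCMD and wait for its completion ... */
    ASSERT(fwcmd->params.response.context == cookie);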
7673+
7674+struct NTWK_RX_FILTER_SETTINGS {
7675+ u8 promiscuous;
7676+ u8 ip_cksum;
7677+ u8 tcp_cksum;
7678+ u8 udp_cksum;
7679+ u8 pass_err;
7680+ u8 pass_ckerr;
7681+ u8 strip_crc;
7682+ u8 mcast_en;
7683+ u8 bcast_en;
7684+ u8 mcast_promiscuous_en;
7685+ u8 unicast_en;
7686+ u8 vlan_promiscuous;
7687+} __packed;
7688+
7689+union FWCMD_COMMON_ANON_242_PARAMS {
7690+ struct NTWK_RX_FILTER_SETTINGS request;
7691+ struct NTWK_RX_FILTER_SETTINGS response;
7692+} __packed;
7693+
7694+/*
7695+ * This command is used to modify the ethernet receive filter configuration.
7696+ * Only domain 0 network function drivers may issue this command. The
7697+ * applied configuration is returned in the response payload. Note:
7698+ * Some receive packet filter settings are global on BladeEngine and
7699+ * can affect both the storage and network function clients that the
7700+ * BladeEngine hardware and firmware serve. Additionally, depending
7701+ * on the revision of BladeEngine, some ethernet receive filter settings
7702+ * are dependent on others. If a dependency exists between settings
7703+ * for the BladeEngine revision, and the command request settings do
7704+ * not meet the dependency requirement, the invalid settings will not
7705+ * be applied despite the command succeeding. For example: a driver may
7706+ * request to enable broadcast packets, but not enable multicast packets.
7707+ * On early revisions of BladeEngine, there may be no distinction between
7708+ * broadcast and multicast filters, so broadcast could not be enabled
7709+ * without enabling multicast. In this scenario, the command would still
7710+ * succeed, but the response payload would indicate the previously
7711+ * configured broadcast and multicast setting.
7712+ */
7713+struct FWCMD_COMMON_NTWK_RX_FILTER {
7714+ union FWCMD_HEADER header;
7715+ union FWCMD_COMMON_ANON_242_PARAMS params;
7716+} __packed;
7717+
7718+
7719+struct FWCMD_COMMON_ANON_244_REQUEST {
7720+ u32 rsvd0;
7721+} __packed;
7722+
7723+struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD {
7724+ u8 firmware_version_string[32];
7725+ u8 fw_on_flash_version_string[32];
7726+} __packed;
7727+
7728+union FWCMD_COMMON_ANON_243_PARAMS {
7729+ struct FWCMD_COMMON_ANON_244_REQUEST request;
7730+ struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD response;
7731+} __packed;
7732+
7733+/* This command retrieves the firmware version. */
7734+struct FWCMD_COMMON_GET_FW_VERSION {
7735+ union FWCMD_HEADER header;
7736+ union FWCMD_COMMON_ANON_243_PARAMS params;
7737+} __packed;
7738+
7739+struct FWCMD_COMMON_ANON_246_REQUEST {
7740+ u16 tx_flow_control;
7741+ u16 rx_flow_control;
7742+} __packed;
7743+
7744+struct FWCMD_COMMON_ANON_247_RESPONSE {
7745+ u32 rsvd0;
7746+} __packed;
7747+
7748+union FWCMD_COMMON_ANON_245_PARAMS {
7749+ struct FWCMD_COMMON_ANON_246_REQUEST request;
7750+ struct FWCMD_COMMON_ANON_247_RESPONSE response;
7751+} __packed;
7752+
7753+/*
7754+ * This command is used to program BladeEngine flow control behavior.
7755+ * Only the host networking driver is allowed to use this command.
7756+ */
7757+struct FWCMD_COMMON_SET_FLOW_CONTROL {
7758+ union FWCMD_HEADER header;
7759+ union FWCMD_COMMON_ANON_245_PARAMS params;
7760+} __packed;
7761+
7762+struct FWCMD_COMMON_ANON_249_REQUEST {
7763+ u32 rsvd0;
7764+} __packed;
7765+
7766+struct FWCMD_COMMON_ANON_250_RESPONSE {
7767+ u16 tx_flow_control;
7768+ u16 rx_flow_control;
7769+} __packed;
7770+
7771+union FWCMD_COMMON_ANON_248_PARAMS {
7772+ struct FWCMD_COMMON_ANON_249_REQUEST request;
7773+ struct FWCMD_COMMON_ANON_250_RESPONSE response;
7774+} __packed;
7775+
7776+/* This command is used to read BladeEngine flow control settings. */
7777+struct FWCMD_COMMON_GET_FLOW_CONTROL {
7778+ union FWCMD_HEADER header;
7779+ union FWCMD_COMMON_ANON_248_PARAMS params;
7780+} __packed;
7781+
7782+struct EQ_DELAY_PARAMS {
7783+ u32 eq_id;
7784+ u32 delay_in_microseconds;
7785+} __packed;
7786+
7787+struct FWCMD_COMMON_ANON_257_REQUEST {
7788+ u32 num_eq;
7789+ u32 rsvd0;
7790+ struct EQ_DELAY_PARAMS delay[16];
7791+} __packed;
7792+
7793+struct FWCMD_COMMON_ANON_258_RESPONSE {
7794+ u32 delay_resolution_in_microseconds;
7795+ u32 delay_max_in_microseconds;
7796+} __packed;
7797+
7798+union MODIFY_EQ_DELAY_PARAMS {
7799+ struct FWCMD_COMMON_ANON_257_REQUEST request;
7800+ struct FWCMD_COMMON_ANON_258_RESPONSE response;
7801+} __packed;
7802+
7803+/* This command changes the EQ delay for a given set of EQs. */
7804+struct FWCMD_COMMON_MODIFY_EQ_DELAY {
7805+ union FWCMD_HEADER header;
7806+ union MODIFY_EQ_DELAY_PARAMS params;
7807+} __packed;
7808+
7809+struct FWCMD_COMMON_ANON_260_REQUEST {
7810+ u32 rsvd0;
7811+} __packed;
7812+
7813+struct BE_FIRMWARE_CONFIG {
7814+ u16 be_config_number;
7815+ u16 asic_revision;
7816+ u32 nic_ulp_mask;
7817+ u32 tulp_mask;
7818+ u32 iscsi_ulp_mask;
7819+ u32 rdma_ulp_mask;
7820+ u32 rsvd0[4];
7821+ u32 eth_tx_id_start;
7822+ u32 eth_tx_id_count;
7823+ u32 eth_rx_id_start;
7824+ u32 eth_rx_id_count;
7825+ u32 tpm_wrbq_id_start;
7826+ u32 tpm_wrbq_id_count;
7827+ u32 tpm_defq_id_start;
7828+ u32 tpm_defq_id_count;
7829+ u32 iscsi_wrbq_id_start;
7830+ u32 iscsi_wrbq_id_count;
7831+ u32 iscsi_defq_id_start;
7832+ u32 iscsi_defq_id_count;
7833+ u32 rdma_qp_id_start;
7834+ u32 rdma_qp_id_count;
7835+ u32 rsvd1[8];
7836+} __packed;
7837+
7838+union FWCMD_COMMON_ANON_259_PARAMS {
7839+ struct FWCMD_COMMON_ANON_260_REQUEST request;
7840+ struct BE_FIRMWARE_CONFIG response;
7841+} __packed;
7842+
7843+/*
7844+ * This command queries the current firmware configuration parameters.
7845+ * The static configuration type is defined by be_config_number. This
7846+ * differentiates different BladeEngine builds, such as iSCSI Initiator
7847+ * versus iSCSI Target. For a given static configuration, the Upper
7848+ * Layer Protocol (ULP) processors may be reconfigured to support different
7849+ * protocols. Each ULP processor supports one or more protocols. The
7850+ * masks indicate which processors are configured for each protocol.
7851+ * For a given static configuration, the number of TCP connections
7852+ * supported for each protocol may vary. The *_id_start and *_id_count
7853+ * variables define a linear range of IDs that are available for each
7854+ * supported protocol. The *_id_count may be used by the driver to allocate
7855+ * the appropriate number of connection resources. The *_id_start may
7856+ * be used to map the arbitrary range of IDs to a zero-based range
7857+ * of indices.
7858+ */
7859+struct FWCMD_COMMON_FIRMWARE_CONFIG {
7860+ union FWCMD_HEADER header;
7861+ union FWCMD_COMMON_ANON_259_PARAMS params;
7862+} __packed;
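
For example, the *_id_start/*_id_count ranges can be mapped to zero-based driver indices with a helper like this (hypothetical, for illustration):

    /* Translate a hardware ETH TX ring ID into a zero-based table index,
     * or return -1 if the ID falls outside the configured range. */
    static inline int be_eth_tx_id_to_index(struct BE_FIRMWARE_CONFIG *cfg,
    					    u32 id)
    {
    	u32 idx = id - cfg->eth_tx_id_start;

    	return (idx < cfg->eth_tx_id_count) ? (int)idx : -1;
    }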
7863+
7864+struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS {
7865+ u32 emph_lev_sel_port0;
7866+ u32 emph_lev_sel_port1;
7867+ u8 xaui_vo_sel;
7868+ u8 xaui_state;
7869+ u16 rsvd0;
7870+ u32 xaui_eq_vector;
7871+} __packed;
7872+
7873+struct FWCMD_COMMON_ANON_262_REQUEST {
7874+ u32 rsvd0;
7875+} __packed;
7876+
7877+union FWCMD_COMMON_ANON_261_PARAMS {
7878+ struct FWCMD_COMMON_ANON_262_REQUEST request;
7879+ struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS response;
7880+} __packed;
7881+
7882+/*
7883+ * This command can be used to read XAUI equalization parameters. The
7884+ * ARM firmware applies default equalization parameters during initialization.
7885+ * These parameters may be customer-specific when derived from the
7886+ * SEEPROM. See SEEPROM_DATA for equalization specific fields.
7887+ */
7888+struct FWCMD_COMMON_GET_PORT_EQUALIZATION {
7889+ union FWCMD_HEADER header;
7890+ union FWCMD_COMMON_ANON_261_PARAMS params;
7891+} __packed;
7892+
7893+struct FWCMD_COMMON_ANON_264_RESPONSE {
7894+ u32 rsvd0;
7895+} __packed;
7896+
7897+union FWCMD_COMMON_ANON_263_PARAMS {
7898+ struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS request;
7899+ struct FWCMD_COMMON_ANON_264_RESPONSE response;
7900+} __packed;
7901+
7902+/*
7903+ * This command can be used to set XAUI equalization parameters. The ARM
7904+ * firmware applies default equalization parameters during initialization.
7905+ * These parameters may be customer-specific when derived from the
7906+ * SEEPROM. See SEEPROM_DATA for equalization specific fields.
7907+ */
7908+struct FWCMD_COMMON_SET_PORT_EQUALIZATION {
7909+ union FWCMD_HEADER header;
7910+ union FWCMD_COMMON_ANON_263_PARAMS params;
7911+} __packed;
7912+
7913+#endif /* __fwcmd_common_bmap_h__ */
7914--- /dev/null
7915+++ b/drivers/staging/benet/fwcmd_common.h
7916@@ -0,0 +1,222 @@
7917+/*
7918+ * Copyright (C) 2005 - 2008 ServerEngines
7919+ * All rights reserved.
7920+ *
7921+ * This program is free software; you can redistribute it and/or
7922+ * modify it under the terms of the GNU General Public License version 2
7923+ * as published by the Free Software Foundation. The full GNU General
7924+ * Public License is included in this distribution in the file called COPYING.
7925+ *
7926+ * Contact Information:
7927+ * linux-drivers@serverengines.com
7928+ *
7929+ * ServerEngines
7930+ * 209 N. Fair Oaks Ave
7931+ * Sunnyvale, CA 94085
7932+ */
7933+/*
7934+ * Autogenerated by srcgen version: 0127
7935+ */
7936+#ifndef __fwcmd_common_amap_h__
7937+#define __fwcmd_common_amap_h__
7938+#include "host_struct.h"
7939+
7940+/* --- PHY_LINK_DUPLEX_ENUM --- */
7941+#define PHY_LINK_DUPLEX_NONE (0)
7942+#define PHY_LINK_DUPLEX_HALF (1)
7943+#define PHY_LINK_DUPLEX_FULL (2)
7944+
7945+/* --- PHY_LINK_SPEED_ENUM --- */
7946+#define PHY_LINK_SPEED_ZERO (0) /* No link. */
7947+#define PHY_LINK_SPEED_10MBPS (1) /* 10 Mbps */
7948+#define PHY_LINK_SPEED_100MBPS (2) /* 100 Mbps */
7949+#define PHY_LINK_SPEED_1GBPS (3) /* 1 Gbps */
7950+#define PHY_LINK_SPEED_10GBPS (4) /* 10 Gbps */
7951+
7952+/* --- PHY_LINK_FAULT_ENUM --- */
7953+#define PHY_LINK_FAULT_NONE (0) /* No fault status
7954+ available or detected */
7955+#define PHY_LINK_FAULT_LOCAL (1) /* Local fault detected */
7956+#define PHY_LINK_FAULT_REMOTE (2) /* Remote fault detected */
7957+
7958+/* --- BE_ULP_MASK --- */
7959+#define BE_ULP0_MASK (1)
7960+#define BE_ULP1_MASK (2)
7961+#define BE_ULP2_MASK (4)
7962+
7963+/* --- NTWK_ACTIVE_PORT --- */
7964+#define NTWK_PORT_A (0) /* Port A is currently active */
7965+#define NTWK_PORT_B (1) /* Port B is currently active */
7966+#define NTWK_NO_ACTIVE_PORT (15) /* Both ports have lost link */
7967+
7968+/* --- NTWK_LINK_TYPE --- */
7969+#define NTWK_LINK_TYPE_PHYSICAL (0) /* link up/down event
7970+ applies to BladeEngine's
7971+ Physical Ports
7972+ */
7973+#define NTWK_LINK_TYPE_VIRTUAL (1) /* Virtual link up/down event
7974+ reported by BladeExchange.
7975+ This applies only when the
7976+ VLD feature is enabled
7977+ */
7978+
7979+/*
7980+ * --- FWCMD_MAC_TYPE_ENUM ---
7981+ * This enum defines the types of MAC addresses in the RXF MAC Address Table.
7982+ */
7983+#define MAC_ADDRESS_TYPE_STORAGE (0) /* Storage MAC Address */
7984+#define MAC_ADDRESS_TYPE_NETWORK (1) /* Network MAC Address */
7985+#define MAC_ADDRESS_TYPE_PD (2) /* Protection Domain MAC Addr */
7986+#define MAC_ADDRESS_TYPE_MANAGEMENT (3) /* Management MAC Address */
7987+
7988+
7989+/* --- FWCMD_RING_TYPE_ENUM --- */
7990+#define FWCMD_RING_TYPE_ETH_RX (1) /* Ring created with */
7991+ /* FWCMD_COMMON_ETH_RX_CREATE. */
7992+#define FWCMD_RING_TYPE_ETH_TX (2) /* Ring created with */
7993+ /* FWCMD_COMMON_ETH_TX_CREATE. */
7994+#define FWCMD_RING_TYPE_ISCSI_WRBQ (3) /* Ring created with */
7995+ /* FWCMD_COMMON_ISCSI_WRBQ_CREATE. */
7996+#define FWCMD_RING_TYPE_ISCSI_DEFQ (4) /* Ring created with */
7997+ /* FWCMD_COMMON_ISCSI_DEFQ_CREATE. */
7998+#define FWCMD_RING_TYPE_TPM_WRBQ (5) /* Ring created with */
7999+ /* FWCMD_COMMON_TPM_WRBQ_CREATE. */
8000+#define FWCMD_RING_TYPE_TPM_DEFQ (6) /* Ring created with */
8001+ /* FWCMD_COMMONTPM_TDEFQ_CREATE. */
8002+#define FWCMD_RING_TYPE_TPM_RQ (7) /* Ring created with */
8003+ /* FWCMD_COMMON_TPM_RQ_CREATE. */
8004+#define FWCMD_RING_TYPE_MCC (8) /* Ring created with */
8005+ /* FWCMD_COMMON_MCC_CREATE. */
8006+#define FWCMD_RING_TYPE_CQ (9) /* Ring created with */
8007+ /* FWCMD_COMMON_CQ_CREATE. */
8008+#define FWCMD_RING_TYPE_EQ (10) /* Ring created with */
8009+ /* FWCMD_COMMON_EQ_CREATE. */
8010+#define FWCMD_RING_TYPE_QP (11) /* Ring created with */
8011+ /* FWCMD_RDMA_QP_CREATE. */
8012+
8013+
8014+/* --- ETH_TX_RING_TYPE_ENUM --- */
8015+#define ETH_TX_RING_TYPE_FORWARDING (1) /* Ethernet ring for
8016+ forwarding packets */
8017+#define ETH_TX_RING_TYPE_STANDARD (2) /* Ethernet ring for sending
8018+ network packets. */
8019+#define ETH_TX_RING_TYPE_BOUND (3) /* Ethernet ring bound to the
8020+ port specified in the command
8021+ header.port_number field.
8022+ Rings of this type are
8023+ NOT subject to the
8024+ failover logic implemented
8025+ in the BladeEngine.
8026+ */
8027+
8028+/* --- FWCMD_COMMON_QOS_TYPE_ENUM --- */
8029+#define QOS_BITS_NIC (1) /* max_bits_per_second_NIC */
8030+ /* field is valid. */
8031+#define QOS_PKTS_NIC (2) /* max_packets_per_second_NIC */
8032+ /* field is valid. */
8033+#define QOS_IOPS_ISCSI (4) /* max_ios_per_second_iSCSI */
8034+ /* field is valid. */
8035+#define QOS_VLAN_TAG (8) /* domain_VLAN_tag field
8036+ is valid. */
8037+#define QOS_FABRIC_ID (16) /* fabric_domain_ID field
8038+ is valid. */
8039+#define QOS_OEM_PARAMS (32) /* qos_params_oem field
8040+ is valid. */
8041+#define QOS_TPUT_ISCSI (64) /* max_bytes_per_second_iSCSI
8042+ field is valid. */
8043+
8044+
8045+/*
8046+ * --- FAILOVER_CONFIG_ENUM ---
8047+ * Failover configuration setting used in FWCMD_COMMON_FORCE_FAILOVER
8048+ */
8049+#define FAILOVER_CONFIG_NO_CHANGE (0) /* No change to automatic */
8050+ /* port failover setting. */
8051+#define FAILOVER_CONFIG_ON (1) /* Automatic port failover
8052+ on link down is enabled. */
8053+#define FAILOVER_CONFIG_OFF (2) /* Automatic port failover
8054+ on link down is disabled. */
8055+
8056+/*
8057+ * --- FAILOVER_PORT_ENUM ---
8058+ * Failover port setting used in FWCMD_COMMON_FORCE_FAILOVER
8059+ */
8060+#define FAILOVER_PORT_A (0) /* Selects port A. */
8061+#define FAILOVER_PORT_B (1) /* Selects port B. */
8062+#define FAILOVER_PORT_NONE (15) /* No port change requested. */
8063+
8064+
8065+/*
8066+ * --- MGMT_FLASHROM_OPCODE ---
8067+ * Flash ROM operation code
8068+ */
8069+#define MGMT_FLASHROM_OPCODE_FLASH (1) /* Commit downloaded data
8070+ to Flash ROM */
8071+#define MGMT_FLASHROM_OPCODE_SAVE (2) /* Save downloaded data to
8072+ ARM's DDR - do not flash */
8073+#define MGMT_FLASHROM_OPCODE_CLEAR (3) /* Erase specified component
8074+ from FlashROM */
8075+#define MGMT_FLASHROM_OPCODE_REPORT (4) /* Read specified component
8076+ from Flash ROM */
8077+#define MGMT_FLASHROM_OPCODE_IMAGE_INFO (5) /* Returns size of a
8078+ component */
8079+
8080+/*
8081+ * --- MGMT_FLASHROM_OPTYPE ---
8082+ * Flash ROM operation type
8083+ */
8084+#define MGMT_FLASHROM_OPTYPE_CODE_FIRMWARE (0) /* Includes ARM firmware,
8085+ IPSec (optional) and EP
8086+ firmware */
8087+#define MGMT_FLASHROM_OPTYPE_CODE_REDBOOT (1)
8088+#define MGMT_FLASHROM_OPTYPE_CODE_BIOS (2)
8089+#define MGMT_FLASHROM_OPTYPE_CODE_PXE_BIOS (3)
8090+#define MGMT_FLASHROM_OPTYPE_CODE_CTRLS (4)
8091+#define MGMT_FLASHROM_OPTYPE_CFG_IPSEC (5)
8092+#define MGMT_FLASHROM_OPTYPE_CFG_INI (6)
8093+#define MGMT_FLASHROM_OPTYPE_ROM_OFFSET_SPECIFIED (7)
8094+
8095+/*
8096+ * --- FLASHROM_TYPE ---
8097+ * Flash ROM manufacturers supported in the f/w
8098+ */
8099+#define INTEL (0)
8100+#define SPANSION (1)
8101+#define MICRON (2)
8102+
8103+/* --- DDR_CAS_TYPE --- */
8104+#define CAS_3 (0)
8105+#define CAS_4 (1)
8106+#define CAS_5 (2)
8107+
8108+/* --- DDR_SIZE_TYPE --- */
8109+#define SIZE_256MB (0)
8110+#define SIZE_512MB (1)
8111+
8112+/* --- DDR_MODE_TYPE --- */
8113+#define DDR_NO_ECC (0)
8114+#define DDR_ECC (1)
8115+
8116+/* --- INTERFACE_10GB_TYPE --- */
8117+#define CX4_TYPE (0)
8118+#define XFP_TYPE (1)
8119+
8120+/* --- BE_CHIP_MAX_MTU --- */
8121+#define CHIP_MAX_MTU (9000)
8122+
8123+/* --- XAUI_STATE_ENUM --- */
8124+#define XAUI_STATE_ENABLE (0) /* This MUST be the default
8125+ value for all requests
8126+ which set/change
8127+ equalization parameter. */
8128+#define XAUI_STATE_DISABLE (255) /* The XAUI for both ports
8129+ may be disabled for EMI
8130+ tests. There is no
8131+ provision for turning off
8132+ individual ports.
8133+ */
8134+/* --- BE_ASIC_REVISION --- */
8135+#define BE_ASIC_REV_A0 (1)
8136+#define BE_ASIC_REV_A1 (2)
8137+
8138+#endif /* __fwcmd_common_amap_h__ */
8139--- /dev/null
8140+++ b/drivers/staging/benet/fwcmd_eth_bmap.h
8141@@ -0,0 +1,280 @@
8142+/*
8143+ * Copyright (C) 2005 - 2008 ServerEngines
8144+ * All rights reserved.
8145+ *
8146+ * This program is free software; you can redistribute it and/or
8147+ * modify it under the terms of the GNU General Public License version 2
8148+ * as published by the Free Software Foundation. The full GNU General
8149+ * Public License is included in this distribution in the file called COPYING.
8150+ *
8151+ * Contact Information:
8152+ * linux-drivers@serverengines.com
8153+ *
8154+ * ServerEngines
8155+ * 209 N. Fair Oaks Ave
8156+ * Sunnyvale, CA 94085
8157+ */
8158+/*
8159+ * Autogenerated by srcgen version: 0127
8160+ */
8161+#ifndef __fwcmd_eth_bmap_h__
8162+#define __fwcmd_eth_bmap_h__
8163+#include "fwcmd_hdr_bmap.h"
8164+#include "fwcmd_types_bmap.h"
8165+
8166+struct MIB_ETH_STATISTICS_PARAMS_IN {
8167+ u32 rsvd0;
8168+} __packed;
8169+
8170+struct BE_RXF_STATS {
8171+ u32 p0recvdtotalbytesLSD; /* DWORD 0 */
8172+ u32 p0recvdtotalbytesMSD; /* DWORD 1 */
8173+ u32 p0recvdtotalframes; /* DWORD 2 */
8174+ u32 p0recvdunicastframes; /* DWORD 3 */
8175+ u32 p0recvdmulticastframes; /* DWORD 4 */
8176+ u32 p0recvdbroadcastframes; /* DWORD 5 */
8177+ u32 p0crcerrors; /* DWORD 6 */
8178+ u32 p0alignmentsymerrs; /* DWORD 7 */
8179+ u32 p0pauseframesrecvd; /* DWORD 8 */
8180+ u32 p0controlframesrecvd; /* DWORD 9 */
8181+ u32 p0inrangelenerrors; /* DWORD 10 */
8182+ u32 p0outrangeerrors; /* DWORD 11 */
8183+ u32 p0frametoolongerrors; /* DWORD 12 */
8184+ u32 p0droppedaddressmatch; /* DWORD 13 */
8185+ u32 p0droppedvlanmismatch; /* DWORD 14 */
8186+ u32 p0ipdroppedtoosmall; /* DWORD 15 */
8187+ u32 p0ipdroppedtooshort; /* DWORD 16 */
8188+ u32 p0ipdroppedhdrtoosmall; /* DWORD 17 */
8189+ u32 p0tcpdroppedlen; /* DWORD 18 */
8190+ u32 p0droppedrunt; /* DWORD 19 */
8191+ u32 p0recvd64; /* DWORD 20 */
8192+ u32 p0recvd65_127; /* DWORD 21 */
8193+ u32 p0recvd128_256; /* DWORD 22 */
8194+ u32 p0recvd256_511; /* DWORD 23 */
8195+ u32 p0recvd512_1023; /* DWORD 24 */
8196+ u32 p0recvd1518_1522; /* DWORD 25 */
8197+ u32 p0recvd1522_2047; /* DWORD 26 */
8198+ u32 p0recvd2048_4095; /* DWORD 27 */
8199+ u32 p0recvd4096_8191; /* DWORD 28 */
8200+ u32 p0recvd8192_9216; /* DWORD 29 */
8201+ u32 p0rcvdipcksmerrs; /* DWORD 30 */
8202+ u32 p0recvdtcpcksmerrs; /* DWORD 31 */
8203+ u32 p0recvdudpcksmerrs; /* DWORD 32 */
8204+ u32 p0recvdnonrsspackets; /* DWORD 33 */
8205+ u32 p0recvdippackets; /* DWORD 34 */
8206+ u32 p0recvdchute1packets; /* DWORD 35 */
8207+ u32 p0recvdchute2packets; /* DWORD 36 */
8208+ u32 p0recvdchute3packets; /* DWORD 37 */
8209+ u32 p0recvdipsecpackets; /* DWORD 38 */
8210+ u32 p0recvdmanagementpackets; /* DWORD 39 */
8211+ u32 p0xmitbyteslsd; /* DWORD 40 */
8212+ u32 p0xmitbytesmsd; /* DWORD 41 */
8213+ u32 p0xmitunicastframes; /* DWORD 42 */
8214+ u32 p0xmitmulticastframes; /* DWORD 43 */
8215+ u32 p0xmitbroadcastframes; /* DWORD 44 */
8216+ u32 p0xmitpauseframes; /* DWORD 45 */
8217+ u32 p0xmitcontrolframes; /* DWORD 46 */
8218+ u32 p0xmit64; /* DWORD 47 */
8219+ u32 p0xmit65_127; /* DWORD 48 */
8220+ u32 p0xmit128_256; /* DWORD 49 */
8221+ u32 p0xmit256_511; /* DWORD 50 */
8222+ u32 p0xmit512_1023; /* DWORD 51 */
8223+ u32 p0xmit1518_1522; /* DWORD 52 */
8224+ u32 p0xmit1522_2047; /* DWORD 53 */
8225+ u32 p0xmit2048_4095; /* DWORD 54 */
8226+ u32 p0xmit4096_8191; /* DWORD 55 */
8227+ u32 p0xmit8192_9216; /* DWORD 56 */
8228+ u32 p0rxfifooverflowdropped; /* DWORD 57 */
8229+ u32 p0ipseclookupfaileddropped; /* DWORD 58 */
8230+ u32 p1recvdtotalbytesLSD; /* DWORD 59 */
8231+ u32 p1recvdtotalbytesMSD; /* DWORD 60 */
8232+ u32 p1recvdtotalframes; /* DWORD 61 */
8233+ u32 p1recvdunicastframes; /* DWORD 62 */
8234+ u32 p1recvdmulticastframes; /* DWORD 63 */
8235+ u32 p1recvdbroadcastframes; /* DWORD 64 */
8236+ u32 p1crcerrors; /* DWORD 65 */
8237+ u32 p1alignmentsymerrs; /* DWORD 66 */
8238+ u32 p1pauseframesrecvd; /* DWORD 67 */
8239+ u32 p1controlframesrecvd; /* DWORD 68 */
8240+ u32 p1inrangelenerrors; /* DWORD 69 */
8241+ u32 p1outrangeerrors; /* DWORD 70 */
8242+ u32 p1frametoolongerrors; /* DWORD 71 */
8243+ u32 p1droppedaddressmatch; /* DWORD 72 */
8244+ u32 p1droppedvlanmismatch; /* DWORD 73 */
8245+ u32 p1ipdroppedtoosmall; /* DWORD 74 */
8246+ u32 p1ipdroppedtooshort; /* DWORD 75 */
8247+ u32 p1ipdroppedhdrtoosmall; /* DWORD 76 */
8248+ u32 p1tcpdroppedlen; /* DWORD 77 */
8249+ u32 p1droppedrunt; /* DWORD 78 */
8250+ u32 p1recvd64; /* DWORD 79 */
8251+ u32 p1recvd65_127; /* DWORD 80 */
8252+ u32 p1recvd128_256; /* DWORD 81 */
8253+ u32 p1recvd256_511; /* DWORD 82 */
8254+ u32 p1recvd512_1023; /* DWORD 83 */
8255+ u32 p1recvd1518_1522; /* DWORD 84 */
8256+ u32 p1recvd1522_2047; /* DWORD 85 */
8257+ u32 p1recvd2048_4095; /* DWORD 86 */
8258+ u32 p1recvd4096_8191; /* DWORD 87 */
8259+ u32 p1recvd8192_9216; /* DWORD 88 */
8260+ u32 p1rcvdipcksmerrs; /* DWORD 89 */
8261+ u32 p1recvdtcpcksmerrs; /* DWORD 90 */
8262+ u32 p1recvdudpcksmerrs; /* DWORD 91 */
8263+ u32 p1recvdnonrsspackets; /* DWORD 92 */
8264+ u32 p1recvdippackets; /* DWORD 93 */
8265+ u32 p1recvdchute1packets; /* DWORD 94 */
8266+ u32 p1recvdchute2packets; /* DWORD 95 */
8267+ u32 p1recvdchute3packets; /* DWORD 96 */
8268+ u32 p1recvdipsecpackets; /* DWORD 97 */
8269+ u32 p1recvdmanagementpackets; /* DWORD 98 */
8270+ u32 p1xmitbyteslsd; /* DWORD 99 */
8271+ u32 p1xmitbytesmsd; /* DWORD 100 */
8272+ u32 p1xmitunicastframes; /* DWORD 101 */
8273+ u32 p1xmitmulticastframes; /* DWORD 102 */
8274+ u32 p1xmitbroadcastframes; /* DWORD 103 */
8275+ u32 p1xmitpauseframes; /* DWORD 104 */
8276+ u32 p1xmitcontrolframes; /* DWORD 105 */
8277+ u32 p1xmit64; /* DWORD 106 */
8278+ u32 p1xmit65_127; /* DWORD 107 */
8279+ u32 p1xmit128_256; /* DWORD 108 */
8280+ u32 p1xmit256_511; /* DWORD 109 */
8281+ u32 p1xmit512_1023; /* DWORD 110 */
8282+ u32 p1xmit1518_1522; /* DWORD 111 */
8283+ u32 p1xmit1522_2047; /* DWORD 112 */
8284+ u32 p1xmit2048_4095; /* DWORD 113 */
8285+ u32 p1xmit4096_8191; /* DWORD 114 */
8286+ u32 p1xmit8192_9216; /* DWORD 115 */
8287+ u32 p1rxfifooverflowdropped; /* DWORD 116 */
8288+ u32 p1ipseclookupfaileddropped; /* DWORD 117 */
8289+ u32 pxdroppednopbuf; /* DWORD 118 */
8290+ u32 pxdroppednotxpb; /* DWORD 119 */
8291+ u32 pxdroppednoipsecbuf; /* DWORD 120 */
8292+ u32 pxdroppednoerxdescr; /* DWORD 121 */
8293+ u32 pxdroppednotpredescr; /* DWORD 122 */
8294+ u32 pxrecvdmanagementportpackets; /* DWORD 123 */
8295+ u32 pxrecvdmanagementportbytes; /* DWORD 124 */
8296+ u32 pxrecvdmanagementportpauseframes; /* DWORD 125 */
8297+ u32 pxrecvdmanagementporterrors; /* DWORD 126 */
8298+ u32 pxxmitmanagementportpackets; /* DWORD 127 */
8299+ u32 pxxmitmanagementportbytes; /* DWORD 128 */
8300+ u32 pxxmitmanagementportpause; /* DWORD 129 */
8301+ u32 pxxmitmanagementportrxfifooverflow; /* DWORD 130 */
8302+ u32 pxrecvdipsecipcksmerrs; /* DWORD 131 */
8303+ u32 pxrecvdtcpsecipcksmerrs; /* DWORD 132 */
8304+ u32 pxrecvdudpsecipcksmerrs; /* DWORD 133 */
8305+ u32 pxipsecrunt; /* DWORD 134 */
8306+ u32 pxipsecaddressmismatchdropped; /* DWORD 135 */
8307+ u32 pxipsecrxfifooverflowdropped; /* DWORD 136 */
8308+ u32 pxipsecframestoolong; /* DWORD 137 */
8309+ u32 pxipsectotalipframes; /* DWORD 138 */
8310+ u32 pxipseciptoosmall; /* DWORD 139 */
8311+ u32 pxipseciptooshort; /* DWORD 140 */
8312+ u32 pxipseciphdrtoosmall; /* DWORD 141 */
8313+ u32 pxipsectcphdrbad; /* DWORD 142 */
8314+ u32 pxrecvdipsecchute1; /* DWORD 143 */
8315+ u32 pxrecvdipsecchute2; /* DWORD 144 */
8316+ u32 pxrecvdipsecchute3; /* DWORD 145 */
8317+ u32 pxdropped7frags; /* DWORD 146 */
8318+ u32 pxdroppedfrags; /* DWORD 147 */
8319+ u32 pxdroppedinvalidfragring; /* DWORD 148 */
8320+ u32 pxnumforwardedpackets; /* DWORD 149 */
8321+} __packed;
8322+
8323+union MIB_ETH_STATISTICS_PARAMS {
8324+ struct MIB_ETH_STATISTICS_PARAMS_IN request;
8325+ struct BE_RXF_STATS response;
8326+} __packed;
8327+
8328+/*
8329+ * Query ethernet statistics. All domains may issue this command. The
8330+ * host domain drivers may optionally reset internal statistic counters
8331+ * with a query.
8332+ */
8333+struct FWCMD_ETH_GET_STATISTICS {
8334+ union FWCMD_HEADER header;
8335+ union MIB_ETH_STATISTICS_PARAMS params;
8336+} __packed;
8337+
8338+
8339+struct FWCMD_ETH_ANON_175_REQUEST {
8340+ u8 port0_promiscuous;
8341+ u8 port1_promiscuous;
8342+ u16 rsvd0;
8343+} __packed;
8344+
8345+struct FWCMD_ETH_ANON_176_RESPONSE {
8346+ u32 rsvd0;
8347+} __packed;
8348+
8349+union FWCMD_ETH_ANON_174_PARAMS {
8350+ struct FWCMD_ETH_ANON_175_REQUEST request;
8351+ struct FWCMD_ETH_ANON_176_RESPONSE response;
8352+} __packed;
8353+
8354+/* Enables/Disables promiscuous ethernet receive mode. */
8355+struct FWCMD_ETH_PROMISCUOUS {
8356+ union FWCMD_HEADER header;
8357+ union FWCMD_ETH_ANON_174_PARAMS params;
8358+} __packed;
8359+
8360+struct FWCMD_ETH_ANON_178_REQUEST {
8361+ u32 new_fragsize_log2;
8362+} __packed;
8363+
8364+struct FWCMD_ETH_ANON_179_RESPONSE {
8365+ u32 actual_fragsize_log2;
8366+} __packed;
8367+
8368+union FWCMD_ETH_ANON_177_PARAMS {
8369+ struct FWCMD_ETH_ANON_178_REQUEST request;
8370+ struct FWCMD_ETH_ANON_179_RESPONSE response;
8371+} __packed;
8372+
8373+/*
8374+ * Sets the Ethernet RX fragment size. Only host (domain 0) networking
8375+ * drivers may issue this command. This call will fail for non-host
8376+ * protection domains. In this situation the MCC CQ status will indicate
8377+ * a failure due to insufficient privileges. The response should be
8378+ * ignored, and the driver should use the FWCMD_ETH_GET_RX_FRAG_SIZE to
8379+ * query the existing ethernet receive fragment size. It must use this
8380+ * fragment size for all fragments in the ethernet receive ring. If
8381+ * the command succeeds, the driver must use the frag size indicated
8382+ * in the command response since the requested frag size may not be applied
8383+ * until the next reboot. When the requested fragsize matches the response
8384+ * fragsize, this indicates the request was applied immediately.
8385+ */
8386+struct FWCMD_ETH_SET_RX_FRAG_SIZE {
8387+ union FWCMD_HEADER header;
8388+ union FWCMD_ETH_ANON_177_PARAMS params;
8389+} __packed;
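
The *_fragsize_log2 fields encode the fragment size as a power of two. A hypothetical request/response round trip (values chosen for illustration):

    fwcmd->params.request.new_fragsize_log2 = 12;	/* request 4 kB */
    /* ... post the FWCMD and wait for its completion ... */
    frag_bytes = 1U << fwcmd->params.response.actual_fragsize_log2;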
8390+
8391+struct FWCMD_ETH_ANON_181_REQUEST {
8392+ u32 rsvd0;
8393+} __packed;
8394+
8395+struct FWCMD_ETH_ANON_182_RESPONSE {
8396+ u32 actual_fragsize_log2;
8397+} __packed;
8398+
8399+union FWCMD_ETH_ANON_180_PARAMS {
8400+ struct FWCMD_ETH_ANON_181_REQUEST request;
8401+ struct FWCMD_ETH_ANON_182_RESPONSE response;
8402+} __packed;
8403+
8404+/*
8405+ * Queries the Ethernet RX fragment size. All domains may issue this
8406+ * command. The driver should call this command to determine the minimum
8407+ * required fragment size for the ethernet RX ring buffers. Drivers
8408+ * may choose to use a larger size for each fragment buffer, but BladeEngine
8409+ * will use up to the configured minimum required fragsize in each ethernet
8410+ * receive fragment buffer. For example, if the ethernet receive fragment
8411+ * size is configured to 4kB, and a driver uses 8kB fragments, a 6kB
8412+ * ethernet packet received by BladeEngine will be split across two
8413+ * of the driver's receive fragments (4kB in one fragment buffer, and
8414+ * 2kB in the subsequent fragment buffer).
8415+ */
8416+struct FWCMD_ETH_GET_RX_FRAG_SIZE {
8417+ union FWCMD_HEADER header;
8418+ union FWCMD_ETH_ANON_180_PARAMS params;
8419+} __packed;
8420+
8421+#endif /* __fwcmd_eth_bmap_h__ */
8422--- /dev/null
8423+++ b/drivers/staging/benet/fwcmd_hdr_bmap.h
8424@@ -0,0 +1,54 @@
8425+/*
8426+ * Copyright (C) 2005 - 2008 ServerEngines
8427+ * All rights reserved.
8428+ *
8429+ * This program is free software; you can redistribute it and/or
8430+ * modify it under the terms of the GNU General Public License version 2
8431+ * as published by the Free Software Foundation. The full GNU General
8432+ * Public License is included in this distribution in the file called COPYING.
8433+ *
8434+ * Contact Information:
8435+ * linux-drivers@serverengines.com
8436+ *
8437+ * ServerEngines
8438+ * 209 N. Fair Oaks Ave
8439+ * Sunnyvale, CA 94085
8440+ */
8441+/*
8442+ * Autogenerated by srcgen version: 0127
8443+ */
8444+#ifndef __fwcmd_hdr_bmap_h__
8445+#define __fwcmd_hdr_bmap_h__
8446+
8447+struct FWCMD_REQUEST_HEADER {
8448+ u8 opcode;
8449+ u8 subsystem;
8450+ u8 port_number;
8451+ u8 domain;
8452+ u32 timeout;
8453+ u32 request_length;
8454+ u32 rsvd0;
8455+} __packed;
8456+
8457+struct FWCMD_RESPONSE_HEADER {
8458+ u8 opcode;
8459+ u8 subsystem;
8460+ u8 rsvd0;
8461+ u8 domain;
8462+ u8 status;
8463+ u8 additional_status;
8464+ u16 rsvd1;
8465+ u32 response_length;
8466+ u32 actual_response_length;
8467+} __packed;
8468+
8469+/*
8470+ * The firmware/driver overwrites the input FWCMD_REQUEST_HEADER with
8471+ * the output FWCMD_RESPONSE_HEADER.
8472+ */
8473+union FWCMD_HEADER {
8474+ struct FWCMD_REQUEST_HEADER request;
8475+ struct FWCMD_RESPONSE_HEADER response;
8476+} __packed;
8477+
8478+#endif /* __fwcmd_hdr_bmap_h__ */
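
Because FWCMD_HEADER is a union, the same storage holds the request view before posting and the response view after completion. An illustrative helper (the zero success code is an assumption, not stated in this hunk):

    static inline int be_fwcmd_succeeded(const union FWCMD_HEADER *hdr)
    {
    	return hdr->response.status == 0;	/* assumed success code */
    }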
8479--- /dev/null
8480+++ b/drivers/staging/benet/fwcmd_mcc.h
8481@@ -0,0 +1,94 @@
8482+/*
8483+ * Copyright (C) 2005 - 2008 ServerEngines
8484+ * All rights reserved.
8485+ *
8486+ * This program is free software; you can redistribute it and/or
8487+ * modify it under the terms of the GNU General Public License version 2
8488+ * as published by the Free Software Foundation. The full GNU General
8489+ * Public License is included in this distribution in the file called COPYING.
8490+ *
8491+ * Contact Information:
8492+ * linux-drivers@serverengines.com
8493+ *
8494+ * ServerEngines
8495+ * 209 N. Fair Oaks Ave
8496+ * Sunnyvale, CA 94085
8497+ */
8498+/*
8499+ * Autogenerated by srcgen version: 0127
8500+ */
8501+#ifndef __fwcmd_mcc_amap_h__
8502+#define __fwcmd_mcc_amap_h__
8503+#include "fwcmd_opcodes.h"
8504+/*
8505+ * Where applicable, a WRB, may contain a list of Scatter-gather elements.
8506+ * Each element supports a 64 bit address and a 32bit length field.
8507+ */
8508+struct BE_MCC_SGE_AMAP {
8509+ u8 pa_lo[32]; /* DWORD 0 */
8510+ u8 pa_hi[32]; /* DWORD 1 */
8511+ u8 length[32]; /* DWORD 2 */
8512+} __packed;
8513+struct MCC_SGE_AMAP {
8514+ u32 dw[3];
8515+};
8516+/*
8517+ * The design of an MCC_SGE allows up to 19 elements to be embedded
8518+ * in a WRB, supporting 64KB data transfers (assuming a 4KB page size).
8519+ */
8520+struct BE_MCC_WRB_PAYLOAD_AMAP {
8521+ union {
8522+ struct BE_MCC_SGE_AMAP sgl[19];
8523+ u8 embedded[59][32]; /* DWORD 0 */
8524+ };
8525+} __packed;
8526+struct MCC_WRB_PAYLOAD_AMAP {
8527+ u32 dw[59];
8528+};
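
Illustrative arithmetic behind the 19-element claim above: the payload spans 59 dwords (236 bytes) and each SGE takes 3 dwords (12 bytes), so 59 / 3 = 19 whole elements fit. With one 4 KB page per element, 19 pages comfortably cover a 64 KB transfer, which touches at most 17 pages even when the buffer is not page aligned.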
8529+
8530+/*
8531+ * This is the structure of the MCC Command WRB for commands
8532+ * sent to the Management Processing Unit (MPU). See section
8533+ * for usage in embedded and non-embedded modes.
8534+ */
8535+struct BE_MCC_WRB_AMAP {
8536+ u8 embedded; /* DWORD 0 */
8537+ u8 rsvd0[2]; /* DWORD 0 */
8538+ u8 sge_count[5]; /* DWORD 0 */
8539+ u8 rsvd1[16]; /* DWORD 0 */
8540+ u8 special[8]; /* DWORD 0 */
8541+ u8 payload_length[32]; /* DWORD 1 */
8542+ u8 tag[2][32]; /* DWORD 2 */
8543+ u8 rsvd2[32]; /* DWORD 4 */
8544+ struct BE_MCC_WRB_PAYLOAD_AMAP payload;
8545+} __packed;
8546+struct MCC_WRB_AMAP {
8547+ u32 dw[64];
8548+};
8549+
8550+/* This is the structure of the MCC Completion queue entry */
8551+struct BE_MCC_CQ_ENTRY_AMAP {
8552+ u8 completion_status[16]; /* DWORD 0 */
8553+ u8 extended_status[16]; /* DWORD 0 */
8554+ u8 mcc_tag[2][32]; /* DWORD 1 */
8555+ u8 rsvd0[27]; /* DWORD 3 */
8556+ u8 consumed; /* DWORD 3 */
8557+ u8 completed; /* DWORD 3 */
8558+ u8 hpi_buffer_completion; /* DWORD 3 */
8559+ u8 async_event; /* DWORD 3 */
8560+ u8 valid; /* DWORD 3 */
8561+} __packed;
8562+struct MCC_CQ_ENTRY_AMAP {
8563+ u32 dw[4];
8564+};
8565+
8566+/* Mailbox structures used by the MPU during bootstrap */
8567+struct BE_MCC_MAILBOX_AMAP {
8568+ struct BE_MCC_WRB_AMAP wrb;
8569+ struct BE_MCC_CQ_ENTRY_AMAP cq;
8570+} __packed;
8571+struct MCC_MAILBOX_AMAP {
8572+ u32 dw[68];
8573+};
8574+
8575+#endif /* __fwcmd_mcc_amap_h__ */
8576--- /dev/null
8577+++ b/drivers/staging/benet/fwcmd_opcodes.h
8578@@ -0,0 +1,244 @@
8579+/*
8580+ * Copyright (C) 2005 - 2008 ServerEngines
8581+ * All rights reserved.
8582+ *
8583+ * This program is free software; you can redistribute it and/or
8584+ * modify it under the terms of the GNU General Public License version 2
8585+ * as published by the Free Software Foundation. The full GNU General
8586+ * Public License is included in this distribution in the file called COPYING.
8587+ *
8588+ * Contact Information:
8589+ * linux-drivers@serverengines.com
8590+ *
8591+ * ServerEngines
8592+ * 209 N. Fair Oaks Ave
8593+ * Sunnyvale, CA 94085
8594+ */
8595+/*
8596+ * Autogenerated by srcgen version: 0127
8597+ */
8598+#ifndef __fwcmd_opcodes_amap_h__
8599+#define __fwcmd_opcodes_amap_h__
8600+
8601+/*
8602+ * --- FWCMD_SUBSYSTEMS ---
8603+ * The commands are grouped into the following subsystems. The subsystem
8604+ * code along with the opcode uniquely identify a particular fwcmd.
8605+ */
8606+#define FWCMD_SUBSYSTEM_RSVD (0) /* This subsystem is reserved. It is */
8607+ /* never used. */
8608+#define FWCMD_SUBSYSTEM_COMMON (1) /* CMDs in this group are common to
8609+ * all subsystems. See
8610+ * COMMON_SUBSYSTEM_OPCODES for opcodes
8611+ * and Common Host Configuration CMDs
8612+ * for the FWCMD descriptions.
8613+ */
8614+#define FWCMD_SUBSYSTEM_COMMON_ISCSI (2) /* CMDs in this group are */
8615+ /*
8616+ * common to Initiator and Target. See
8617+ * COMMON_ISCSI_SUBSYSTEM_OPCODES and
8618+ * Common iSCSI Initiator and Target
8619+ * CMDs for the command descriptions.
8620+ */
8621+#define FWCMD_SUBSYSTEM_ETH (3) /* This subsystem is used to
8622+ execute Ethernet commands. */
8623+
8624+#define FWCMD_SUBSYSTEM_TPM (4) /* This subsystem is used
8625+ to execute TPM commands. */
8626+#define FWCMD_SUBSYSTEM_PXE_UNDI (5) /* This subsystem is used
8627+ * to execute PXE
8628+ * and UNDI specific commands.
8629+ */
8630+
8631+#define FWCMD_SUBSYSTEM_ISCSI_INI (6) /* This subsystem is used to
8632+ execute ISCSI Initiator
8633+ specific commands.
8634+ */
8635+#define FWCMD_SUBSYSTEM_ISCSI_TGT (7) /* This subsystem is used
8636+ to execute iSCSI Target
8637+ specific commands between
8638+ PTL and ARM firmware.
8639+ */
8640+#define FWCMD_SUBSYSTEM_MILI_PTL (8) /* This subsystem is used to
8641+ execute iSCSI Target specific
8642+ commands between MILI
8643+ and PTL. */
8644+#define FWCMD_SUBSYSTEM_MILI_TMD (9) /* This subsystem is used to
8645+ execute iSCSI Target specific
8646+ commands between MILI
8647+ and TMD. */
8648+#define FWCMD_SUBSYSTEM_PROXY (11) /* This subsystem is used
8649+ to execute proxied commands
8650+ within the host at the
8651+ explicit request of a
8652+ non-privileged domain.
8653+ This 'subsystem' is entirely
8654+ virtual from the controller
8655+ and firmware perspective as
8656+ it is implemented in host
8657+ drivers.
8658+ */
8659+
8660+/*
8661+ * --- COMMON_SUBSYSTEM_OPCODES ---
8662+ * These opcodes are common to both networking and storage PCI
8663+ * functions. They are used to reserve resources and configure
8664+ * BladeEngine. These opcodes all use the FWCMD_SUBSYSTEM_COMMON
8665+ * subsystem code.
8666+ */
8667+#define OPCODE_COMMON_NTWK_MAC_QUERY (1)
8668+#define SUBSYSTEM_COMMON_NTWK_MAC_QUERY (1)
8669+#define SUBSYSTEM_COMMON_NTWK_MAC_SET (1)
8670+#define SUBSYSTEM_COMMON_NTWK_MULTICAST_SET (1)
8671+#define SUBSYSTEM_COMMON_NTWK_VLAN_CONFIG (1)
8672+#define SUBSYSTEM_COMMON_NTWK_LINK_STATUS_QUERY (1)
8673+#define SUBSYSTEM_COMMON_READ_FLASHROM (1)
8674+#define SUBSYSTEM_COMMON_WRITE_FLASHROM (1)
8675+#define SUBSYSTEM_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (1)
8676+#define SUBSYSTEM_COMMON_ADD_PAGE_TABLES (1)
8677+#define SUBSYSTEM_COMMON_REMOVE_PAGE_TABLES (1)
8678+#define SUBSYSTEM_COMMON_RING_DESTROY (1)
8679+#define SUBSYSTEM_COMMON_CQ_CREATE (1)
8680+#define SUBSYSTEM_COMMON_EQ_CREATE (1)
8681+#define SUBSYSTEM_COMMON_ETH_RX_CREATE (1)
8682+#define SUBSYSTEM_COMMON_ETH_TX_CREATE (1)
8683+#define SUBSYSTEM_COMMON_ISCSI_DEFQ_CREATE (1)
8684+#define SUBSYSTEM_COMMON_ISCSI_WRBQ_CREATE (1)
8685+#define SUBSYSTEM_COMMON_MCC_CREATE (1)
8686+#define SUBSYSTEM_COMMON_JELL_CONFIG (1)
8687+#define SUBSYSTEM_COMMON_FORCE_FAILOVER (1)
8688+#define SUBSYSTEM_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (1)
8689+#define SUBSYSTEM_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (1)
8690+#define SUBSYSTEM_COMMON_POST_ZERO_BUFFER (1)
8691+#define SUBSYSTEM_COMMON_GET_QOS (1)
8692+#define SUBSYSTEM_COMMON_SET_QOS (1)
8693+#define SUBSYSTEM_COMMON_TCP_GET_STATISTICS (1)
8694+#define SUBSYSTEM_COMMON_SEEPROM_READ (1)
8695+#define SUBSYSTEM_COMMON_TCP_STATE_QUERY (1)
8696+#define SUBSYSTEM_COMMON_GET_CNTL_ATTRIBUTES (1)
8697+#define SUBSYSTEM_COMMON_NOP (1)
8698+#define SUBSYSTEM_COMMON_NTWK_RX_FILTER (1)
8699+#define SUBSYSTEM_COMMON_GET_FW_VERSION (1)
8700+#define SUBSYSTEM_COMMON_SET_FLOW_CONTROL (1)
8701+#define SUBSYSTEM_COMMON_GET_FLOW_CONTROL (1)
8702+#define SUBSYSTEM_COMMON_SET_TCP_PARAMETERS (1)
8703+#define SUBSYSTEM_COMMON_SET_FRAME_SIZE (1)
8704+#define SUBSYSTEM_COMMON_GET_FAT (1)
8705+#define SUBSYSTEM_COMMON_MODIFY_EQ_DELAY (1)
8706+#define SUBSYSTEM_COMMON_FIRMWARE_CONFIG (1)
8707+#define SUBSYSTEM_COMMON_ENABLE_DISABLE_DOMAINS (1)
8708+#define SUBSYSTEM_COMMON_GET_DOMAIN_CONFIG (1)
8709+#define SUBSYSTEM_COMMON_SET_VLD_CONFIG (1)
8710+#define SUBSYSTEM_COMMON_GET_VLD_CONFIG (1)
8711+#define SUBSYSTEM_COMMON_GET_PORT_EQUALIZATION (1)
8712+#define SUBSYSTEM_COMMON_SET_PORT_EQUALIZATION (1)
8713+#define SUBSYSTEM_COMMON_RED_CONFIG (1)
8714+#define OPCODE_COMMON_NTWK_MAC_SET (2)
8715+#define OPCODE_COMMON_NTWK_MULTICAST_SET (3)
8716+#define OPCODE_COMMON_NTWK_VLAN_CONFIG (4)
8717+#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY (5)
8718+#define OPCODE_COMMON_READ_FLASHROM (6)
8719+#define OPCODE_COMMON_WRITE_FLASHROM (7)
8720+#define OPCODE_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (8)
8721+#define OPCODE_COMMON_ADD_PAGE_TABLES (9)
8722+#define OPCODE_COMMON_REMOVE_PAGE_TABLES (10)
8723+#define OPCODE_COMMON_RING_DESTROY (11)
8724+#define OPCODE_COMMON_CQ_CREATE (12)
8725+#define OPCODE_COMMON_EQ_CREATE (13)
8726+#define OPCODE_COMMON_ETH_RX_CREATE (14)
8727+#define OPCODE_COMMON_ETH_TX_CREATE (15)
8728+#define OPCODE_COMMON_NET_RESERVED0 (16) /* Reserved */
8729+#define OPCODE_COMMON_NET_RESERVED1 (17) /* Reserved */
8730+#define OPCODE_COMMON_NET_RESERVED2 (18) /* Reserved */
8731+#define OPCODE_COMMON_ISCSI_DEFQ_CREATE (19)
8732+#define OPCODE_COMMON_ISCSI_WRBQ_CREATE (20)
8733+#define OPCODE_COMMON_MCC_CREATE (21)
8734+#define OPCODE_COMMON_JELL_CONFIG (22)
8735+#define OPCODE_COMMON_FORCE_FAILOVER (23)
8736+#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (24)
8737+#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (25)
8738+#define OPCODE_COMMON_POST_ZERO_BUFFER (26)
8739+#define OPCODE_COMMON_GET_QOS (27)
8740+#define OPCODE_COMMON_SET_QOS (28)
8741+#define OPCODE_COMMON_TCP_GET_STATISTICS (29)
8742+#define OPCODE_COMMON_SEEPROM_READ (30)
8743+#define OPCODE_COMMON_TCP_STATE_QUERY (31)
8744+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES (32)
8745+#define OPCODE_COMMON_NOP (33)
8746+#define OPCODE_COMMON_NTWK_RX_FILTER (34)
8747+#define OPCODE_COMMON_GET_FW_VERSION (35)
8748+#define OPCODE_COMMON_SET_FLOW_CONTROL (36)
8749+#define OPCODE_COMMON_GET_FLOW_CONTROL (37)
8750+#define OPCODE_COMMON_SET_TCP_PARAMETERS (38)
8751+#define OPCODE_COMMON_SET_FRAME_SIZE (39)
8752+#define OPCODE_COMMON_GET_FAT (40)
8753+#define OPCODE_COMMON_MODIFY_EQ_DELAY (41)
8754+#define OPCODE_COMMON_FIRMWARE_CONFIG (42)
8755+#define OPCODE_COMMON_ENABLE_DISABLE_DOMAINS (43)
8756+#define OPCODE_COMMON_GET_DOMAIN_CONFIG (44)
8757+#define OPCODE_COMMON_SET_VLD_CONFIG (45)
8758+#define OPCODE_COMMON_GET_VLD_CONFIG (46)
8759+#define OPCODE_COMMON_GET_PORT_EQUALIZATION (47)
8760+#define OPCODE_COMMON_SET_PORT_EQUALIZATION (48)
8761+#define OPCODE_COMMON_RED_CONFIG (49)
8762+
8763+
8764+
8765+/*
8766+ * --- ETH_SUBSYSTEM_OPCODES ---
8767+ * These opcodes are used for configuring the Ethernet interfaces. These
8768+ * opcodes all use the FWCMD_SUBSYSTEM_ETH subsystem code.
8769+ */
8770+#define OPCODE_ETH_RSS_CONFIG (1)
8771+#define OPCODE_ETH_ACPI_CONFIG (2)
8772+#define SUBSYSTEM_ETH_RSS_CONFIG (3)
8773+#define SUBSYSTEM_ETH_ACPI_CONFIG (3)
8774+#define OPCODE_ETH_PROMISCUOUS (3)
8775+#define SUBSYSTEM_ETH_PROMISCUOUS (3)
8776+#define SUBSYSTEM_ETH_GET_STATISTICS (3)
8777+#define SUBSYSTEM_ETH_GET_RX_FRAG_SIZE (3)
8778+#define SUBSYSTEM_ETH_SET_RX_FRAG_SIZE (3)
8779+#define OPCODE_ETH_GET_STATISTICS (4)
8780+#define OPCODE_ETH_GET_RX_FRAG_SIZE (5)
8781+#define OPCODE_ETH_SET_RX_FRAG_SIZE (6)
8782+
8783+
8784+
8785+
8786+
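/*
 * Editorial sketch (not in the original patch): the subsystem code
 * and the opcode together identify a FWCMD, and opcodes alone are
 * ambiguous across subsystems, e.g.
 * OPCODE_COMMON_NTWK_MULTICAST_SET == OPCODE_ETH_PROMISCUOUS == 3.
 * A hypothetical helper packing both into a single lookup key:
 */
static inline u32 fwcmd_key(u32 subsystem, u32 opcode)
{
	return (subsystem << 16) | (opcode & 0xFFFF);
}
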
8787+/*
8788+ * --- MCC_STATUS_CODE ---
8789+ * These are the global status codes used by all subsystems
8790+ */
8791+#define MCC_STATUS_SUCCESS (0) /* Indicates a successful
8792+ completion of the command */
8793+#define MCC_STATUS_INSUFFICIENT_PRIVILEGES (1) /* The client does not have
8794+ sufficient privileges to
8795+ execute the command */
8796+#define MCC_STATUS_INVALID_PARAMETER (2) /* A parameter in the command
8797+ was invalid. The extended
8798+ status contains the index
8799+ of the parameter */
8800+#define MCC_STATUS_INSUFFICIENT_RESOURCES (3) /* There are insufficient
8801+ chip resources to execute
8802+ the command */
8803+#define MCC_STATUS_QUEUE_FLUSHING (4) /* The command is completing
8804+ because the queue was
8805+ getting flushed */
8806+#define MCC_STATUS_DMA_FAILED (5) /* The command is completing
8807+ with a DMA error */
8808+
8809+/*
8810+ * --- MGMT_ERROR_CODES ---
8811+ * Error Codes returned in the status field of the FWCMD response header
8812+ */
8813+#define MGMT_STATUS_SUCCESS (0) /* The FWCMD completed
8814+ without errors */
8815+#define MGMT_STATUS_FAILED (1) /* Error status in the Status
8816+ field of the
8817+ struct FWCMD_RESPONSE_HEADER */
8818+#define MGMT_STATUS_ILLEGAL_REQUEST (2) /* Invalid FWCMD opcode */
8819+#define MGMT_STATUS_ILLEGAL_FIELD (3) /* Invalid parameter in
8820+ the FWCMD payload */
8821+
8822+#endif /* __fwcmd_opcodes_amap_h__ */
8823--- /dev/null
8824+++ b/drivers/staging/benet/fwcmd_types_bmap.h
8825@@ -0,0 +1,29 @@
8826+/*
8827+ * Copyright (C) 2005 - 2008 ServerEngines
8828+ * All rights reserved.
8829+ *
8830+ * This program is free software; you can redistribute it and/or
8831+ * modify it under the terms of the GNU General Public License version 2
8832+ * as published by the Free Software Foundation. The full GNU General
8833+ * Public License is included in this distribution in the file called COPYING.
8834+ *
8835+ * Contact Information:
8836+ * linux-drivers@serverengines.com
8837+ *
8838+ * ServerEngines
8839+ * 209 N. Fair Oaks Ave
8840+ * Sunnyvale, CA 94085
8841+ */
8842+/*
8843+ * Autogenerated by srcgen version: 0127
8844+ */
8845+#ifndef __fwcmd_types_bmap_h__
8846+#define __fwcmd_types_bmap_h__
8847+
8848+/* MAC address format */
8849+struct MAC_ADDRESS_FORMAT {
8850+ u16 SizeOfStructure;
8851+ u8 MACAddress[6];
8852+} __packed;
8853+
8854+#endif /* __fwcmd_types_bmap_h__ */
8855--- /dev/null
8856+++ b/drivers/staging/benet/host_struct.h
8857@@ -0,0 +1,182 @@
8858+/*
8859+ * Copyright (C) 2005 - 2008 ServerEngines
8860+ * All rights reserved.
8861+ *
8862+ * This program is free software; you can redistribute it and/or
8863+ * modify it under the terms of the GNU General Public License version 2
8864+ * as published by the Free Software Foundation. The full GNU General
8865+ * Public License is included in this distribution in the file called COPYING.
8866+ *
8867+ * Contact Information:
8868+ * linux-drivers@serverengines.com
8869+ *
8870+ * ServerEngines
8871+ * 209 N. Fair Oaks Ave
8872+ * Sunnyvale, CA 94085
8873+ */
8874+/*
8875+ * Autogenerated by srcgen version: 0127
8876+ */
8877+#ifndef __host_struct_amap_h__
8878+#define __host_struct_amap_h__
8879+#include "be_cm.h"
8880+#include "be_common.h"
8881+#include "descriptors.h"
8882+
8883+/* --- EQ_COMPLETION_MAJOR_CODE_ENUM --- */
8884+#define EQ_MAJOR_CODE_COMPLETION (0) /* Completion event on a */
8885+ /* completion queue. */
8886+#define EQ_MAJOR_CODE_ETH (1) /* Affiliated Ethernet Event. */
8887+#define EQ_MAJOR_CODE_RESERVED (2) /* Reserved */
8888+#define EQ_MAJOR_CODE_RDMA (3) /* Affiliated RDMA Event. */
8889+#define EQ_MAJOR_CODE_ISCSI (4) /* Affiliated ISCSI Event */
8890+#define EQ_MAJOR_CODE_UNAFFILIATED (5) /* Unaffiliated Event */
8891+
8892+/* --- EQ_COMPLETION_MINOR_CODE_ENUM --- */
8893+#define EQ_MINOR_CODE_COMPLETION (0) /* Completion event on a */
8894+ /* completion queue. */
8895+#define EQ_MINOR_CODE_OTHER (1) /* Other Event (TBD). */
8896+
8897+/* Queue Entry Definition for all 4 byte event queue types. */
8898+struct BE_EQ_ENTRY_AMAP {
8899+ u8 Valid; /* DWORD 0 */
8900+ u8 MajorCode[3]; /* DWORD 0 */
8901+ u8 MinorCode[12]; /* DWORD 0 */
8902+ u8 ResourceID[16]; /* DWORD 0 */
8903+} __packed;
8904+struct EQ_ENTRY_AMAP {
8905+ u32 dw[1];
8906+};
8907+
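/*
 * Editorial sketch (not in the original patch): decoding the 4-byte
 * EQ entry above by hand, following the bit layout of
 * BE_EQ_ENTRY_AMAP (bit 0 Valid, bits 1-3 MajorCode, bits 4-15
 * MinorCode, bits 16-31 ResourceID). The driver itself does this via
 * AMAP_GET_BITS_PTR() from hwlib.h; the helper names here are
 * hypothetical.
 */
static inline u32 eq_entry_major_code(u32 dw0)
{
	return (dw0 >> 1) & 0x7;	/* e.g. EQ_MAJOR_CODE_ETH */
}

static inline u32 eq_entry_resource_id(u32 dw0)
{
	return (dw0 >> 16) & 0xFFFF;	/* CQ id for completion events */
}
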
8908+/*
8909+ * --- ETH_EVENT_CODE ---
8910+ * These codes are returned by the MPU when one of these events has occurred,
8911+ * and the event is configured to report to an Event Queue when an event
8912+ * is detected.
8913+ */
8914+#define ETH_EQ_LINK_STATUS (0) /* Link status change event */
8915+ /* detected. */
8916+#define ETH_EQ_WATERMARK (1) /* watermark event detected. */
8917+#define ETH_EQ_MAGIC_PKT (2) /* magic pkt event detected. */
8918+#define ETH_EQ_ACPI_PKT0 (3) /* ACPI interesting packet */
8919+ /* detected. */
8920+#define ETH_EQ_ACPI_PKT1 (3) /* ACPI interesting packet */
8921+ /* detected. */
8922+#define ETH_EQ_ACPI_PKT2 (3) /* ACPI interesting packet */
8923+ /* detected. */
8924+#define ETH_EQ_ACPI_PKT3 (3) /* ACPI interesting packet */
8925+ /* detected. */
8926+
8927+/*
8928+ * --- ETH_TX_COMPL_STATUS_ENUM ---
8929+ * Status codes contained in Ethernet TX completion descriptors.
8930+ */
8931+#define ETH_COMP_VALID (0)
8932+#define ETH_COMP_ERROR (1)
8933+#define ETH_COMP_INVALID (15)
8934+
8935+/*
8936+ * --- ETH_TX_COMPL_PORT_ENUM ---
8937+ * Port indicator contained in Ethernet TX completion descriptors.
8938+ */
8939+#define ETH_COMP_PORT0 (0)
8940+#define ETH_COMP_PORT1 (1)
8941+#define ETH_COMP_MGMT (2)
8942+
8943+/*
8944+ * --- ETH_TX_COMPL_CT_ENUM ---
8945+ * Completion type indicator contained in Ethernet TX completion descriptors.
8946+ */
8947+#define ETH_COMP_ETH (0)
8948+
8949+/*
8950+ * Work request block that the driver issues to the chip for
8951+ * Ethernet transmissions. All control fields must be valid in each WRB for
8952+ * a message. The controller, as specified by the flags, optionally writes
8953+ * an entry to the Completion Ring and generates an event.
8954+ */
8955+struct BE_ETH_WRB_AMAP {
8956+ u8 frag_pa_hi[32]; /* DWORD 0 */
8957+ u8 frag_pa_lo[32]; /* DWORD 1 */
8958+ u8 complete; /* DWORD 2 */
8959+ u8 event; /* DWORD 2 */
8960+ u8 crc; /* DWORD 2 */
8961+ u8 forward; /* DWORD 2 */
8962+ u8 ipsec; /* DWORD 2 */
8963+ u8 mgmt; /* DWORD 2 */
8964+ u8 ipcs; /* DWORD 2 */
8965+ u8 udpcs; /* DWORD 2 */
8966+ u8 tcpcs; /* DWORD 2 */
8967+ u8 lso; /* DWORD 2 */
8968+ u8 last; /* DWORD 2 */
8969+ u8 vlan; /* DWORD 2 */
8970+ u8 dbg[3]; /* DWORD 2 */
8971+ u8 hash_val[3]; /* DWORD 2 */
8972+ u8 lso_mss[14]; /* DWORD 2 */
8973+ u8 frag_len[16]; /* DWORD 3 */
8974+ u8 vlan_tag[16]; /* DWORD 3 */
8975+} __packed;
8976+struct ETH_WRB_AMAP {
8977+ u32 dw[4];
8978+};
8979+
8980+/* This is an Ethernet transmit completion descriptor */
8981+struct BE_ETH_TX_COMPL_AMAP {
8982+ u8 user_bytes[16]; /* DWORD 0 */
8983+ u8 nwh_bytes[8]; /* DWORD 0 */
8984+ u8 lso; /* DWORD 0 */
8985+ u8 rsvd0[7]; /* DWORD 0 */
8986+ u8 wrb_index[16]; /* DWORD 1 */
8987+ u8 ct[2]; /* DWORD 1 */
8988+ u8 port[2]; /* DWORD 1 */
8989+ u8 rsvd1[8]; /* DWORD 1 */
8990+ u8 status[4]; /* DWORD 1 */
8991+ u8 rsvd2[16]; /* DWORD 2 */
8992+ u8 ringid[11]; /* DWORD 2 */
8993+ u8 hash_val[4]; /* DWORD 2 */
8994+ u8 valid; /* DWORD 2 */
8995+ u8 rsvd3[32]; /* DWORD 3 */
8996+} __packed;
8997+struct ETH_TX_COMPL_AMAP {
8998+ u32 dw[4];
8999+};
9000+
9001+/* Ethernet Receive Buffer descriptor */
9002+struct BE_ETH_RX_D_AMAP {
9003+ u8 fragpa_hi[32]; /* DWORD 0 */
9004+ u8 fragpa_lo[32]; /* DWORD 1 */
9005+} __packed;
9006+struct ETH_RX_D_AMAP {
9007+ u32 dw[2];
9008+};
9009+
9010+/* This is an Ethernet Receive Completion Descriptor */
9011+struct BE_ETH_RX_COMPL_AMAP {
9012+ u8 vlan_tag[16]; /* DWORD 0 */
9013+ u8 pktsize[14]; /* DWORD 0 */
9014+ u8 port; /* DWORD 0 */
9015+ u8 rsvd0; /* DWORD 0 */
9016+ u8 err; /* DWORD 1 */
9017+ u8 rsshp; /* DWORD 1 */
9018+ u8 ipf; /* DWORD 1 */
9019+ u8 tcpf; /* DWORD 1 */
9020+ u8 udpf; /* DWORD 1 */
9021+ u8 ipcksm; /* DWORD 1 */
9022+ u8 tcpcksm; /* DWORD 1 */
9023+ u8 udpcksm; /* DWORD 1 */
9024+ u8 macdst[6]; /* DWORD 1 */
9025+ u8 vtp; /* DWORD 1 */
9026+ u8 vtm; /* DWORD 1 */
9027+ u8 fragndx[10]; /* DWORD 1 */
9028+ u8 ct[2]; /* DWORD 1 */
9029+ u8 ipsec; /* DWORD 1 */
9030+ u8 numfrags[3]; /* DWORD 1 */
9031+ u8 rsvd1[31]; /* DWORD 2 */
9032+ u8 valid; /* DWORD 2 */
9033+ u8 rsshash[32]; /* DWORD 3 */
9034+} __packed;
9035+struct ETH_RX_COMPL_AMAP {
9036+ u32 dw[4];
9037+};
9038+
9039+#endif /* __host_struct_amap_h__ */
9040--- /dev/null
9041+++ b/drivers/staging/benet/hwlib.h
9042@@ -0,0 +1,829 @@
9043+/*
9044+ * Copyright (C) 2005 - 2008 ServerEngines
9045+ * All rights reserved.
9046+ *
9047+ * This program is free software; you can redistribute it and/or
9048+ * modify it under the terms of the GNU General Public License version 2
9049+ * as published by the Free Software Foundation. The full GNU General
9050+ * Public License is included in this distribution in the file called COPYING.
9051+ *
9052+ * Contact Information:
9053+ * linux-drivers@serverengines.com
9054+ *
9055+ * ServerEngines
9056+ * 209 N. Fair Oaks Ave
9057+ * Sunnyvale, CA 94085
9058+ */
9059+#ifndef __hwlib_h__
9060+#define __hwlib_h__
9061+
9062+#include <linux/module.h>
9063+#include <linux/io.h>
9064+#include <linux/list.h>
9065+#include <linux/spinlock.h>
9066+
9067+#include "regmap.h" /* srcgen array map output */
9068+
9069+#include "asyncmesg.h"
9070+#include "fwcmd_opcodes.h"
9071+#include "post_codes.h"
9072+#include "fwcmd_mcc.h"
9073+
9074+#include "fwcmd_types_bmap.h"
9075+#include "fwcmd_common_bmap.h"
9076+#include "fwcmd_eth_bmap.h"
9077+#include "bestatus.h"
9078+/*
9080+ * Macros for reading/writing protection domain or CSR registers
9081+ * in BladeEngine.
9082+ */
9083+#define PD_READ(fo, field) ioread32((fo)->db_va + \
9084+ offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8)
9085+
9086+#define PD_WRITE(fo, field, val) iowrite32(val, (fo)->db_va + \
9087+ offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8)
9088+
9089+#define CSR_READ(fo, field) ioread32((fo)->csr_va + \
9090+ offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8)
9091+
9092+#define CSR_WRITE(fo, field, val) iowrite32(val, (fo)->csr_va + \
9093+ offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8)
9094+
9095+#define PCICFG0_READ(fo, field) ioread32((fo)->pci_va + \
9096+ offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8)
9097+
9098+#define PCICFG0_WRITE(fo, field, val) iowrite32(val, (fo)->pci_va + \
9099+ offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8)
9100+
9101+#define PCICFG1_READ(fo, field) ioread32((fo)->pci_va + \
9102+ offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8)
9103+
9104+#define PCICFG1_WRITE(fo, field, val) iowrite32(val, (fo)->pci_va + \
9105+ offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8)
9106+
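/*
 * Editorial note (not in the original patch): the offsetof(...)/8 in
 * the macros above converts an AMAP bit offset into a byte offset,
 * because every u8 member of a BE_..._AMAP pseudo-struct represents a
 * single bit (see the amap section below). mpu.c, for example, reads
 * the bootstrap doorbell with PD_READ(pfob, mcc_bootstrap_db).
 */
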
9107+#ifdef BE_DEBUG
9108+#define ASSERT(c) BUG_ON(!(c))
9109+#else
9110+#define ASSERT(c)
9111+#endif
9112+
9113+/* debug levels */
9114+enum BE_DEBUG_LEVELS {
9115+ DL_ALWAYS = 0, /* cannot be masked */
9116+ DL_ERR = 0x1, /* errors that should never happen */
9117+ DL_WARN = 0x2, /* something questionable;
9118+ recoverable errors */
9119+ DL_NOTE = 0x4, /* infrequent, important debug info */
9120+ DL_INFO = 0x8, /* debug information */
9121+ DL_VERBOSE = 0x10, /* detailed info, such as buffer traces */
9122+ BE_DL_MIN_VALUE = 0x1, /* this is the min value used */
9123+ BE_DL_MAX_VALUE = 0x80 /* this is the highest value used */
9124+} ;
9125+
9126+extern unsigned int trace_level;
9127+
9128+#define TRACE(lm, fmt, args...) { \
9129+ if (trace_level & lm) { \
9130+ printk(KERN_NOTICE "BE: %s:%d " fmt "\n", \
9131+ __FILE__ , __LINE__ , ## args); \
9132+ } \
9133+ }
9134+
9135+static inline unsigned int be_trace_set_level(unsigned int level)
9136+{
9137+ unsigned int old_level = trace_level;
9138+ trace_level = level;
9139+ return old_level;
9140+}
9141+
9142+#define be_trace_get_level() trace_level
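
/*
 * Editorial usage sketch (not in the original patch): enabling
 * warning and error traces, logging once, then restoring the old
 * level. The function name be_trace_example is hypothetical.
 */
static inline void be_trace_example(void)
{
	unsigned int old_level = be_trace_set_level(DL_ERR | DL_WARN);

	TRACE(DL_WARN, "eq id %d watermark reached", 5);
	be_trace_set_level(old_level);
}
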
9143+/*
9144+ * Returns number of pages spanned by the size of data
9145+ * starting at the given address.
9146+ */
9147+#define PAGES_SPANNED(_address, _size) \
9148+ ((u32)((((size_t)(_address) & (PAGE_SIZE - 1)) + \
9149+ (_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
9150+/* Byte offset into the page corresponding to given address */
9151+#define OFFSET_IN_PAGE(_addr_) ((size_t)(_addr_) & (PAGE_SIZE-1))
9152+
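/*
 * Editorial example (not in the original patch): with 4KB pages, an
 * 8KB buffer starting 100 bytes into a page gives
 * PAGES_SPANNED(100, 8192) == (100 + 8192 + 4095) >> 12 == 3 pages,
 * and OFFSET_IN_PAGE(100) == 100.
 */
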
9153+/*
9154+ * Circular subtract.
9155+ * Returns a - b assuming a circular number system, where a and b are
9156+ * in the range [0, max-1]. If a == b, zero is returned, so the
9157+ * highest value possible with this subtraction is max-1.
9158+ */
9159+static inline u32 be_subc(u32 a, u32 b, u32 max)
9160+{
9161+ ASSERT(a <= max && b <= max);
9162+ ASSERT(max > 0);
9163+ return (a >= b ? (a - b) : (max - b + a));
9164+}
9165+
9166+static inline u32 be_addc(u32 a, u32 b, u32 max)
9167+{
9168+ ASSERT(a < max);
9169+ ASSERT(max > 0);
9170+ return ((max - a > b) ? (a + b) : (b + a - max));
9171+}
9172+
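/*
 * Editorial sketch (not in the original patch): behaviour of the
 * circular helpers on an 8-entry ring, mirroring how mpu.c computes
 * mp_ring_num_pending(). The function name is hypothetical and the
 * ASSERTs only fire in BE_DEBUG builds.
 */
static inline void be_circular_example(void)
{
	ASSERT(be_addc(6, 3, 8) == 1);	/* 6 + 3 wraps around to 1 */
	ASSERT(be_subc(1, 6, 8) == 3);	/* 3 items pending: 6, 7, 0 */
	ASSERT(be_subc(5, 5, 8) == 0);	/* equal indices => empty */
}
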
9173+/* descriptor for a physically contiguous memory used for ring */
9174+struct ring_desc {
9175+ u32 length; /* length in bytes */
9176+ void *va; /* virtual address */
9177+ u64 pa; /* bus address */
9178+} ;
9179+
9180+/*
9181+ * This structure stores information about a ring shared between hardware
9182+ * and software. Each ring is allocated by the driver in the uncached
9183+ * extension and mapped into BladeEngine's unified table.
9184+ */
9185+struct mp_ring {
9186+ u32 pages; /* queue size in pages */
9187+ u32 id; /* queue id assigned by beklib */
9188+ u32 num; /* number of elements in queue */
9189+ u32 cidx; /* consumer index */
9190+ u32 pidx; /* producer index -- not used by most rings */
9191+ u32 itemSize; /* size in bytes of one object */
9192+
9193+ void *va; /* The virtual address of the ring.
9194+ This should be last to allow 32 & 64
9195+ bit debugger extensions to work. */
9196+} ;
9197+
9198+/*----------- amap bit field get / set macros and functions -----*/
9199+/*
9200+ * Structures defined in the map header files (under fw/amap/) with names
9201+ * in the format BE_<name>_AMAP are pseudo structures with members
9202+ * of type u8. These structures are templates that are used in
9203+ * conjunction with the structures with names in the format
9204+ * <name>_AMAP to calculate the bit masks and bit offsets to get or set
9205+ * bit fields in structures. The structures <name>_AMAP are arrays
9206+ * of 32-bit words and have the correct size. The following macros
9207+ * provide convenient ways to get and set the various members
9208+ * in the structures without using structures with bit fields.
9209+ * Always use the AMAP_GET_BITS_PTR and AMAP_SET_BITS_PTR
9210+ * macros to extract and set the various members.
9211+ */
9212+
9213+/*
9214+ * Returns a bit mask for the register that is NOT shifted into location.
9215+ * That means return values always look like: 0x1, 0xFF, 0x7FF, etc...
9216+ */
9217+static inline u32 amap_mask(u32 bit_size)
9218+{
9219+ return (bit_size == 32 ? 0xFFFFFFFF : (1 << bit_size) - 1);
9220+}
9221+
9222+#define AMAP_BIT_MASK(_struct_, field) \
9223+ amap_mask(AMAP_BIT_SIZE(_struct_, field))
9224+
9225+/*
9226+ * Non-optimized set-bits function: first clears the bits, then assigns them.
9227+ * This does not require knowledge of the particular DWORD you are setting.
9228+ * e.g. AMAP_SET_BITS_PTR (struct, field1, &contextMemory, 123);
9229+ */
9230+static inline void
9231+amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
9232+{
9233+ u32 *dw = (u32 *)ptr;
9234+ *(dw + dw_offset) &= ~(mask << offset);
9235+ *(dw + dw_offset) |= (mask & value) << offset;
9236+}
9237+
9238+#define AMAP_SET_BITS_PTR(_struct_, field, _structPtr_, val) \
9239+ amap_set(_structPtr_, AMAP_WORD_OFFSET(_struct_, field), \
9240+ AMAP_BIT_MASK(_struct_, field), AMAP_BIT_OFFSET(_struct_, field), val)
9241+
9242+/*
9243+ * Non-optimized routine that gets the bits without knowing the correct DWORD.
9244+ * e.g. fieldValue = AMAP_GET_BITS_PTR (struct, field1, &contextMemory);
9245+ */
9246+static inline u32
9247+amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
9248+{
9249+ u32 *dw = (u32 *)ptr;
9250+ return mask & (*(dw + dw_offset) >> offset);
9251+}
9252+#define AMAP_GET_BITS_PTR(_struct_, field, _structPtr_) \
9253+ amap_get(_structPtr_, AMAP_WORD_OFFSET(_struct_, field), \
9254+ AMAP_BIT_MASK(_struct_, field), AMAP_BIT_OFFSET(_struct_, field))
9255+
9256+/* Returns 0-31 representing bit offset within a DWORD of a bitfield. */
9257+#define AMAP_BIT_OFFSET(_struct_, field) \
9258+ (offsetof(struct BE_ ## _struct_ ## _AMAP, field) % 32)
9259+
9260+/* Returns 0-n representing DWORD offset of bitfield within the structure. */
9261+#define AMAP_WORD_OFFSET(_struct_, field) \
9262+ (offsetof(struct BE_ ## _struct_ ## _AMAP, field)/32)
9263+
9264+/* Returns size of bitfield in bits. */
9265+#define AMAP_BIT_SIZE(_struct_, field) \
9266+ sizeof(((struct BE_ ## _struct_ ## _AMAP*)0)->field)
9267+
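/*
 * Editorial worked example (not in the original patch): resolving the
 * macros above for the 'valid' bit of MCC_CQ_ENTRY (fwcmd_mcc.h).
 * Each u8 member counts as one bit, so offsetof()/sizeof() measure
 * bits: AMAP_BIT_SIZE == 1, AMAP_WORD_OFFSET == 127/32 == 3 and
 * AMAP_BIT_OFFSET == 127%32 == 31, i.e. bit 31 of dw[3]. A
 * hypothetical helper using this:
 */
static inline int be_cqe_is_valid(struct MCC_CQ_ENTRY_AMAP *cqe)
{
	return AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe) != 0;
}
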
9268+struct be_mcc_wrb_response_copy {
9269+ u16 length; /* bytes in response */
9270+ u16 fwcmd_offset; /* offset within the wrb of the response */
9271+ void *va; /* user's va to copy response into */
9272+
9273+} ;
9274+typedef void (*mcc_wrb_cqe_callback) (void *context, int status,
9275+ struct MCC_WRB_AMAP *optional_wrb);
9276+struct be_mcc_wrb_context {
9277+
9278+ mcc_wrb_cqe_callback internal_cb; /* Function to call on
9279+ completion */
9280+ void *internal_cb_context; /* Parameter to pass
9281+ to completion function */
9282+
9283+ mcc_wrb_cqe_callback cb; /* Function to call on completion */
9284+ void *cb_context; /* Parameter to pass to completion function */
9285+
9286+ int *users_final_status; /* pointer to a local
9287+ variable for synchronous
9288+ commands */
9289+ struct MCC_WRB_AMAP *wrb; /* pointer to original wrb for embedded
9290+ commands only */
9291+ struct list_head next; /* links context structs together in
9292+ free list */
9293+
9294+ struct be_mcc_wrb_response_copy copy; /* Optional parameters to copy
9295+ embedded response to user's va */
9296+
9297+#if defined(BE_DEBUG)
9298+ u16 subsystem, opcode; /* Track this FWCMD for debug builds. */
9299+ struct MCC_WRB_AMAP *ring_wrb;
9300+ u32 consumed_count;
9301+#endif
9302+} ;
9303+
9304+/*
9305+ Represents a function object for network or storage. This
9306+ is used to manage per-function resources like MCC CQs, etc.
9307+*/
9308+struct be_function_object {
9309+
9310+ u32 magic; /* magic for detecting memory corruption. */
9311+
9312+ /* PCI BAR mapped addresses */
9313+ u8 __iomem *csr_va; /* CSR */
9314+ u8 __iomem *db_va; /* Door Bell */
9315+ u8 __iomem *pci_va; /* PCI config space */
9316+ u32 emulate; /* if set, MPU is not available.
9317+ Emulate everything. */
9318+ u32 pend_queue_driving; /* if set, drive the queued WRBs
9319+ after releasing the WRB lock */
9320+
9321+ spinlock_t post_lock; /* lock for verifying one thread posting wrbs */
9322+ spinlock_t cq_lock; /* lock for verifying one thread
9323+ processing cq */
9324+ spinlock_t mcc_context_lock; /* lock for protecting mcc
9325+ context free list */
9326+ unsigned long post_irq;
9327+ unsigned long cq_irq;
9328+
9329+ u32 type;
9330+ u32 pci_function_number;
9331+
9332+ struct be_mcc_object *mcc; /* mcc rings. */
9333+
9334+ struct {
9335+ struct MCC_MAILBOX_AMAP *va; /* VA to the mailbox */
9336+ u64 pa; /* PA to the mailbox */
9337+ u32 length; /* byte length of mailbox */
9338+
9339+ /* One default context struct used for posting at
9340+ * least one MCC_WRB
9341+ */
9342+ struct be_mcc_wrb_context default_context;
9343+ bool default_context_allocated;
9344+ } mailbox;
9345+
9346+ struct {
9347+
9348+ /* Wake-on-LAN configuration. */
9349+ u32 wol_bitmask; /* bits 0-3 are set if the
9350+ corresponding index is enabled */
9351+ } config;
9352+
9353+
9354+ struct BE_FIRMWARE_CONFIG fw_config;
9355+} ;
9356+
9357+/*
9358+ Represents an Event Queue
9359+*/
9360+struct be_eq_object {
9361+ u32 magic;
9362+ atomic_t ref_count;
9363+
9364+ struct be_function_object *parent_function;
9365+
9366+ struct list_head eq_list;
9367+ struct list_head cq_list_head;
9368+
9369+ u32 eq_id;
9370+ void *cb_context;
9371+
9372+} ;
9373+
9374+/*
9375+ Manages a completion queue
9376+*/
9377+struct be_cq_object {
9378+ u32 magic;
9379+ atomic_t ref_count;
9380+
9381+ struct be_function_object *parent_function;
9382+ struct be_eq_object *eq_object;
9383+
9384+ struct list_head cq_list;
9385+ struct list_head cqlist_for_eq;
9386+
9387+ void *va;
9388+ u32 num_entries;
9389+
9390+ void *cb_context;
9391+
9392+ u32 cq_id;
9393+
9394+} ;
9395+
9396+/*
9397+ Manages an ethernet send queue
9398+*/
9399+struct be_ethsq_object {
9400+ u32 magic;
9401+
9402+ struct list_head list;
9403+
9404+ struct be_function_object *parent_function;
9405+ struct be_cq_object *cq_object;
9406+ u32 bid;
9407+
9408+} ;
9409+
9410+/*
9411+ Manages an ethernet receive queue
9413+*/
9414+struct be_ethrq_object {
9415+ u32 magic;
9416+ struct list_head list;
9417+ struct be_function_object *parent_function;
9418+ u32 rid;
9419+ struct be_cq_object *cq_object;
9420+ struct be_cq_object *rss_cq_object[4];
9421+
9422+} ;
9423+
9424+/*
9425+ Manages an MCC
9426+*/
9427+typedef void (*mcc_async_event_callback) (void *context, u32 event_code,
9428+ void *event);
9429+struct be_mcc_object {
9430+ u32 magic;
9431+
9432+ struct be_function_object *parent_function;
9433+ struct list_head mcc_list;
9434+
9435+ struct be_cq_object *cq_object;
9436+
9437+ /* Async event callback for MCC CQ. */
9438+ mcc_async_event_callback async_cb;
9439+ void *async_context;
9440+
9441+ struct {
9442+ struct be_mcc_wrb_context *base;
9443+ u32 num;
9444+ struct list_head list_head;
9445+ } wrb_context;
9446+
9447+ struct {
9448+ struct ring_desc *rd;
9449+ struct mp_ring ring;
9450+ } sq;
9451+
9452+ struct {
9453+ struct mp_ring ring;
9454+ } cq;
9455+
9456+ u32 processing; /* flag indicating that one thread
9457+ is processing CQ */
9458+ u32 rearm; /* doorbell rearm setting to make
9459+ sure the active processing thread
9460+ rearms the CQ if any thread requested it. */
9461+
9462+ struct list_head backlog;
9463+ u32 backlog_length;
9464+ u32 driving_backlog;
9465+ u32 consumed_index;
9466+
9467+} ;
9468+
9469+
9470+/* Queue context header -- the required software information for
9471+ * queueing a WRB.
9472+ */
9473+struct be_queue_driver_context {
9474+ mcc_wrb_cqe_callback internal_cb; /* Function to call on
9475+ completion */
9476+ void *internal_cb_context; /* Parameter to pass
9477+ to completion function */
9478+
9479+ mcc_wrb_cqe_callback cb; /* Function to call on completion */
9480+ void *cb_context; /* Parameter to pass to completion function */
9481+
9482+ struct be_mcc_wrb_response_copy copy; /* Optional parameters to copy
9483+ embedded response to user's va */
9484+ void *optional_fwcmd_va;
9485+ struct list_head list;
9486+ u32 bytes;
9487+} ;
9488+
9489+/*
9490+ * Common MCC WRB header that all commands require.
9491+ */
9492+struct be_mcc_wrb_header {
9493+ u8 rsvd[offsetof(struct BE_MCC_WRB_AMAP, payload)/8];
9494+} ;
9495+
9496+/*
9497+ * All non-embedded commands supported by hwlib functions allow only
9498+ * 1 SGE. This queue context handles them all.
9499+ */
9500+struct be_nonembedded_q_ctxt {
9501+ struct be_queue_driver_context context;
9502+ struct be_mcc_wrb_header wrb_header;
9503+ struct MCC_SGE_AMAP sge[1];
9504+} ;
9505+
9506+/*
9507+ * ------------------------------------------------------------------------
9508+ * This section contains the specific queue struct for each command.
9509+ * The user could always provide a be_generic_q_ctxt but this is a
9510+ * rather large struct. By using the specific struct, memory consumption
9511+ * can be reduced.
9512+ * ------------------------------------------------------------------------
9513+ */
9514+
9515+struct be_link_status_q_ctxt {
9516+ struct be_queue_driver_context context;
9517+ struct be_mcc_wrb_header wrb_header;
9518+ struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY fwcmd;
9519+} ;
9520+
9521+struct be_multicast_q_ctxt {
9522+ struct be_queue_driver_context context;
9523+ struct be_mcc_wrb_header wrb_header;
9524+ struct FWCMD_COMMON_NTWK_MULTICAST_SET fwcmd;
9525+} ;
9526+
9527+
9528+struct be_vlan_q_ctxt {
9529+ struct be_queue_driver_context context;
9530+ struct be_mcc_wrb_header wrb_header;
9531+ struct FWCMD_COMMON_NTWK_VLAN_CONFIG fwcmd;
9532+} ;
9533+
9534+struct be_promiscuous_q_ctxt {
9535+ struct be_queue_driver_context context;
9536+ struct be_mcc_wrb_header wrb_header;
9537+ struct FWCMD_ETH_PROMISCUOUS fwcmd;
9538+} ;
9539+
9540+struct be_force_failover_q_ctxt {
9541+ struct be_queue_driver_context context;
9542+ struct be_mcc_wrb_header wrb_header;
9543+ struct FWCMD_COMMON_FORCE_FAILOVER fwcmd;
9544+} ;
9545+
9546+
9547+struct be_rxf_filter_q_ctxt {
9548+ struct be_queue_driver_context context;
9549+ struct be_mcc_wrb_header wrb_header;
9550+ struct FWCMD_COMMON_NTWK_RX_FILTER fwcmd;
9551+} ;
9552+
9553+struct be_eq_modify_delay_q_ctxt {
9554+ struct be_queue_driver_context context;
9555+ struct be_mcc_wrb_header wrb_header;
9556+ struct FWCMD_COMMON_MODIFY_EQ_DELAY fwcmd;
9557+} ;
9558+
9559+/*
9560+ * The generic context is the largest size that would be required.
9561+ * It is the software context plus an entire WRB.
9562+ */
9563+struct be_generic_q_ctxt {
9564+ struct be_queue_driver_context context;
9565+ struct be_mcc_wrb_header wrb_header;
9566+ struct MCC_WRB_PAYLOAD_AMAP payload;
9567+} ;
9568+
9569+/*
9570+ * Types for the BE_QUEUE_CONTEXT object.
9571+ */
9572+#define BE_QUEUE_INVALID (0)
9573+#define BE_QUEUE_LINK_STATUS (0xA006)
9574+#define BE_QUEUE_ETH_STATS (0xA007)
9575+#define BE_QUEUE_TPM_STATS (0xA008)
9576+#define BE_QUEUE_TCP_STATS (0xA009)
9577+#define BE_QUEUE_MULTICAST (0xA00A)
9578+#define BE_QUEUE_VLAN (0xA00B)
9579+#define BE_QUEUE_RSS (0xA00C)
9580+#define BE_QUEUE_FORCE_FAILOVER (0xA00D)
9581+#define BE_QUEUE_PROMISCUOUS (0xA00E)
9582+#define BE_QUEUE_WAKE_ON_LAN (0xA00F)
9583+#define BE_QUEUE_NOP (0xA010)
9584+
9585+/* --- BE_FUNCTION_ENUM --- */
9586+#define BE_FUNCTION_TYPE_ISCSI (0)
9587+#define BE_FUNCTION_TYPE_NETWORK (1)
9588+#define BE_FUNCTION_TYPE_ARM (2)
9589+
9590+/* --- BE_ETH_TX_RING_TYPE_ENUM --- */
9591+#define BE_ETH_TX_RING_TYPE_FORWARDING (1) /* Ether ring for forwarding */
9592+#define BE_ETH_TX_RING_TYPE_STANDARD (2) /* Ether ring for sending */
9593+ /* network packets. */
9594+#define BE_ETH_TX_RING_TYPE_BOUND (3) /* Ethernet ring for sending */
9595+ /* network packets, bound */
9596+ /* to a physical port. */
9597+/*
9598+ * ----------------------------------------------------------------------
9599+ * API MACROS
9600+ * ----------------------------------------------------------------------
9601+ */
9602+#define BE_FWCMD_NAME(_short_name_) struct FWCMD_##_short_name_
9603+#define BE_OPCODE_NAME(_short_name_) OPCODE_##_short_name_
9604+#define BE_SUBSYSTEM_NAME(_short_name_) SUBSYSTEM_##_short_name_
9605+
9606+
9607+#define BE_PREPARE_EMBEDDED_FWCMD(_pfob_, _wrb_, _short_name_) \
9608+ ((BE_FWCMD_NAME(_short_name_) *) \
9609+ be_function_prepare_embedded_fwcmd(_pfob_, _wrb_, \
9610+ sizeof(BE_FWCMD_NAME(_short_name_)), \
9611+ FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
9612+ FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
9613+ BE_OPCODE_NAME(_short_name_), \
9614+ BE_SUBSYSTEM_NAME(_short_name_)));
9615+
9616+#define BE_PREPARE_NONEMBEDDED_FWCMD(_pfob_, _wrb_, _iva_, _ipa_, _short_name_)\
9617+ ((BE_FWCMD_NAME(_short_name_) *) \
9618+ be_function_prepare_nonembedded_fwcmd(_pfob_, _wrb_, (_iva_), (_ipa_), \
9619+ sizeof(BE_FWCMD_NAME(_short_name_)), \
9620+ FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
9621+ FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
9622+ BE_OPCODE_NAME(_short_name_), \
9623+ BE_SUBSYSTEM_NAME(_short_name_)));
9624+
9625+int be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
9626+ u8 __iomem *pci_va, u32 function_type, struct ring_desc *mailbox_rd,
9627+ struct be_function_object *pfob);
9628+
9629+int be_function_object_destroy(struct be_function_object *pfob);
9630+int be_function_cleanup(struct be_function_object *pfob);
9631+
9632+
9633+int be_function_get_fw_version(struct be_function_object *pfob,
9634+ struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fw_version,
9635+ mcc_wrb_cqe_callback cb, void *cb_context);
9636+
9637+
9638+int be_eq_modify_delay(struct be_function_object *pfob,
9639+ u32 num_eq, struct be_eq_object **eq_array,
9640+ u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
9641+ void *cb_context,
9642+ struct be_eq_modify_delay_q_ctxt *q_ctxt);
9643+
9644+
9645+
9646+int be_eq_create(struct be_function_object *pfob,
9647+ struct ring_desc *rd, u32 eqe_size, u32 num_entries,
9648+ u32 watermark, u32 timer_delay, struct be_eq_object *eq_object);
9649+
9650+int be_eq_destroy(struct be_eq_object *eq);
9651+
9652+int be_cq_create(struct be_function_object *pfob,
9653+ struct ring_desc *rd, u32 length,
9654+ bool solicited_eventable, bool no_delay,
9655+ u32 wm_thresh, struct be_eq_object *eq_object,
9656+ struct be_cq_object *cq_object);
9657+
9658+int be_cq_destroy(struct be_cq_object *cq);
9659+
9660+int be_mcc_ring_create(struct be_function_object *pfob,
9661+ struct ring_desc *rd, u32 length,
9662+ struct be_mcc_wrb_context *context_array,
9663+ u32 num_context_entries,
9664+ struct be_cq_object *cq, struct be_mcc_object *mcc);
9665+int be_mcc_ring_destroy(struct be_mcc_object *mcc_object);
9666+
9667+int be_mcc_process_cq(struct be_mcc_object *mcc_object, bool rearm);
9668+
9669+int be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
9670+ mcc_async_event_callback cb, void *cb_context);
9671+
9672+int be_pci_soft_reset(struct be_function_object *pfob);
9673+
9674+
9675+int be_drive_POST(struct be_function_object *pfob);
9676+
9677+
9678+int be_eth_sq_create(struct be_function_object *pfob,
9679+ struct ring_desc *rd, u32 length_in_bytes,
9680+ u32 type, u32 ulp, struct be_cq_object *cq_object,
9681+ struct be_ethsq_object *eth_sq);
9682+
9683+struct be_eth_sq_parameters {
9684+ u32 port;
9685+ u32 rsvd0[2];
9686+} ;
9687+
9688+int be_eth_sq_create_ex(struct be_function_object *pfob,
9689+ struct ring_desc *rd, u32 length_in_bytes,
9690+ u32 type, u32 ulp, struct be_cq_object *cq_object,
9691+ struct be_eth_sq_parameters *ex_parameters,
9692+ struct be_ethsq_object *eth_sq);
9693+int be_eth_sq_destroy(struct be_ethsq_object *eth_sq);
9694+
9695+int be_eth_set_flow_control(struct be_function_object *pfob,
9696+ bool txfc_enable, bool rxfc_enable);
9697+
9698+int be_eth_get_flow_control(struct be_function_object *pfob,
9699+ bool *txfc_enable, bool *rxfc_enable);
9700+int be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps);
9701+
9702+int be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps);
9703+
9704+int be_eth_set_frame_size(struct be_function_object *pfob,
9705+ u32 *tx_frame_size, u32 *rx_frame_size);
9706+
9707+int be_eth_rq_create(struct be_function_object *pfob,
9708+ struct ring_desc *rd, struct be_cq_object *cq_object,
9709+ struct be_cq_object *bcmc_cq_object,
9710+ struct be_ethrq_object *eth_rq);
9711+
9712+int be_eth_rq_destroy(struct be_ethrq_object *eth_rq);
9713+
9714+int be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush,
9715+ mcc_wrb_cqe_callback cb, void *cb_context);
9716+int be_eth_rq_set_frag_size(struct be_function_object *pfob,
9717+ u32 new_frag_size_bytes, u32 *actual_frag_size_bytes);
9718+int be_eth_rq_get_frag_size(struct be_function_object *pfob,
9719+ u32 *frag_size_bytes);
9720+
9721+void *be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
9722+ struct MCC_WRB_AMAP *wrb,
9723+ u32 payload_length, u32 request_length,
9724+ u32 response_length, u32 opcode, u32 subsystem);
9725+void *be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
9726+ struct MCC_WRB_AMAP *wrb, void *fwcmd_header_va, u64 fwcmd_header_pa,
9727+ u32 payload_length, u32 request_length, u32 response_length,
9728+ u32 opcode, u32 subsystem);
9729+
9730+
9731+struct MCC_WRB_AMAP *
9732+be_function_peek_mcc_wrb(struct be_function_object *pfob);
9733+
9734+int be_rxf_mac_address_read_write(struct be_function_object *pfob,
9735+ bool port1, bool mac1, bool mgmt,
9736+ bool write, bool permanent, u8 *mac_address,
9737+ mcc_wrb_cqe_callback cb,
9738+ void *cb_context);
9739+
9740+int be_rxf_multicast_config(struct be_function_object *pfob,
9741+ bool promiscuous, u32 num, u8 *mac_table,
9742+ mcc_wrb_cqe_callback cb,
9743+ void *cb_context,
9744+ struct be_multicast_q_ctxt *q_ctxt);
9745+
9746+int be_rxf_vlan_config(struct be_function_object *pfob,
9747+ bool promiscuous, u32 num, u16 *vlan_tag_array,
9748+ mcc_wrb_cqe_callback cb, void *cb_context,
9749+ struct be_vlan_q_ctxt *q_ctxt);
9750+
9751+
9752+int be_rxf_link_status(struct be_function_object *pfob,
9753+ struct BE_LINK_STATUS *link_status,
9754+ mcc_wrb_cqe_callback cb,
9755+ void *cb_context,
9756+ struct be_link_status_q_ctxt *q_ctxt);
9757+
9758+
9759+int be_rxf_query_eth_statistics(struct be_function_object *pfob,
9760+ struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd,
9761+ u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb,
9762+ void *cb_context,
9763+ struct be_nonembedded_q_ctxt *q_ctxt);
9764+
9765+int be_rxf_promiscuous(struct be_function_object *pfob,
9766+ bool enable_port0, bool enable_port1,
9767+ mcc_wrb_cqe_callback cb, void *cb_context,
9768+ struct be_promiscuous_q_ctxt *q_ctxt);
9769+
9770+
9771+int be_rxf_filter_config(struct be_function_object *pfob,
9772+ struct NTWK_RX_FILTER_SETTINGS *settings,
9773+ mcc_wrb_cqe_callback cb,
9774+ void *cb_context,
9775+ struct be_rxf_filter_q_ctxt *q_ctxt);
9776+
9777+/*
9778+ * ------------------------------------------------------
9779+ * internal functions used by hwlib
9780+ * ------------------------------------------------------
9781+ */
9782+
9783+
9784+int be_function_ring_destroy(struct be_function_object *pfob,
9785+ u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
9786+ void *cb_context,
9787+ mcc_wrb_cqe_callback internal_cb,
9788+ void *internal_callback_context);
9789+
9790+int be_function_post_mcc_wrb(struct be_function_object *pfob,
9791+ struct MCC_WRB_AMAP *wrb,
9792+ struct be_generic_q_ctxt *q_ctxt,
9793+ mcc_wrb_cqe_callback cb, void *cb_context,
9794+ mcc_wrb_cqe_callback internal_cb,
9795+ void *internal_cb_context, void *optional_fwcmd_va,
9796+ struct be_mcc_wrb_response_copy *response_copy);
9797+
9798+int be_function_queue_mcc_wrb(struct be_function_object *pfob,
9799+ struct be_generic_q_ctxt *q_ctxt);
9800+
9801+/*
9802+ * ------------------------------------------------------
9803+ * MCC QUEUE
9804+ * ------------------------------------------------------
9805+ */
9806+
9807+int be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *rd);
9808+
9809+
9810+struct MCC_WRB_AMAP *
9811+_be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue);
9812+
9813+struct be_mcc_wrb_context *
9814+_be_mcc_allocate_wrb_context(struct be_function_object *pfob);
9815+
9816+void _be_mcc_free_wrb_context(struct be_function_object *pfob,
9817+ struct be_mcc_wrb_context *context);
9818+
9819+int _be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
9820+ struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);
9821+
9822+int _be_mpu_post_wrb_ring(struct be_mcc_object *mcc,
9823+ struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);
9824+
9825+void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc);
9826+
9827+
9828+/*
9829+ * ------------------------------------------------------
9830+ * Ring Sizes
9831+ * ------------------------------------------------------
9832+ */
9833+static inline u32 be_ring_encoding_to_length(u32 encoding, u32 object_size)
9834+{
9835+
9836+ ASSERT(encoding != 1); /* 1 is rsvd */
9837+ ASSERT(encoding < 16);
9838+ ASSERT(object_size > 0);
9839+
9840+ if (encoding == 0) /* 32k deep */
9841+ encoding = 16;
9842+
9843+ return (1 << (encoding - 1)) * object_size;
9844+}
9845+
9846+static inline
9847+u32 be_ring_length_to_encoding(u32 length_in_bytes, u32 object_size)
9848+{
9849+
9850+ u32 count, encoding;
9851+
9852+ ASSERT(object_size > 0);
9853+ ASSERT(length_in_bytes % object_size == 0);
9854+
9855+ count = length_in_bytes / object_size;
9856+
9857+ ASSERT(count > 1);
9858+ ASSERT(count <= 32 * 1024);
9859+ ASSERT(length_in_bytes <= 8 * PAGE_SIZE); /* max ring size in UT */
9860+
9861+ encoding = __ilog2_u32(count) + 1;
9862+
9863+ if (encoding == 16)
9864+ encoding = 0; /* 32k deep */
9865+
9866+ return encoding;
9867+}
9868+
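/*
 * Editorial example (not in the original patch): round-tripping a
 * 256-entry ring of 16-byte entries through the helpers above. The
 * encoding is log2(entries) + 1, with 0 denoting the maximum
 * 32k-deep ring:
 *   be_ring_length_to_encoding(256 * 16, 16) == 9    (2^(9-1) == 256)
 *   be_ring_encoding_to_length(9, 16)        == 4096 bytes
 *   be_ring_encoding_to_length(0, 16)        == 32768 * 16 bytes
 */
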
9869+void be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list,
9870+ u32 max_num);
9871+#endif /* __hwlib_h__ */
9872--- /dev/null
9873+++ b/drivers/staging/benet/Kconfig
9874@@ -0,0 +1,7 @@
9875+config BENET
9876+ tristate "ServerEngines 10Gb NIC - BladeEngine"
9877+ depends on PCI && INET
9878+ select INET_LRO
9879+ help
9880+ This driver implements the NIC functionality for the ServerEngines
9881+ BladeEngine 10Gb network adapter (EC 3210).
9882--- /dev/null
9883+++ b/drivers/staging/benet/MAINTAINERS
9884@@ -0,0 +1,6 @@
8885+SERVER ENGINES 10GbE NIC - BLADE-ENGINE
9886+P: Subbu Seetharaman
9887+M: subbus@serverengines.com
9888+L: netdev@vger.kernel.org
9889+W: http://www.serverengines.com
9890+S: Supported
9891--- /dev/null
9892+++ b/drivers/staging/benet/Makefile
9893@@ -0,0 +1,14 @@
9894+#
8895+# Makefile to build the network driver for ServerEngines' BladeEngine
9896+#
9897+obj-$(CONFIG_BENET) += benet.o
9898+
9899+benet-y := be_init.o \
9900+ be_int.o \
9901+ be_netif.o \
9902+ be_ethtool.o \
9903+ funcobj.o \
9904+ cq.o \
9905+ eq.o \
9906+ mpu.o \
9907+ eth.o
9908--- /dev/null
9909+++ b/drivers/staging/benet/mpu.c
9910@@ -0,0 +1,1364 @@
9911+/*
9912+ * Copyright (C) 2005 - 2008 ServerEngines
9913+ * All rights reserved.
9914+ *
9915+ * This program is free software; you can redistribute it and/or
9916+ * modify it under the terms of the GNU General Public License version 2
9917+ * as published by the Free Software Foundation. The full GNU General
9918+ * Public License is included in this distribution in the file called COPYING.
9919+ *
9920+ * Contact Information:
9921+ * linux-drivers@serverengines.com
9922+ *
9923+ * ServerEngines
9924+ * 209 N. Fair Oaks Ave
9925+ * Sunnyvale, CA 94085
9926+ */
9927+#include <linux/delay.h>
9928+#include "hwlib.h"
9929+#include "bestatus.h"
9930+
9931+static
9932+inline void mp_ring_create(struct mp_ring *ring, u32 num, u32 size, void *va)
9933+{
9934+ ASSERT(ring);
9935+ memset(ring, 0, sizeof(struct mp_ring));
9936+ ring->num = num;
9937+ ring->pages = DIV_ROUND_UP(num * size, PAGE_SIZE);
9938+ ring->itemSize = size;
9939+ ring->va = va;
9940+}
9941+
9942+/*
9943+ * -----------------------------------------------------------------------
9944+ * Interface for two-index rings, i.e. consumer/producer rings
9945+ * --------------------------------------------------------------------------
9946+ */
9947+
9948+/* Returns the number of items pending on the ring. */
9949+static inline u32 mp_ring_num_pending(struct mp_ring *ring)
9950+{
9951+ ASSERT(ring);
9952+ if (ring->num == 0)
9953+ return 0;
9954+ return be_subc(ring->pidx, ring->cidx, ring->num);
9955+}
9956+
9957+/* Returns the number of items free on the ring. */
9958+static inline u32 mp_ring_num_empty(struct mp_ring *ring)
9959+{
9960+ ASSERT(ring);
9961+ return ring->num - 1 - mp_ring_num_pending(ring);
9962+}
9963+
9964+/* Consume 1 item */
9965+static inline void mp_ring_consume(struct mp_ring *ring)
9966+{
9967+ ASSERT(ring);
9968+ ASSERT(ring->pidx != ring->cidx);
9969+
9970+ ring->cidx = be_addc(ring->cidx, 1, ring->num);
9971+}
9972+
9973+/* Produce 1 item */
9974+static inline void mp_ring_produce(struct mp_ring *ring)
9975+{
9976+ ASSERT(ring);
9977+ ring->pidx = be_addc(ring->pidx, 1, ring->num);
9978+}
9979+
9980+/* Consume count items */
9981+static inline void mp_ring_consume_multiple(struct mp_ring *ring, u32 count)
9982+{
9983+ ASSERT(ring);
9984+ ASSERT(mp_ring_num_pending(ring) >= count);
9985+ ring->cidx = be_addc(ring->cidx, count, ring->num);
9986+}
9987+
9988+static inline void *mp_ring_item(struct mp_ring *ring, u32 index)
9989+{
9990+ ASSERT(ring);
9991+ ASSERT(index < ring->num);
9992+ ASSERT(ring->itemSize > 0);
9993+ return (u8 *) ring->va + index * ring->itemSize;
9994+}
9995+
9996+/* Ptr to produce item */
9997+static inline void *mp_ring_producer_ptr(struct mp_ring *ring)
9998+{
9999+ ASSERT(ring);
10000+ return mp_ring_item(ring, ring->pidx);
10001+}
10002+
10003+/*
10004+ * Returns a pointer to the current location in the ring.
10005+ * This is used for rings with 1 index.
10006+ */
10007+static inline void *mp_ring_current(struct mp_ring *ring)
10008+{
10009+ ASSERT(ring);
10010+ ASSERT(ring->pidx == 0); /* not used */
10011+
10012+ return mp_ring_item(ring, ring->cidx);
10013+}
10014+
10015+/*
10016+ * Increment index for rings with only 1 index.
10017+ * This is used for rings with 1 index.
10018+ */
10019+static inline void *mp_ring_next(struct mp_ring *ring)
10020+{
10021+ ASSERT(ring);
10022+ ASSERT(ring->num > 0);
10023+ ASSERT(ring->pidx == 0); /* not used */
10024+
10025+ ring->cidx = be_addc(ring->cidx, 1, ring->num);
10026+ return mp_ring_current(ring);
10027+}
10028+
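/*
 * Editorial usage sketch (not in the original patch): a single
 * produce/consume round trip on a small ring. 'buf' stands in for
 * ring memory normally allocated by the driver; the function name is
 * hypothetical.
 */
static inline void mp_ring_example(void *buf)
{
	struct mp_ring ring;

	mp_ring_create(&ring, 8, 64, buf);	/* 8 entries of 64 bytes */
	ASSERT(mp_ring_num_empty(&ring) == 7);	/* one slot is kept free */

	memset(mp_ring_producer_ptr(&ring), 0, ring.itemSize);
	mp_ring_produce(&ring);
	ASSERT(mp_ring_num_pending(&ring) == 1);

	mp_ring_consume(&ring);
	ASSERT(mp_ring_num_pending(&ring) == 0);
}
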
10029+/*
10030+ This routine waits for a previously posted mailbox WRB to be completed.
10031+ Specifically it waits for the mailbox to say that it's ready to accept
10032+ more data by setting the LSB of the mailbox pd register to 1.
10033+
10034+ pcontroller - The function object to post this data to
10035+
10036+ IRQL < DISPATCH_LEVEL
10037+*/
10038+static void be_mcc_mailbox_wait(struct be_function_object *pfob)
10039+{
10040+ struct MPU_MAILBOX_DB_AMAP mailbox_db;
10041+ u32 i = 0;
10042+ u32 ready;
10043+
10044+ if (pfob->emulate) {
10045+ /* No waiting for mailbox in emulated mode. */
10046+ return;
10047+ }
10048+
10049+ mailbox_db.dw[0] = PD_READ(pfob, mcc_bootstrap_db);
10050+ ready = AMAP_GET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db);
10051+
10052+ while (!ready) {
10053+ if ((++i & 0x3FFFF) == 0) {
10054+ TRACE(DL_WARN, "Waiting for mailbox ready - %dk polls",
10055+ i / 1000);
10056+ }
10057+ udelay(1);
10058+ mailbox_db.dw[0] = PD_READ(pfob, mcc_bootstrap_db);
10059+ ready = AMAP_GET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db);
10060+ }
10061+}
10062+
10063+/*
10064+ This routine tells the MCC mailbox that there is data to be processed
10065+ in the mailbox. It does this by setting the physical address for the
10066+ mailbox location and clearing the LSB. This routine returns immediately
10067+ and does not wait for the WRB to be processed.
10068+
10069+ pcontroller - The function object to post this data to
10070+
10071+ IRQL < DISPATCH_LEVEL
10072+
10073+*/
10074+static void be_mcc_mailbox_notify(struct be_function_object *pfob)
10075+{
10076+ struct MPU_MAILBOX_DB_AMAP mailbox_db;
10077+ u32 pa;
10078+
10079+ ASSERT(pfob->mailbox.pa);
10080+ ASSERT(pfob->mailbox.va);
10081+
10082+ /* If emulated, do not ring the mailbox */
10083+ if (pfob->emulate) {
10084+ TRACE(DL_WARN, "MPU disabled. Skipping mailbox notify.");
10085+ return;
10086+ }
10087+
10088+ /* form the higher bits in the address */
10089+ mailbox_db.dw[0] = 0; /* init */
10090+ AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, hi, &mailbox_db, 1);
10091+ AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db, 0);
10092+
10093+ /* bits 34 to 63 */
10094+ pa = (u32) (pfob->mailbox.pa >> 34);
10095+ AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, address, &mailbox_db, pa);
10096+
10097+ /* Wait for the MPU to be ready */
10098+ be_mcc_mailbox_wait(pfob);
10099+
10100+ /* Ring doorbell 1st time */
10101+ PD_WRITE(pfob, mcc_bootstrap_db, mailbox_db.dw[0]);
10102+
10103+ /* Wait for 1st write to be acknowledged. */
10104+ be_mcc_mailbox_wait(pfob);
10105+
10106+ /* lower 30 bits, starting from bit 4 (bits 4 to 33) */
10107+ pa = (u32) (pfob->mailbox.pa >> 4) & 0x3FFFFFFF;
10108+
10109+ AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, hi, &mailbox_db, 0);
10110+ AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db, 0);
10111+ AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, address, &mailbox_db, pa);
10112+
10113+ /* Ring doorbell 2nd time */
10114+ PD_WRITE(pfob, mcc_bootstrap_db, mailbox_db.dw[0]);
10115+}
10116+
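/*
 * Editorial note (not in the original patch): the doorbell register
 * carries only a 30-bit address field, which is why the 64-bit
 * mailbox PA is split across two writes above. For example, with
 * pa == 0x400001230:
 *   write 1 (hi == 1): pa >> 34 == 0x1 (bits 34-63)
 *   write 2 (hi == 0): (pa >> 4) & 0x3FFFFFFF == 0x123 (bits 4-33)
 * Bits 0-3 are never transmitted, so the mailbox must be at least
 * 16-byte aligned.
 */
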
10117+/*
10118+ This routine tells the MCC mailbox that there is data to be processed
10119+ in the mailbox. It does this by setting the physical address for the
10120+ mailbox location and clearing the LSB. This routine spins until the
10121+ MPU writes a 1 into the LSB indicating that the data has been received
10122+ and is ready to be processed.
10123+
10124+ pcontroller - The function object to post this data to
10125+
10126+ IRQL < DISPATCH_LEVEL
10127+*/
10128+static void
10129+be_mcc_mailbox_notify_and_wait(struct be_function_object *pfob)
10130+{
10131+ /*
10132+ * Notify it
10133+ */
10134+ be_mcc_mailbox_notify(pfob);
10135+ /*
10136+ * Now wait for completion of WRB
10137+ */
10138+ be_mcc_mailbox_wait(pfob);
10139+}
10140+
10141+void
10142+be_mcc_process_cqe(struct be_function_object *pfob,
10143+ struct MCC_CQ_ENTRY_AMAP *cqe)
10144+{
10145+ struct be_mcc_wrb_context *wrb_context = NULL;
10146+ u32 offset, status;
10147+ u8 *p;
10148+
10149+ ASSERT(cqe);
10150+ /*
10151+ * A command completed. Commands complete out-of-order.
10152+ * Determine which command completed from the TAG.
10153+ */
10154+ offset = offsetof(struct BE_MCC_CQ_ENTRY_AMAP, mcc_tag)/8;
10155+ p = (u8 *) cqe + offset;
10156+ wrb_context = (struct be_mcc_wrb_context *)(void *)(size_t)(*(u64 *)p);
10157+ ASSERT(wrb_context);
10158+
10159+ /*
10160+ * Perform a response copy if requested.
10161+ * Only copy data if the FWCMD is successful.
10162+ */
10163+ status = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, completion_status, cqe);
10164+ if (status == MGMT_STATUS_SUCCESS && wrb_context->copy.length > 0) {
10165+ ASSERT(wrb_context->wrb);
10166+ ASSERT(wrb_context->copy.va);
10167+ p = (u8 *)wrb_context->wrb +
10168+ offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
10169+ memcpy(wrb_context->copy.va,
10170+ (u8 *)p + wrb_context->copy.fwcmd_offset,
10171+ wrb_context->copy.length);
10172+ }
10173+
10174+ if (status)
10175+ status = BE_NOT_OK;
10176+ /* internal callback */
10177+ if (wrb_context->internal_cb) {
10178+ wrb_context->internal_cb(wrb_context->internal_cb_context,
10179+ status, wrb_context->wrb);
10180+ }
10181+
10182+ /* callback */
10183+ if (wrb_context->cb) {
10184+ wrb_context->cb(wrb_context->cb_context,
10185+ status, wrb_context->wrb);
10186+ }
10187+ /* Free the context structure */
10188+ _be_mcc_free_wrb_context(pfob, wrb_context);
10189+}
10190+
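/*
 * Editorial sketch (not in the original patch): the inverse of the
 * tag recovery in be_mcc_process_cqe() above. At post time the
 * driver stores the wrb_context pointer in the two 32-bit tag words
 * of the WRB so the completion handler can find it again; a
 * hypothetical helper for that store:
 */
static inline void be_wrb_set_tag(struct MCC_WRB_AMAP *wrb,
				  struct be_mcc_wrb_context *context)
{
	u64 tag = (u64)(size_t)context;
	u8 *p = (u8 *)wrb + offsetof(struct BE_MCC_WRB_AMAP, tag)/8;

	memcpy(p, &tag, sizeof(tag));	/* tag[0]/tag[1] hold the pointer */
}
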
10191+void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc)
10192+{
10193+ struct be_function_object *pfob = NULL;
10194+ int status = BE_PENDING;
10195+ struct be_generic_q_ctxt *q_ctxt;
10196+ struct MCC_WRB_AMAP *wrb;
10197+ struct MCC_WRB_AMAP *queue_wrb;
10198+ u32 length, payload_length, sge_count, embedded;
10199+ unsigned long irql;
10200+
10201+ BUILD_BUG_ON((sizeof(struct be_generic_q_ctxt) <
10202+ sizeof(struct be_queue_driver_context) +
10203+ sizeof(struct MCC_WRB_AMAP)));
10204+ pfob = mcc->parent_function;
10205+
10206+ spin_lock_irqsave(&pfob->post_lock, irql);
10207+
10208+ if (mcc->driving_backlog) {
10209+ spin_unlock_irqrestore(&pfob->post_lock, irql);
10210+ if (pfob->pend_queue_driving && pfob->mcc) {
10211+ pfob->pend_queue_driving = 0;
10212+ be_drive_mcc_wrb_queue(pfob->mcc);
10213+ }
10214+ return;
10215+ }
10216+ /* Acquire the flag to limit 1 thread to redrive posts. */
10217+ mcc->driving_backlog = 1;
10218+
10219+ while (!list_empty(&mcc->backlog)) {
10220+ wrb = _be_mpu_peek_ring_wrb(mcc, true); /* Driving the queue */
10221+ if (!wrb)
10222+ break; /* No space in the ring yet. */
10223+ /* Get the next queued entry to process. */
10224+ q_ctxt = list_first_entry(&mcc->backlog,
10225+ struct be_generic_q_ctxt, context.list);
10226+ list_del(&q_ctxt->context.list);
10227+ pfob->mcc->backlog_length--;
10228+ /*
10229+ * Compute the required length of the WRB.
10230+ * Since the queue element may be smaller than
10231+ * the complete WRB, copy only the required number of bytes.
10232+ */
10233+ queue_wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
10234+ embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, queue_wrb);
10235+ if (embedded) {
10236+ payload_length = AMAP_GET_BITS_PTR(MCC_WRB,
10237+ payload_length, queue_wrb);
10238+ length = sizeof(struct be_mcc_wrb_header) +
10239+ payload_length;
10240+ } else {
10241+ sge_count = AMAP_GET_BITS_PTR(MCC_WRB, sge_count,
10242+ queue_wrb);
10243+ ASSERT(sge_count == 1); /* only 1 frag. */
10244+ length = sizeof(struct be_mcc_wrb_header) +
10245+ sge_count * sizeof(struct MCC_SGE_AMAP);
10246+ }
10247+
10248+ /*
10249+ * Truncate the length based on the size of the
10250+ * queue element. Some elements that have output parameters
10251+ * can be smaller than the payload_length field would
10252+ * indicate. We really only need to copy the request
10253+ * parameters, not the response.
10254+ */
10255+ length = min(length, (u32) (q_ctxt->context.bytes -
10256+ offsetof(struct be_generic_q_ctxt, wrb_header)));
10257+
10258+ /* Copy the queue element WRB into the ring. */
10259+ memcpy(wrb, &q_ctxt->wrb_header, length);
10260+
10261+ /* Post the wrb. This should not fail assuming we have
10262+ * enough context structs. */
10263+ status = be_function_post_mcc_wrb(pfob, wrb, NULL,
10264+ q_ctxt->context.cb, q_ctxt->context.cb_context,
10265+ q_ctxt->context.internal_cb,
10266+ q_ctxt->context.internal_cb_context,
10267+ q_ctxt->context.optional_fwcmd_va,
10268+ &q_ctxt->context.copy);
10269+
10270+ if (status == BE_SUCCESS) {
10271+ /*
10272+ * Synchronous completion. Since it was queued,
10273+ * we will invoke the callback.
10274+ * To the user, this is an asynchronous request.
10275+ */
10276+ spin_unlock_irqrestore(&pfob->post_lock, irql);
10277+ if (pfob->pend_queue_driving && pfob->mcc) {
10278+ pfob->pend_queue_driving = 0;
10279+ be_drive_mcc_wrb_queue(pfob->mcc);
10280+ }
10281+
10282+ ASSERT(q_ctxt->context.cb);
10283+
10284+ q_ctxt->context.cb(
10285+ q_ctxt->context.cb_context,
10286+ BE_SUCCESS, NULL);
10287+
10288+ spin_lock_irqsave(&pfob->post_lock, irql);
10289+
10290+ } else if (status != BE_PENDING) {
10291+ /*
10292+ * Another resource failed. Should never happen
10293+ * if we have sufficient MCC_WRB_CONTEXT structs.
10294+ * Return to head of the queue.
10295+ */
10296+ TRACE(DL_WARN, "Failed to post a queued WRB. 0x%x",
10297+ status);
10298+ list_add(&q_ctxt->context.list, &mcc->backlog);
10299+ pfob->mcc->backlog_length++;
10300+ break;
10301+ }
10302+ }
10303+
10304+ /* Free the flag to limit 1 thread to redrive posts. */
10305+ mcc->driving_backlog = 0;
10306+ spin_unlock_irqrestore(&pfob->post_lock, irql);
10307+}
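+
+/*
+ * Illustrative sketch (not from the driver): the "driving_backlog" pattern
+ * used above. One thread claims a flag under the lock, drains the backlog,
+ * and releases the flag; late arrivals see the flag set and leave the work
+ * to the current drainer. All names here are hypothetical.
+ */
+#if 0 /* example only */
+static void example_drain(struct example_q *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (q->draining) {
+ /* Another thread is already draining; it will see our work. */
+ spin_unlock_irqrestore(&q->lock, flags);
+ return;
+ }
+ q->draining = 1;
+ while (!list_empty(&q->backlog))
+ example_post_one(q); /* may drop and retake q->lock */
+ q->draining = 0;
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+#endif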
10308+
10309+/* Debug check: returns nonzero if the WRB was consumed in order. */
10310+#ifdef BE_DEBUG
10311+u32 be_mcc_wrb_consumed_in_order(struct be_mcc_object *mcc,
10312+ struct MCC_CQ_ENTRY_AMAP *cqe)
10313+{
10314+ struct be_mcc_wrb_context *wrb_context = NULL;
10315+ u32 wrb_index;
10316+ u32 wrb_consumed_in_order;
10317+ u32 offset;
10318+ u8 *p;
10319+
10320+ ASSERT(cqe);
10321+ /*
10322+ * A command completed. Commands complete out-of-order.
10323+ * Determine which command completed from the TAG.
10324+ */
10325+ offset = offsetof(struct BE_MCC_CQ_ENTRY_AMAP, mcc_tag)/8;
10326+ p = (u8 *) cqe + offset;
10327+ wrb_context = (struct be_mcc_wrb_context *)(void *)(size_t)(*(u64 *)p);
10328+
10329+ ASSERT(wrb_context);
10330+
10331+ wrb_index = (u32) (((u64)(size_t)wrb_context->ring_wrb -
10332+ (u64)(size_t)mcc->sq.ring.va) / sizeof(struct MCC_WRB_AMAP));
10333+
10334+ ASSERT(wrb_index < mcc->sq.ring.num);
10335+
10336+ wrb_consumed_in_order = (u32) (wrb_index == mcc->consumed_index);
10337+ mcc->consumed_index = be_addc(mcc->consumed_index, 1, mcc->sq.ring.num);
10338+ return wrb_consumed_in_order;
10339+}
10340+#endif
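+
+/*
+ * Illustrative sketch: be_addc() above is assumed to be a modular
+ * ring-index add, advancing an index and wrapping at the ring size.
+ * A minimal equivalent (assumes count < ring_size):
+ */
+#if 0 /* example only */
+static inline u32 example_ring_add(u32 index, u32 count, u32 ring_size)
+{
+ index += count;
+ if (index >= ring_size)
+ index -= ring_size; /* wrap around */
+ return index;
+}
+#endif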
10341+
10342+int be_mcc_process_cq(struct be_mcc_object *mcc, bool rearm)
10343+{
10344+ struct be_function_object *pfob = NULL;
10345+ struct MCC_CQ_ENTRY_AMAP *cqe;
10346+ struct CQ_DB_AMAP db;
10347+ struct mp_ring *cq_ring = &mcc->cq.ring;
10348+ struct mp_ring *mp_ring = &mcc->sq.ring;
10349+ u32 num_processed = 0;
10350+ u32 consumed = 0, valid, completed, cqe_consumed, async_event;
10351+
10352+ pfob = mcc->parent_function;
10353+
10354+ spin_lock_irqsave(&pfob->cq_lock, pfob->cq_irq);
10355+
10356+ /*
10357+ * Verify that only one thread is processing the CQ at once.
10358+ * We cannot hold the lock while processing the CQ due to
10359+ * the callbacks into the OS. Therefore, this flag is used
10360+ * to control it. If any of the threads want to
10361+ * rearm the CQ, we need to honor that.
10362+ */
10363+ if (mcc->processing != 0) {
10364+ mcc->rearm = mcc->rearm || rearm;
10365+ goto Error;
10366+ } else {
10367+ mcc->processing = 1; /* lock processing for this thread. */
10368+ mcc->rearm = rearm; /* set our rearm setting */
10369+ }
10370+
10371+ spin_unlock_irqrestore(&pfob->cq_lock, pfob->cq_irq);
10372+
10373+ cqe = mp_ring_current(cq_ring);
10374+ valid = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe);
10375+ while (valid) {
10376+
10377+ if (num_processed >= 8) {
10378+ /* coalesce doorbells, but free space in cq
10379+ * ring while processing. */
10380+ db.dw[0] = 0; /* clear */
10381+ AMAP_SET_BITS_PTR(CQ_DB, qid, &db, cq_ring->id);
10382+ AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, false);
10383+ AMAP_SET_BITS_PTR(CQ_DB, event, &db, false);
10384+ AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db,
10385+ num_processed);
10386+ num_processed = 0;
10387+
10388+ PD_WRITE(pfob, cq_db, db.dw[0]);
10389+ }
10390+
10391+ async_event = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, async_event, cqe);
10392+ if (async_event) {
10393+ /* This is an asynchronous event. */
10394+ struct ASYNC_EVENT_TRAILER_AMAP *async_trailer =
10395+ (struct ASYNC_EVENT_TRAILER_AMAP *)
10396+ ((u8 *) cqe + sizeof(struct MCC_CQ_ENTRY_AMAP) -
10397+ sizeof(struct ASYNC_EVENT_TRAILER_AMAP));
10398+ u32 event_code;
10399+ async_event = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
10400+ async_event, async_trailer);
10401+ ASSERT(async_event == 1);
10402+
10403+
10404+ valid = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
10405+ valid, async_trailer);
10406+ ASSERT(valid == 1);
10407+
10408+ /* Call the async event handler if it is installed. */
10409+ if (mcc->async_cb) {
10410+ event_code =
10411+ AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
10412+ event_code, async_trailer);
10413+ mcc->async_cb(mcc->async_context,
10414+ (u32) event_code, (void *) cqe);
10415+ }
10416+
10417+ } else {
10418+ /* This is a completion entry. */
10419+
10420+ /* No vm forwarding in this driver. */
10421+
10422+ cqe_consumed = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY,
10423+ consumed, cqe);
10424+ if (cqe_consumed) {
10425+ /*
10426+ * A command on the MCC ring was consumed.
10427+ * Update the consumer index.
10428+ * These occur in order.
10429+ */
10430+ ASSERT(be_mcc_wrb_consumed_in_order(mcc, cqe));
10431+ consumed++;
10432+ }
10433+
10434+ completed = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY,
10435+ completed, cqe);
10436+ if (completed) {
10437+ /* A command completed. Use tag to
10438+ * determine which command. */
10439+ be_mcc_process_cqe(pfob, cqe);
10440+ }
10441+ }
10442+
10443+ /* Reset the CQE */
10444+ AMAP_SET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe, false);
10445+ num_processed++;
10446+
10447+ /* Update our tracking for the CQ ring. */
10448+ cqe = mp_ring_next(cq_ring);
10449+ valid = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe);
10450+ }
10451+
10452+ TRACE(DL_INFO, "num_processed:0x%x, and consumed:0x%x",
10453+ num_processed, consumed);
10454+ /*
10455+ * Grab the CQ lock to synchronize the "rearm" setting for
10456+ * the doorbell, and for clearing the "processing" flag.
10457+ */
10458+ spin_lock_irqsave(&pfob->cq_lock, pfob->cq_irq);
10459+
10460+ /*
10461+ * Rearm the cq. This is done based on the global mcc->rearm
10462+ * flag which combines the rearm parameter from the current
10463+ * call to process_cq and any other threads
10464+ * that tried to process the CQ while this one was active.
10465+ * This handles the situation where a sync. fwcmd was processing
10466+ * the CQ while the interrupt/dpc tries to process it.
10467+ * The sync process gets to continue -- but it is now
10468+ * responsible for the rearming.
10469+ */
10470+ if (num_processed > 0 || mcc->rearm == true) {
10471+ db.dw[0] = 0; /* clear */
10472+ AMAP_SET_BITS_PTR(CQ_DB, qid, &db, cq_ring->id);
10473+ AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, mcc->rearm);
10474+ AMAP_SET_BITS_PTR(CQ_DB, event, &db, false);
10475+ AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db, num_processed);
10476+
10477+ PD_WRITE(pfob, cq_db, db.dw[0]);
10478+ }
10479+ /*
10480+ * Update the consumer index after ringing the CQ doorbell.
10481+ * We don't want another thread to post more WRBs before we
10482+ * have CQ space available.
10483+ */
10484+ mp_ring_consume_multiple(mp_ring, consumed);
10485+
10486+ /* Clear the processing flag. */
10487+ mcc->processing = 0;
10488+
10489+Error:
10490+ spin_unlock_irqrestore(&pfob->cq_lock, pfob->cq_irq);
10491+ /*
10492+ * Use the rearm parameter to detect whether the current thread
10493+ * may hold the WRB post lock. If rearm is false, this is
10494+ * either a synchronous command, or the upper layer driver is polling
10495+ * from a thread. We do not drive the queue from that
10496+ * context since the driver may hold the
10497+ * wrb post lock already.
10498+ */
10499+ if (rearm)
10500+ be_drive_mcc_wrb_queue(mcc);
10501+ else
10502+ pfob->pend_queue_driving = 1;
10503+
10504+ return BE_SUCCESS;
10505+}
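+
+/*
+ * Illustrative sketch (hypothetical helper names): the doorbell coalescing
+ * used above. Rather than ringing the CQ doorbell per entry, the loop
+ * batches up to 8 entries into one "num_popped" write, then issues a final
+ * write after the loop that also carries the rearm decision.
+ */
+#if 0 /* example only */
+ u32 pending = 0;
+
+ while (cqe_is_valid(cqe)) {
+ process_one(cqe);
+ if (++pending >= 8) {
+ ring_cq_db(qid, pending, false); /* free CQ space */
+ pending = 0;
+ }
+ cqe = next_cqe();
+ }
+ if (pending > 0 || want_rearm)
+ ring_cq_db(qid, pending, want_rearm);
+#endif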
10506+
10507+/*
10508+ *============================================================================
10509+ * P U B L I C R O U T I N E S
10510+ *============================================================================
10511+ */
10512+
10513+/*
10514+ This routine creates an MCC object. This object contains an MCC send queue
10515+ and a CQ private to the MCC.
10516+
10517+ pfob - Handle to the function object that owns this MCC.
10518+
10519+ cq - CQ object that will receive completions for this MCC.
10520+
10521+ mcc - Pointer to the internal Mcc Object to initialize and return.
10522+
10523+ Returns BE_SUCCESS if successful, otherwise a useful error code
10524+ is returned.
10525+
10526+ IRQL < DISPATCH_LEVEL
10527+
10528+*/
10529+int
10530+be_mcc_ring_create(struct be_function_object *pfob,
10531+ struct ring_desc *rd, u32 length,
10532+ struct be_mcc_wrb_context *context_array,
10533+ u32 num_context_entries,
10534+ struct be_cq_object *cq, struct be_mcc_object *mcc)
10535+{
10536+ int status = 0;
10537+
10538+ struct FWCMD_COMMON_MCC_CREATE *fwcmd = NULL;
10539+ struct MCC_WRB_AMAP *wrb = NULL;
10540+ u32 num_entries_encoded, n, i;
10541+ void *va = NULL;
10542+ unsigned long irql;
10543+
10544+ if (length < sizeof(struct MCC_WRB_AMAP) * 2) {
10545+ TRACE(DL_ERR, "Invalid MCC ring length:%d", length);
10546+ return BE_NOT_OK;
10547+ }
10548+ /*
10549+ * Reduce the actual ring size to be less than the number
10550+ * of context entries. This ensures that we run out of
10551+ * ring WRBs first so the queuing works correctly. We never
10552+ * queue based on context structs.
10553+ */
10554+ if (num_context_entries + 1 <
10555+ length / sizeof(struct MCC_WRB_AMAP) - 1) {
10556+
10557+ u32 max_length =
10558+ (num_context_entries + 2) * sizeof(struct MCC_WRB_AMAP);
10559+
10560+ if (is_power_of_2(max_length))
10561+ length = __roundup_pow_of_two(max_length+1) / 2;
10562+ else
10563+ length = __roundup_pow_of_two(max_length) / 2;
10564+
10565+ ASSERT(length <= max_length);
10566+
10567+ TRACE(DL_WARN,
10568+ "MCC ring length reduced based on context entries."
10569+ " length:%d wrbs:%d context_entries:%d", length,
10570+ (int) (length / sizeof(struct MCC_WRB_AMAP)),
10571+ num_context_entries);
10572+ }
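+ /*
+ * Worked example (editor's note): with W = sizeof(struct MCC_WRB_AMAP),
+ * a power of two, and num_context_entries = 5, max_length = 7*W;
+ * 7*W is not a power of two, so it rounds up to 8*W and halves to
+ * length = 4*W, i.e. 4 ring WRBs for 5 contexts - the ring always
+ * fills before the context pool is exhausted.
+ */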
10573+
10574+ spin_lock_irqsave(&pfob->post_lock, irql);
10575+
10576+ num_entries_encoded =
10577+ be_ring_length_to_encoding(length, sizeof(struct MCC_WRB_AMAP));
10578+
10579+ /* Init MCC object. */
10580+ memset(mcc, 0, sizeof(*mcc));
10581+ mcc->parent_function = pfob;
10582+ mcc->cq_object = cq;
10583+
10584+ INIT_LIST_HEAD(&mcc->backlog);
10585+
10586+ wrb = be_function_peek_mcc_wrb(pfob);
10587+ if (!wrb) {
10588+ ASSERT(wrb);
10589+ TRACE(DL_ERR, "No free MCC WRBs in create MCC ring.");
10590+ status = BE_STATUS_NO_MCC_WRB;
10591+ goto error;
10592+ }
10593+ /* Prepares an embedded fwcmd, including request/response sizes. */
10594+ fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MCC_CREATE);
10595+
10596+ fwcmd->params.request.num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
10597+ /*
10598+ * Program MCC ring context
10599+ */
10600+ AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, pdid,
10601+ &fwcmd->params.request.context, 0);
10602+ AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, invalid,
10603+ &fwcmd->params.request.context, false);
10604+ AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, ring_size,
10605+ &fwcmd->params.request.context, num_entries_encoded);
10606+
10607+ n = cq->cq_id;
10608+ AMAP_SET_BITS_PTR(MCC_RING_CONTEXT,
10609+ cq_id, &fwcmd->params.request.context, n);
10610+ be_rd_to_pa_list(rd, fwcmd->params.request.pages,
10611+ ARRAY_SIZE(fwcmd->params.request.pages));
10612+ /* Post the f/w command */
10613+ status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
10614+ NULL, NULL, fwcmd, NULL);
10615+ if (status != BE_SUCCESS) {
10616+ TRACE(DL_ERR, "FWCMD to create MCC ring failed.");
10617+ goto error;
10618+ }
10619+ /*
10620+ * Create a linked list of context structures
10621+ */
10622+ mcc->wrb_context.base = context_array;
10623+ mcc->wrb_context.num = num_context_entries;
10624+ INIT_LIST_HEAD(&mcc->wrb_context.list_head);
10625+ memset(context_array, 0,
10626+ sizeof(struct be_mcc_wrb_context) * num_context_entries);
10627+ for (i = 0; i < mcc->wrb_context.num; i++) {
10628+ list_add_tail(&context_array[i].next,
10629+ &mcc->wrb_context.list_head);
10630+ }
10631+
10632+ /*
10633+ * Create an mcc_ring for tracking the
10634+ * WRB hardware ring.
10635+ */
10636+ va = rd->va;
10637+ ASSERT(va);
10638+ mp_ring_create(&mcc->sq.ring, length / sizeof(struct MCC_WRB_AMAP),
10639+ sizeof(struct MCC_WRB_AMAP), va);
10640+ mcc->sq.ring.id = fwcmd->params.response.id;
10641+ /*
10642+ * Init a mcc_ring for tracking the MCC CQ.
10643+ */
10644+ ASSERT(cq->va);
10645+ mp_ring_create(&mcc->cq.ring, cq->num_entries,
10646+ sizeof(struct MCC_CQ_ENTRY_AMAP), cq->va);
10647+ mcc->cq.ring.id = cq->cq_id;
10648+
10649+ /* Force zeroing of CQ. */
10650+ memset(cq->va, 0, cq->num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP));
10651+
10652+ /* Initialize debug index. */
10653+ mcc->consumed_index = 0;
10654+
10655+ atomic_inc(&cq->ref_count);
10656+ pfob->mcc = mcc;
10657+
10658+ TRACE(DL_INFO, "MCC ring created. id:%d bytes:%d cq_id:%d cq_entries:%d"
10659+ " num_context:%d", mcc->sq.ring.id, length,
10660+ cq->cq_id, cq->num_entries, num_context_entries);
10661+
10662+error:
10663+ spin_unlock_irqrestore(&pfob->post_lock, irql);
10664+ if (pfob->pend_queue_driving && pfob->mcc) {
10665+ pfob->pend_queue_driving = 0;
10666+ be_drive_mcc_wrb_queue(pfob->mcc);
10667+ }
10668+ return status;
10669+}
10670+
10671+/*
10672+ This routine destroys an MCC send queue
10673+
10674+ mcc - Internal Mcc Object to be destroyed.
10675+
10676+ Returns BE_SUCCESS if successful, otherwise an error code is returned.
10677+
10678+ IRQL < DISPATCH_LEVEL
10679+
10680+ The caller of this routine must ensure that no other WRB may be posted
10681+ until this routine returns.
10682+
10683+*/
10684+int be_mcc_ring_destroy(struct be_mcc_object *mcc)
10685+{
10686+ int status = 0;
10687+ struct be_function_object *pfob = mcc->parent_function;
10688+
10689+
10690+ ASSERT(mcc->processing == 0);
10691+
10692+ /*
10693+ * Remove the ring from the function object.
10694+ * This transitions back to mailbox mode.
10695+ */
10696+ pfob->mcc = NULL;
10697+
10698+ /* Send fwcmd to destroy the queue. (Using the mailbox.) */
10699+ status = be_function_ring_destroy(mcc->parent_function, mcc->sq.ring.id,
10700+ FWCMD_RING_TYPE_MCC, NULL, NULL, NULL, NULL);
10701+ ASSERT(status == 0);
10702+
10703+ /* Release the SQ reference to the CQ */
10704+ atomic_dec(&mcc->cq_object->ref_count);
10705+
10706+ return status;
10707+}
10708+
10709+static void
10710+mcc_wrb_sync_cb(void *context, int status, struct MCC_WRB_AMAP *wrb)
10711+{
10712+ struct be_mcc_wrb_context *wrb_context =
10713+ (struct be_mcc_wrb_context *) context;
10714+ ASSERT(wrb_context);
10715+ *wrb_context->users_final_status = status;
10716+}
10717+
10718+/*
10719+ This routine posts a command to the MCC send queue
10720+
10721+ mcc - Internal Mcc Object to post the WRB on.
10722+
10723+ wrb - wrb to post.
10724+
10725+ Returns BE_SUCCESS if successful, otherwise an error code is returned.
10726+
10727+ IRQL < DISPATCH_LEVEL if the completion callback is not NULL
10728+ IRQL <= DISPATCH_LEVEL if the completion callback is NULL
10729+
10730+ If this routine is called with a completion callback set in wrb_context,
10731+ the call is considered to be asynchronous and will return BE_PENDING
10732+ as soon as the WRB is posted to the MCC.
10733+
10734+ If the completion callback is NULL, then this routine will not return
10735+ until a completion for this MCC command has been processed.
10736+ If called at DISPATCH_LEVEL the completion callback must be NULL.
10737+
10738+ This routine should only be called if the MPU has been bootstrapped past
10739+ mailbox mode.
10740+
10741+
10742+*/
10743+int
10744+_be_mpu_post_wrb_ring(struct be_mcc_object *mcc, struct MCC_WRB_AMAP *wrb,
10745+ struct be_mcc_wrb_context *wrb_context)
10746+{
10747+
10748+ struct MCC_WRB_AMAP *ring_wrb = NULL;
10749+ int status = BE_PENDING;
10750+ int final_status = BE_PENDING;
10751+ mcc_wrb_cqe_callback cb = NULL;
10752+ struct MCC_DB_AMAP mcc_db;
10753+ u32 embedded;
10754+
10755+ ASSERT(mp_ring_num_empty(&mcc->sq.ring) > 0);
10756+ /*
10757+ * Input wrb is most likely the next wrb in the ring, since the client
10758+ * can peek at the address.
10759+ */
10760+ ring_wrb = mp_ring_producer_ptr(&mcc->sq.ring);
10761+ if (wrb != ring_wrb) {
10762+ /* If not equal, copy it into the ring. */
10763+ memcpy(ring_wrb, wrb, sizeof(struct MCC_WRB_AMAP));
10764+ }
10765+#ifdef BE_DEBUG
10766+ wrb_context->ring_wrb = ring_wrb;
10767+#endif
10768+ embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, ring_wrb);
10769+ if (embedded) {
10770+ /* embedded commands will have the response within the WRB. */
10771+ wrb_context->wrb = ring_wrb;
10772+ } else {
10773+ /*
10774+ * non-embedded commands will not have the response
10775+ * within the WRB, and they may complete out-of-order.
10776+ * The WRB will not be valid to inspect
10777+ * during the completion.
10778+ */
10779+ wrb_context->wrb = NULL;
10780+ }
10781+ cb = wrb_context->cb;
10782+
10783+ if (cb == NULL) {
10784+ /* Assign our internal callback if this is a
10785+ * synchronous call. */
10786+ wrb_context->cb = mcc_wrb_sync_cb;
10787+ wrb_context->cb_context = wrb_context;
10788+ wrb_context->users_final_status = &final_status;
10789+ }
10790+ /* Increment producer index */
10791+
10792+ mcc_db.dw[0] = 0; /* initialize */
10793+ AMAP_SET_BITS_PTR(MCC_DB, rid, &mcc_db, mcc->sq.ring.id);
10794+ AMAP_SET_BITS_PTR(MCC_DB, numPosted, &mcc_db, 1);
10795+
10796+ mp_ring_produce(&mcc->sq.ring);
10797+ PD_WRITE(mcc->parent_function, mpu_mcc_db, mcc_db.dw[0]);
10798+ TRACE(DL_INFO, "pidx: %x and cidx: %x.", mcc->sq.ring.pidx,
10799+ mcc->sq.ring.cidx);
10800+
10801+ if (cb == NULL) {
10802+ int polls = 0; /* At >= 1 us per poll */
10803+ /* Wait until this command completes, polling the CQ. */
10804+ do {
10805+ TRACE(DL_INFO, "FWCMD submitted in the poll mode.");
10806+ /* Do not rearm CQ in this context. */
10807+ be_mcc_process_cq(mcc, false);
10808+
10809+ if (final_status == BE_PENDING) {
10810+ if ((++polls & 0x7FFFF) == 0) {
10811+ TRACE(DL_WARN,
10812+ "Warning: polling MCC CQ for %d ms.",
10813+ polls / 1000);
10814+ }
10815+
10816+ udelay(1);
10817+ }
10818+
10819+ /* final_status changed when the command completes */
10820+ } while (final_status == BE_PENDING);
10821+
10822+ status = final_status;
10823+ }
10824+
10825+ return status;
10826+}
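+
+/*
+ * Illustrative sketch (hypothetical names): the synchronous path above
+ * layers polling on top of the asynchronous machinery - a stack variable
+ * receives the final status from the internal callback, and the poster
+ * spins on it while processing the CQ without rearming.
+ */
+#if 0 /* example only */
+ int final_status = BE_PENDING;
+
+ ctx->cb = example_sync_cb; /* writes to *ctx->status_ptr */
+ ctx->status_ptr = &final_status;
+ example_post(ctx);
+ do {
+ example_process_cq(false); /* runs the callback eventually */
+ if (final_status == BE_PENDING)
+ udelay(1);
+ } while (final_status == BE_PENDING);
+#endif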
10827+
10828+struct MCC_WRB_AMAP *
10829+_be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue)
10830+{
10831+ /* If we have queued items, do not allow a post to bypass the queue. */
10832+ if (!driving_queue && !list_empty(&mcc->backlog))
10833+ return NULL;
10834+
10835+ if (mp_ring_num_empty(&mcc->sq.ring) <= 0)
10836+ return NULL;
10837+ return (struct MCC_WRB_AMAP *) mp_ring_producer_ptr(&mcc->sq.ring);
10838+}
10839+
10840+int
10841+be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *mailbox)
10842+{
10843+ ASSERT(mailbox);
10844+ pfob->mailbox.va = mailbox->va;
10845+ pfob->mailbox.pa = cpu_to_le64(mailbox->pa);
10846+ pfob->mailbox.length = mailbox->length;
10847+
10848+ ASSERT(((u32)(size_t)pfob->mailbox.va & 0xf) == 0);
10849+ ASSERT(((u32)(size_t)pfob->mailbox.pa & 0xf) == 0);
10850+ /*
10851+ * Issue the WRB to set MPU endianness
10852+ */
10853+ {
10854+ u64 *endian_check = (u64 *) (pfob->mailbox.va +
10855+ offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8);
10856+ *endian_check = 0xFF1234FFFF5678FFULL;
10857+ }
10858+
10859+ be_mcc_mailbox_notify_and_wait(pfob);
10860+
10861+ return BE_SUCCESS;
10862+}
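+
+/*
+ * Editor's note: the 0xFF1234FFFF5678FFULL signature written above lets
+ * the MPU detect host byte order from the mailbox bytes. A little-endian
+ * host stores the u64 as FF 78 56 FF FF 34 12 FF, a big-endian host as
+ * FF 12 34 FF FF 56 78 FF; the firmware-side interpretation is inferred
+ * from this use.
+ */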
10863+
10864+
10865+/*
10866+ This routine posts a command to the MCC mailbox.
10867+
10868+ pfob - Function Object to post the WRB on behalf of.
10869+
10870+ wrb - wrb to post.
10871+
10872+ wrb_context - Context tracking this WRB; its completion callback, if any,
10873+ is invoked once the mailbox completion has been processed.
10874+ Returns BE_SUCCESS if successful, otherwise an error code is returned.
10875+
10876+ IRQL <= DISPATCH_LEVEL if the completion callback is NULL
10877+
10878+ This routine will block until a completion for this MCC command has been
10879+ processed. If called at DISPATCH_LEVEL the completion callback must be NULL.
10880+
10881+ This routine should only be called if the MPU has not been bootstrapped past
10882+ mailbox mode.
10883+*/
10884+int
10885+_be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
10886+ struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context)
10887+{
10888+ struct MCC_MAILBOX_AMAP *mailbox = NULL;
10889+ struct MCC_WRB_AMAP *mb_wrb;
10890+ struct MCC_CQ_ENTRY_AMAP *mb_cq;
10891+ u32 offset, status;
10892+
10893+ ASSERT(pfob->mcc == NULL);
10894+ mailbox = pfob->mailbox.va;
10895+ ASSERT(mailbox);
10896+
10897+ offset = offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8;
10898+ mb_wrb = (struct MCC_WRB_AMAP *) ((u8 *)mailbox + offset);
10899+ if (mb_wrb != wrb) {
10900+ memset(mailbox, 0, sizeof(*mailbox));
10901+ memcpy(mb_wrb, wrb, sizeof(struct MCC_WRB_AMAP));
10902+ }
10903+ /* The callback can inspect the final WRB to get output parameters. */
10904+ wrb_context->wrb = mb_wrb;
10905+
10906+ be_mcc_mailbox_notify_and_wait(pfob);
10907+
10908+ /* A command completed. Use tag to determine which command. */
10909+ offset = offsetof(struct BE_MCC_MAILBOX_AMAP, cq)/8;
10910+ mb_cq = (struct MCC_CQ_ENTRY_AMAP *) ((u8 *)mailbox + offset);
10911+ be_mcc_process_cqe(pfob, mb_cq);
10912+
10913+ status = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, completion_status, mb_cq);
10914+ if (status)
10915+ status = BE_NOT_OK;
10916+ return status;
10917+}
10918+
10919+struct be_mcc_wrb_context *
10920+_be_mcc_allocate_wrb_context(struct be_function_object *pfob)
10921+{
10922+ struct be_mcc_wrb_context *context = NULL;
10923+ unsigned long irq;
10924+
10925+ spin_lock_irqsave(&pfob->mcc_context_lock, irq);
10926+
10927+ if (!pfob->mailbox.default_context_allocated) {
10928+ /* Use the single default context that we
10929+ * always have allocated. */
10930+ pfob->mailbox.default_context_allocated = true;
10931+ context = &pfob->mailbox.default_context;
10932+ } else if (pfob->mcc) {
10933+ /* Get a context from the free list. If any are available. */
10934+ if (!list_empty(&pfob->mcc->wrb_context.list_head)) {
10935+ context = list_first_entry(
10936+ &pfob->mcc->wrb_context.list_head,
10937+ struct be_mcc_wrb_context, next);
10938+ }
10939+ }
10940+
10941+ spin_unlock_irqrestore(&pfob->mcc_context_lock, irq);
10942+
10943+ return context;
10944+}
10945+
10946+void
10947+_be_mcc_free_wrb_context(struct be_function_object *pfob,
10948+ struct be_mcc_wrb_context *context)
10949+{
10950+ unsigned long irq;
10951+
10952+ ASSERT(context);
10953+ /*
10954+ * Zero during free to try and catch any bugs where the context
10955+ * is accessed after a free.
10956+ */
10957+ memset(context, 0, sizeof(*context));
10958+
10959+ spin_lock_irqsave(&pfob->mcc_context_lock, irq);
10960+
10961+ if (context == &pfob->mailbox.default_context) {
10962+ /* Free the default context. */
10963+ ASSERT(pfob->mailbox.default_context_allocated);
10964+ pfob->mailbox.default_context_allocated = false;
10965+ } else {
10966+ /* Add to free list. */
10967+ ASSERT(pfob->mcc);
10968+ list_add_tail(&context->next,
10969+ &pfob->mcc->wrb_context.list_head);
10970+ }
10971+
10972+ spin_unlock_irqrestore(&pfob->mcc_context_lock, irq);
10973+}
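+
+/*
+ * Illustrative sketch (hypothetical types, locking omitted): the fixed-pool
+ * free-list pattern used by the two routines above, including the
+ * zero-on-free that makes use-after-free bugs fail fast.
+ */
+#if 0 /* example only */
+static struct example_ctx *example_alloc(struct example_pool *p)
+{
+ struct example_ctx *c = NULL;
+
+ if (!list_empty(&p->free_list)) {
+ c = list_first_entry(&p->free_list, struct example_ctx, next);
+ list_del(&c->next);
+ }
+ return c;
+}
+
+static void example_free(struct example_pool *p, struct example_ctx *c)
+{
+ memset(c, 0, sizeof(*c)); /* catch use-after-free early */
+ list_add_tail(&c->next, &p->free_list);
+}
+#endif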
10974+
10975+int
10976+be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
10977+ mcc_async_event_callback cb, void *cb_context)
10978+{
10979+ /* Lock against anyone trying to change the callback/context pointers
10980+ * while being used. */
10981+ spin_lock_irqsave(&mcc_object->parent_function->cq_lock,
10982+ mcc_object->parent_function->cq_irq);
10983+
10984+ /* Assign the async callback. */
10985+ mcc_object->async_context = cb_context;
10986+ mcc_object->async_cb = cb;
10987+
10988+ spin_unlock_irqrestore(&mcc_object->parent_function->cq_lock,
10989+ mcc_object->parent_function->cq_irq);
10990+
10991+ return BE_SUCCESS;
10992+}
10993+
10994+#define MPU_EP_CONTROL 0
10995+#define MPU_EP_SEMAPHORE 0xac
10996+
10997+/*
10998+ *-------------------------------------------------------------------
10999+ * Function: be_wait_for_POST_complete
11000+ * Waits until the BladeEngine POST completes (either in error or success).
11001+ * pfob -
11002+ * return status - BE_SUCCESS (0) on success. Negative error code on failure.
11003+ *-------------------------------------------------------------------
11004+ */
11005+static int be_wait_for_POST_complete(struct be_function_object *pfob)
11006+{
11007+ struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
11008+ int s;
11009+ u32 post_error, post_stage;
11010+
11011+ const u32 us_per_loop = 1000; /* 1000us */
11012+ const u32 print_frequency_loops = 1000000 / us_per_loop;
11013+ const u32 max_loops = 60 * print_frequency_loops;
11014+ u32 loops = 0;
11015+
11016+ /*
11017+ * Wait for the ARM firmware to indicate it is done, or a fatal error.
11018+ * Note: POST can take some time to complete depending on configuration
11019+ * settings (consider ARM attempts to acquire an IP address
11020+ * over DHCP!!!).
11021+ *
11022+ */
11023+ do {
11024+ status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
11025+ post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
11026+ error, &status);
11027+ post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
11028+ stage, &status);
11029+ if (0 == (loops % print_frequency_loops)) {
11030+ /* Print current status */
11031+ TRACE(DL_INFO, "POST status = 0x%x (stage = 0x%x)",
11032+ status.dw[0], post_stage);
11033+ }
11034+ udelay(us_per_loop);
11035+ } while ((post_error != 1) &&
11036+ (post_stage != POST_STAGE_ARMFW_READY) &&
11037+ (++loops < max_loops));
11038+
11039+ if (post_error == 1) {
11040+ TRACE(DL_ERR, "POST error! Status = 0x%x (stage = 0x%x)",
11041+ status.dw[0], post_stage);
11042+ s = BE_NOT_OK;
11043+ } else if (post_stage != POST_STAGE_ARMFW_READY) {
11044+ TRACE(DL_ERR, "POST time-out! Status = 0x%x (stage = 0x%x)",
11045+ status.dw[0], post_stage);
11046+ s = BE_NOT_OK;
11047+ } else {
11048+ s = BE_SUCCESS;
11049+ }
11050+ return s;
11051+}
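+
+/*
+ * Editor's note: with us_per_loop = 1000, print_frequency_loops works out
+ * to 1000000 / 1000 = 1000 (one status line per second) and max_loops to
+ * 60 * 1000, so the loop above polls for at most about 60 seconds before
+ * declaring a POST time-out.
+ */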
11052+
11053+/*
11054+ *-------------------------------------------------------------------
11055+ * Function: be_kickoff_and_wait_for_POST
11056+ * Interacts with the BladeEngine management processor to initiate POST, and
11057+ * subsequently waits until POST completes (either in error or success).
11058+ * The caller must acquire the reset semaphore before initiating POST
11059+ * to prevent multiple drivers interacting with the management processor.
11060+ * Once POST is complete the caller must release the reset semaphore.
11061+ * Callers who only want to wait for POST complete may call
11062+ * be_wait_for_POST_complete.
11063+ * pfob -
11064+ * return status - BE_SUCCESS (0) on success. Negative error code on failure.
11065+ *-------------------------------------------------------------------
11066+ */
11067+static int
11068+be_kickoff_and_wait_for_POST(struct be_function_object *pfob)
11069+{
11070+ struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
11071+ int s;
11072+
11073+ const u32 us_per_loop = 1000; /* 1000us */
11074+ const u32 print_frequency_loops = 1000000 / us_per_loop;
11075+ const u32 max_loops = 5 * print_frequency_loops;
11076+ u32 loops = 0;
11077+ u32 post_error, post_stage;
11078+
11079+ /* Wait until the ARM firmware awaits host-ready, or a fatal error occurs. */
11080+ TRACE(DL_INFO, "Wait for BladeEngine ready to POST");
11081+ do {
11082+ status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
11083+ post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
11084+ error, &status);
11085+ post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
11086+ stage, &status);
11087+ if (0 == (loops % print_frequency_loops)) {
11088+ /* Print current status */
11089+ TRACE(DL_INFO, "POST status = 0x%x (stage = 0x%x)",
11090+ status.dw[0], post_stage);
11091+ }
11092+ udelay(us_per_loop);
11093+ } while ((post_error != 1) &&
11094+ (post_stage < POST_STAGE_AWAITING_HOST_RDY) &&
11095+ (++loops < max_loops));
11096+
11097+ if (post_error == 1) {
11098+ TRACE(DL_ERR, "Pre-POST error! Status = 0x%x (stage = 0x%x)",
11099+ status.dw[0], post_stage);
11100+ s = BE_NOT_OK;
11101+ } else if (post_stage == POST_STAGE_AWAITING_HOST_RDY) {
11102+ iowrite32(POST_STAGE_HOST_RDY, pfob->csr_va + MPU_EP_SEMAPHORE);
11103+
11104+ /* Wait for POST to complete */
11105+ s = be_wait_for_POST_complete(pfob);
11106+ } else {
11107+ /*
11108+ * Either a timeout waiting for host ready signal or POST has
11109+ * moved ahead without requiring a host ready signal.
11110+ * Might as well give POST a chance to complete
11111+ * (or timeout again).
11112+ */
11113+ s = be_wait_for_POST_complete(pfob);
11114+ }
11115+ return s;
11116+}
11117+
11118+/*
11119+ *-------------------------------------------------------------------
11120+ * Function: be_pci_soft_reset
11121+ * This function is called to issue a BladeEngine soft reset.
11122+ * Callers should acquire the soft reset semaphore before calling this
11123+ * function. Additionally, callers should ensure they cannot be pre-empted
11124+ * while the routine executes. Upon completion of this routine, callers
11125+ * should release the reset semaphore. This routine implicitly waits
11126+ * for BladeEngine POST to complete.
11127+ * pfob -
11128+ * return status - BE_SUCCESS (0) on success. Negative error code on failure.
11129+ *-------------------------------------------------------------------
11130+ */
11131+int be_pci_soft_reset(struct be_function_object *pfob)
11132+{
11133+ struct PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
11134+ struct PCICFG_ONLINE0_CSR_AMAP pciOnline0;
11135+ struct PCICFG_ONLINE1_CSR_AMAP pciOnline1;
11136+ struct EP_CONTROL_CSR_AMAP epControlCsr;
11137+ int status = BE_SUCCESS;
11138+ u32 i, soft_reset_bit;
11139+
11140+ TRACE(DL_NOTE, "PCI reset...");
11141+
11142+ /* Issue soft reset #1 to get BladeEngine into a known state. */
11143+ soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
11144+ AMAP_SET_BITS_PTR(PCICFG_SOFT_RESET_CSR, softreset, soft_reset.dw, 1);
11145+ PCICFG0_WRITE(pfob, host_timer_int_ctrl, soft_reset.dw[0]);
11146+ /*
11147+ * wait until soft reset is deasserted - hardware
11148+ * deasserts after some time.
11149+ */
11150+ i = 0;
11151+ do {
11152+ udelay(50);
11153+ soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
11154+ soft_reset_bit = AMAP_GET_BITS_PTR(PCICFG_SOFT_RESET_CSR,
11155+ softreset, soft_reset.dw);
11156+ } while (soft_reset_bit && (i++ < 1024));
11157+ if (soft_reset_bit != 0) {
11158+ TRACE(DL_ERR, "Soft-reset #1 did not deassert as expected.");
11159+ status = BE_NOT_OK;
11160+ goto Error_label;
11161+ }
11162+ /* Mask everything */
11163+ PCICFG0_WRITE(pfob, ue_status_low_mask, 0xFFFFFFFF);
11164+ PCICFG0_WRITE(pfob, ue_status_hi_mask, 0xFFFFFFFF);
11165+ /*
11166+ * Set everything offline except MPU IRAM (it is offline with
11167+ * the soft-reset, but soft-reset does not reset the PCICFG registers!)
11168+ */
11169+ pciOnline0.dw[0] = 0;
11170+ pciOnline1.dw[0] = 0;
11171+ AMAP_SET_BITS_PTR(PCICFG_ONLINE1_CSR, mpu_iram_online,
11172+ pciOnline1.dw, 1);
11173+ PCICFG0_WRITE(pfob, online0, pciOnline0.dw[0]);
11174+ PCICFG0_WRITE(pfob, online1, pciOnline1.dw[0]);
11175+
11176+ udelay(20000);
11177+
11178+ /* Issue soft reset #2. */
11179+ AMAP_SET_BITS_PTR(PCICFG_SOFT_RESET_CSR, softreset, soft_reset.dw, 1);
11180+ PCICFG0_WRITE(pfob, host_timer_int_ctrl, soft_reset.dw[0]);
11181+ /*
11182+ * wait until soft reset is deasserted - hardware
11183+ * deasserts after some time.
11184+ */
11185+ i = 0;
11186+ do {
11187+ udelay(50);
11188+ soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
11189+ soft_reset_bit = AMAP_GET_BITS_PTR(PCICFG_SOFT_RESET_CSR,
11190+ softreset, soft_reset.dw);
11191+ } while (soft_reset_bit && (i++ < 1024));
11192+ if (soft_reset_bit != 0) {
11193+ TRACE(DL_ERR, "Soft-reset #2 did not deassert as expected.");
11194+ status = BE_NOT_OK;
11195+ goto Error_label;
11196+ }
11197+
11198+
11199+ udelay(20000);
11200+
11201+ /* Take MPU out of reset. */
11202+
11203+ epControlCsr.dw[0] = ioread32(pfob->csr_va + MPU_EP_CONTROL);
11204+ AMAP_SET_BITS_PTR(EP_CONTROL_CSR, CPU_reset, &epControlCsr, 0);
11205+ iowrite32((u32)epControlCsr.dw[0], pfob->csr_va + MPU_EP_CONTROL);
11206+
11207+ /* Kickoff BE POST and wait for completion */
11208+ status = be_kickoff_and_wait_for_POST(pfob);
11209+
11210+Error_label:
11211+ return status;
11212+}
11213+
11214+
11215+/*
11216+ *-------------------------------------------------------------------
11217+ * Function: be_pci_reset_required
11218+ * This private function is called to detect if a host entity is
11219+ * required to issue a PCI soft reset and subsequently drive
11220+ * BladeEngine POST. Scenarios where this is required:
11221+ * 1) BIOS-less configuration
11222+ * 2) Hot-swap/plug/power-on
11223+ * pfob -
11224+ * return true if a reset is required, false otherwise
11225+ *-------------------------------------------------------------------
11226+ */
11227+static bool be_pci_reset_required(struct be_function_object *pfob)
11228+{
11229+ struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
11230+ bool do_reset = false;
11231+ u32 post_error, post_stage;
11232+
11233+ /*
11234+ * Read the POST status register
11235+ */
11236+ status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
11237+ post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, error,
11238+ &status);
11239+ post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, stage,
11240+ &status);
11241+ if (post_stage <= POST_STAGE_AWAITING_HOST_RDY) {
11242+ /*
11243+ * If BladeEngine is waiting for host ready indication,
11244+ * we want to do a PCI reset.
11245+ */
11246+ do_reset = true;
11247+ }
11248+
11249+ return do_reset;
11250+}
11251+
11252+/*
11253+ *-------------------------------------------------------------------
11254+ * Function: be_drive_POST
11255+ * This function is called to drive BladeEngine POST. The
11256+ * caller should ensure they cannot be pre-empted while this routine executes.
11257+ * pfob -
11258+ * return status - BE_SUCCESS (0) on success. Negative error code on failure.
11259+ *-------------------------------------------------------------------
11260+ */
11261+int be_drive_POST(struct be_function_object *pfob)
11262+{
11263+ int status;
11264+
11265+ if (false != be_pci_reset_required(pfob)) {
11266+ /* PCI reset is needed (implicitly starts and waits for POST) */
11267+ status = be_pci_soft_reset(pfob);
11268+ } else {
11269+ /* No PCI reset is needed, start POST */
11270+ status = be_kickoff_and_wait_for_POST(pfob);
11271+ }
11272+
11273+ return status;
11274+}
11275--- /dev/null
11276+++ b/drivers/staging/benet/mpu_context.h
11277@@ -0,0 +1,46 @@
11278+/*
11279+ * Copyright (C) 2005 - 2008 ServerEngines
11280+ * All rights reserved.
11281+ *
11282+ * This program is free software; you can redistribute it and/or
11283+ * modify it under the terms of the GNU General Public License version 2
11284+ * as published by the Free Software Foundation. The full GNU General
11285+ * Public License is included in this distribution in the file called COPYING.
11286+ *
11287+ * Contact Information:
11288+ * linux-drivers@serverengines.com
11289+ *
11290+ * ServerEngines
11291+ * 209 N. Fair Oaks Ave
11292+ * Sunnyvale, CA 94085
11293+ */
11294+/*
11295+ * Autogenerated by srcgen version: 0127
11296+ */
11297+#ifndef __mpu_context_amap_h__
11298+#define __mpu_context_amap_h__
11299+
11300+/*
11301+ * Management command and control ring context. The MPU's BTLR_CTRL1 CSR
11302+ * controls the writeback behavior of the producer and consumer index values.
11303+ */
11304+struct BE_MCC_RING_CONTEXT_AMAP {
11305+ u8 con_index[16]; /* DWORD 0 */
11306+ u8 ring_size[4]; /* DWORD 0 */
11307+ u8 cq_id[11]; /* DWORD 0 */
11308+ u8 rsvd0; /* DWORD 0 */
11309+ u8 prod_index[16]; /* DWORD 1 */
11310+ u8 pdid[15]; /* DWORD 1 */
11311+ u8 invalid; /* DWORD 1 */
11312+ u8 cmd_pending_current[7]; /* DWORD 2 */
11313+ u8 rsvd1[25]; /* DWORD 2 */
11314+ u8 hpi_port_cq_id[11]; /* DWORD 3 */
11315+ u8 rsvd2[5]; /* DWORD 3 */
11316+ u8 cmd_pending_max[7]; /* DWORD 3 */
11317+ u8 rsvd3[9]; /* DWORD 3 */
11318+} __packed;
11319+struct MCC_RING_CONTEXT_AMAP {
11320+ u32 dw[4];
11321+};
11322+
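+/*
+ * Editor's note (sketch): in these autogenerated *_AMAP layouts each
+ * "u8 name[n]" describes an n-bit field, not bytes - e.g. con_index[16]
+ * above is bits 0..15 of DWORD 0. An AMAP_GET_BITS-style accessor then
+ * reduces to a shift and mask once the bit offset and width are known.
+ * Hypothetical helper, assuming the field does not straddle a 32-bit
+ * boundary:
+ */
+#if 0 /* example only */
+static inline u32 example_get_bits(const u32 *dw, u32 bit_off, u32 width)
+{
+ u32 word = dw[bit_off / 32];
+ u32 shift = bit_off % 32;
+
+ return (word >> shift) & (width >= 32 ? ~0u : (1u << width) - 1);
+}
+#endif
+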
11323+#endif /* __mpu_context_amap_h__ */
11324--- /dev/null
11325+++ b/drivers/staging/benet/mpu.h
11326@@ -0,0 +1,74 @@
11327+/*
11328+ * Copyright (C) 2005 - 2008 ServerEngines
11329+ * All rights reserved.
11330+ *
11331+ * This program is free software; you can redistribute it and/or
11332+ * modify it under the terms of the GNU General Public License version 2
11333+ * as published by the Free Software Foundation. The full GNU General
11334+ * Public License is included in this distribution in the file called COPYING.
11335+ *
11336+ * Contact Information:
11337+ * linux-drivers@serverengines.com
11338+ *
11339+ * ServerEngines
11340+ * 209 N. Fair Oaks Ave
11341+ * Sunnyvale, CA 94085
11342+ */
11343+/*
11344+ * Autogenerated by srcgen version: 0127
11345+ */
11346+#ifndef __mpu_amap_h__
11347+#define __mpu_amap_h__
11348+#include "ep.h"
11349+
11350+/* Provide control parameters for the Management Processor Unit. */
11351+struct BE_MPU_CSRMAP_AMAP {
11352+ struct BE_EP_CSRMAP_AMAP ep;
11353+ u8 rsvd0[128]; /* DWORD 64 */
11354+ u8 rsvd1[32]; /* DWORD 68 */
11355+ u8 rsvd2[192]; /* DWORD 69 */
11356+ u8 rsvd3[192]; /* DWORD 75 */
11357+ u8 rsvd4[32]; /* DWORD 81 */
11358+ u8 rsvd5[32]; /* DWORD 82 */
11359+ u8 rsvd6[32]; /* DWORD 83 */
11360+ u8 rsvd7[32]; /* DWORD 84 */
11361+ u8 rsvd8[32]; /* DWORD 85 */
11362+ u8 rsvd9[32]; /* DWORD 86 */
11363+ u8 rsvd10[32]; /* DWORD 87 */
11364+ u8 rsvd11[32]; /* DWORD 88 */
11365+ u8 rsvd12[32]; /* DWORD 89 */
11366+ u8 rsvd13[32]; /* DWORD 90 */
11367+ u8 rsvd14[32]; /* DWORD 91 */
11368+ u8 rsvd15[32]; /* DWORD 92 */
11369+ u8 rsvd16[32]; /* DWORD 93 */
11370+ u8 rsvd17[32]; /* DWORD 94 */
11371+ u8 rsvd18[32]; /* DWORD 95 */
11372+ u8 rsvd19[32]; /* DWORD 96 */
11373+ u8 rsvd20[32]; /* DWORD 97 */
11374+ u8 rsvd21[32]; /* DWORD 98 */
11375+ u8 rsvd22[32]; /* DWORD 99 */
11376+ u8 rsvd23[32]; /* DWORD 100 */
11377+ u8 rsvd24[32]; /* DWORD 101 */
11378+ u8 rsvd25[32]; /* DWORD 102 */
11379+ u8 rsvd26[32]; /* DWORD 103 */
11380+ u8 rsvd27[32]; /* DWORD 104 */
11381+ u8 rsvd28[96]; /* DWORD 105 */
11382+ u8 rsvd29[32]; /* DWORD 108 */
11383+ u8 rsvd30[32]; /* DWORD 109 */
11384+ u8 rsvd31[32]; /* DWORD 110 */
11385+ u8 rsvd32[32]; /* DWORD 111 */
11386+ u8 rsvd33[32]; /* DWORD 112 */
11387+ u8 rsvd34[96]; /* DWORD 113 */
11388+ u8 rsvd35[32]; /* DWORD 116 */
11389+ u8 rsvd36[32]; /* DWORD 117 */
11390+ u8 rsvd37[32]; /* DWORD 118 */
11391+ u8 rsvd38[32]; /* DWORD 119 */
11392+ u8 rsvd39[32]; /* DWORD 120 */
11393+ u8 rsvd40[32]; /* DWORD 121 */
11394+ u8 rsvd41[134][32]; /* DWORD 122 */
11395+} __packed;
11396+struct MPU_CSRMAP_AMAP {
11397+ u32 dw[256];
11398+};
11399+
11400+#endif /* __mpu_amap_h__ */
11401--- /dev/null
11402+++ b/drivers/staging/benet/pcicfg.h
11403@@ -0,0 +1,825 @@
11404+/*
11405+ * Copyright (C) 2005 - 2008 ServerEngines
11406+ * All rights reserved.
11407+ *
11408+ * This program is free software; you can redistribute it and/or
11409+ * modify it under the terms of the GNU General Public License version 2
11410+ * as published by the Free Software Foundation. The full GNU General
11411+ * Public License is included in this distribution in the file called COPYING.
11412+ *
11413+ * Contact Information:
11414+ * linux-drivers@serverengines.com
11415+ *
11416+ * ServerEngines
11417+ * 209 N. Fair Oaks Ave
11418+ * Sunnyvale, CA 94085
11419+ */
11420+/*
11421+ * Autogenerated by srcgen version: 0127
11422+ */
11423+#ifndef __pcicfg_amap_h__
11424+#define __pcicfg_amap_h__
11425+
11426+/* Vendor and Device ID Register. */
11427+struct BE_PCICFG_ID_CSR_AMAP {
11428+ u8 vendorid[16]; /* DWORD 0 */
11429+ u8 deviceid[16]; /* DWORD 0 */
11430+} __packed;
11431+struct PCICFG_ID_CSR_AMAP {
11432+ u32 dw[1];
11433+};
11434+
11435+/* IO Bar Register. */
11436+struct BE_PCICFG_IOBAR_CSR_AMAP {
11437+ u8 iospace; /* DWORD 0 */
11438+ u8 rsvd0[7]; /* DWORD 0 */
11439+ u8 iobar[24]; /* DWORD 0 */
11440+} __packed;
11441+struct PCICFG_IOBAR_CSR_AMAP {
11442+ u32 dw[1];
11443+};
11444+
11445+/* Memory BAR 0 Register. */
11446+struct BE_PCICFG_MEMBAR0_CSR_AMAP {
11447+ u8 memspace; /* DWORD 0 */
11448+ u8 type[2]; /* DWORD 0 */
11449+ u8 pf; /* DWORD 0 */
11450+ u8 rsvd0[10]; /* DWORD 0 */
11451+ u8 membar0[18]; /* DWORD 0 */
11452+} __packed;
11453+struct PCICFG_MEMBAR0_CSR_AMAP {
11454+ u32 dw[1];
11455+};
11456+
11457+/* Memory BAR 1 - Low Address Register. */
11458+struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP {
11459+ u8 memspace; /* DWORD 0 */
11460+ u8 type[2]; /* DWORD 0 */
11461+ u8 pf; /* DWORD 0 */
11462+ u8 rsvd0[13]; /* DWORD 0 */
11463+ u8 membar1lo[15]; /* DWORD 0 */
11464+} __packed;
11465+struct PCICFG_MEMBAR1_LO_CSR_AMAP {
11466+ u32 dw[1];
11467+};
11468+
11469+/* Memory BAR 1 - High Address Register. */
11470+struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP {
11471+ u8 membar1hi[32]; /* DWORD 0 */
11472+} __packed;
11473+struct PCICFG_MEMBAR1_HI_CSR_AMAP {
11474+ u32 dw[1];
11475+};
11476+
11477+/* Memory BAR 2 - Low Address Register. */
11478+struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP {
11479+ u8 memspace; /* DWORD 0 */
11480+ u8 type[2]; /* DWORD 0 */
11481+ u8 pf; /* DWORD 0 */
11482+ u8 rsvd0[17]; /* DWORD 0 */
11483+ u8 membar2lo[11]; /* DWORD 0 */
11484+} __packed;
11485+struct PCICFG_MEMBAR2_LO_CSR_AMAP {
11486+ u32 dw[1];
11487+};
11488+
11489+/* Memory BAR 2 - High Address Register. */
11490+struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP {
11491+ u8 membar2hi[32]; /* DWORD 0 */
11492+} __packed;
11493+struct PCICFG_MEMBAR2_HI_CSR_AMAP {
11494+ u32 dw[1];
11495+};
11496+
11497+/* Subsystem Vendor and ID (Function 0) Register. */
11498+struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP {
11499+ u8 subsys_vendor_id[16]; /* DWORD 0 */
11500+ u8 subsys_id[16]; /* DWORD 0 */
11501+} __packed;
11502+struct PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP {
11503+ u32 dw[1];
11504+};
11505+
11506+/* Subsystem Vendor and ID (Function 1) Register. */
11507+struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP {
11508+ u8 subsys_vendor_id[16]; /* DWORD 0 */
11509+ u8 subsys_id[16]; /* DWORD 0 */
11510+} __packed;
11511+struct PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP {
11512+ u32 dw[1];
11513+};
11514+
11515+/* Semaphore Register. */
11516+struct BE_PCICFG_SEMAPHORE_CSR_AMAP {
11517+ u8 locked; /* DWORD 0 */
11518+ u8 rsvd0[31]; /* DWORD 0 */
11519+} __packed;
11520+struct PCICFG_SEMAPHORE_CSR_AMAP {
11521+ u32 dw[1];
11522+};
11523+
11524+/* Soft Reset Register. */
11525+struct BE_PCICFG_SOFT_RESET_CSR_AMAP {
11526+ u8 rsvd0[7]; /* DWORD 0 */
11527+ u8 softreset; /* DWORD 0 */
11528+ u8 rsvd1[16]; /* DWORD 0 */
11529+ u8 nec_ll_rcvdetect_i[8]; /* DWORD 0 */
11530+} __packed;
11531+struct PCICFG_SOFT_RESET_CSR_AMAP {
11532+ u32 dw[1];
11533+};
11534+
11535+/* Unrecoverable Error Status (Low) Register. Each bit corresponds to
11536+ * an internal Unrecoverable Error. These are set by hardware and may be
11537+ * cleared by writing a one to the respective bit(s) to be cleared. Any
11538+ * bit being set that is also unmasked will result in Unrecoverable Error
11539+ * interrupt notification to the host CPU and/or Server Management chip
11540+ * and the transitioning of BladeEngine to an Offline state.
11541+ */
11542+struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP {
11543+ u8 cev_ue_status; /* DWORD 0 */
11544+ u8 ctx_ue_status; /* DWORD 0 */
11545+ u8 dbuf_ue_status; /* DWORD 0 */
11546+ u8 erx_ue_status; /* DWORD 0 */
11547+ u8 host_ue_status; /* DWORD 0 */
11548+ u8 mpu_ue_status; /* DWORD 0 */
11549+ u8 ndma_ue_status; /* DWORD 0 */
11550+ u8 ptc_ue_status; /* DWORD 0 */
11551+ u8 rdma_ue_status; /* DWORD 0 */
11552+ u8 rxf_ue_status; /* DWORD 0 */
11553+ u8 rxips_ue_status; /* DWORD 0 */
11554+ u8 rxulp0_ue_status; /* DWORD 0 */
11555+ u8 rxulp1_ue_status; /* DWORD 0 */
11556+ u8 rxulp2_ue_status; /* DWORD 0 */
11557+ u8 tim_ue_status; /* DWORD 0 */
11558+ u8 tpost_ue_status; /* DWORD 0 */
11559+ u8 tpre_ue_status; /* DWORD 0 */
11560+ u8 txips_ue_status; /* DWORD 0 */
11561+ u8 txulp0_ue_status; /* DWORD 0 */
11562+ u8 txulp1_ue_status; /* DWORD 0 */
11563+ u8 uc_ue_status; /* DWORD 0 */
11564+ u8 wdma_ue_status; /* DWORD 0 */
11565+ u8 txulp2_ue_status; /* DWORD 0 */
11566+ u8 host1_ue_status; /* DWORD 0 */
11567+ u8 p0_ob_link_ue_status; /* DWORD 0 */
11568+ u8 p1_ob_link_ue_status; /* DWORD 0 */
11569+ u8 host_gpio_ue_status; /* DWORD 0 */
11570+ u8 mbox_netw_ue_status; /* DWORD 0 */
11571+ u8 mbox_stor_ue_status; /* DWORD 0 */
11572+ u8 axgmac0_ue_status; /* DWORD 0 */
11573+ u8 axgmac1_ue_status; /* DWORD 0 */
11574+ u8 mpu_intpend_ue_status; /* DWORD 0 */
11575+} __packed;
11576+struct PCICFG_UE_STATUS_LOW_CSR_AMAP {
11577+ u32 dw[1];
11578+};
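+
+/*
+ * Editor's note: these status bits are write-one-to-clear, per the comment
+ * above, so the usual idiom is to read the register and write the same
+ * value back, clearing exactly the bits that were set. Hypothetical
+ * accessors, example only:
+ */
+#if 0
+ u32 ue_low = example_read32(pcicfg + UE_STATUS_LOW_OFFSET);
+ example_write32(pcicfg + UE_STATUS_LOW_OFFSET, ue_low); /* W1C */
+#endif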
11579+
11580+/* Unrecoverable Error Status (High) Register. Each bit corresponds to
11581+ * an internal Unrecoverable Error. These are set by hardware and may be
11582+ * cleared by writing a one to the respective bit(s) to be cleared. Any
11583+ * bit being set that is also unmasked will result in Unrecoverable Error
11584+ * interrupt notification to the host CPU and/or Server Management chip;
11585+ * and the transitioning of BladeEngine to an Offline state.
11586+ */
11587+struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP {
11588+ u8 jtag_ue_status; /* DWORD 0 */
11589+ u8 lpcmemhost_ue_status; /* DWORD 0 */
11590+ u8 mgmt_mac_ue_status; /* DWORD 0 */
11591+ u8 mpu_iram_ue_status; /* DWORD 0 */
11592+ u8 pcs0online_ue_status; /* DWORD 0 */
11593+ u8 pcs1online_ue_status; /* DWORD 0 */
11594+ u8 pctl0_ue_status; /* DWORD 0 */
11595+ u8 pctl1_ue_status; /* DWORD 0 */
11596+ u8 pmem_ue_status; /* DWORD 0 */
11597+ u8 rr_ue_status; /* DWORD 0 */
11598+ u8 rxpp_ue_status; /* DWORD 0 */
11599+ u8 txpb_ue_status; /* DWORD 0 */
11600+ u8 txp_ue_status; /* DWORD 0 */
11601+ u8 xaui_ue_status; /* DWORD 0 */
11602+ u8 arm_ue_status; /* DWORD 0 */
11603+ u8 ipc_ue_status; /* DWORD 0 */
11604+ u8 rsvd0[16]; /* DWORD 0 */
11605+} __packed;
11606+struct PCICFG_UE_STATUS_HI_CSR_AMAP {
11607+ u32 dw[1];
11608+};
11609+
11610+/* Unrecoverable Error Mask (Low) Register. Each bit, when set to one,
11611+ * will mask the associated Unrecoverable Error status bit from notification
11612+ * of Unrecoverable Error to the host CPU and/or Server Management chip and the
11613+ * transitioning of all BladeEngine units to an Offline state.
11614+ */
11615+struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP {
11616+ u8 cev_ue_mask; /* DWORD 0 */
11617+ u8 ctx_ue_mask; /* DWORD 0 */
11618+ u8 dbuf_ue_mask; /* DWORD 0 */
11619+ u8 erx_ue_mask; /* DWORD 0 */
11620+ u8 host_ue_mask; /* DWORD 0 */
11621+ u8 mpu_ue_mask; /* DWORD 0 */
11622+ u8 ndma_ue_mask; /* DWORD 0 */
11623+ u8 ptc_ue_mask; /* DWORD 0 */
11624+ u8 rdma_ue_mask; /* DWORD 0 */
11625+ u8 rxf_ue_mask; /* DWORD 0 */
11626+ u8 rxips_ue_mask; /* DWORD 0 */
11627+ u8 rxulp0_ue_mask; /* DWORD 0 */
11628+ u8 rxulp1_ue_mask; /* DWORD 0 */
11629+ u8 rxulp2_ue_mask; /* DWORD 0 */
11630+ u8 tim_ue_mask; /* DWORD 0 */
11631+ u8 tpost_ue_mask; /* DWORD 0 */
11632+ u8 tpre_ue_mask; /* DWORD 0 */
11633+ u8 txips_ue_mask; /* DWORD 0 */
11634+ u8 txulp0_ue_mask; /* DWORD 0 */
11635+ u8 txulp1_ue_mask; /* DWORD 0 */
11636+ u8 uc_ue_mask; /* DWORD 0 */
11637+ u8 wdma_ue_mask; /* DWORD 0 */
11638+ u8 txulp2_ue_mask; /* DWORD 0 */
11639+ u8 host1_ue_mask; /* DWORD 0 */
11640+ u8 p0_ob_link_ue_mask; /* DWORD 0 */
11641+ u8 p1_ob_link_ue_mask; /* DWORD 0 */
11642+ u8 host_gpio_ue_mask; /* DWORD 0 */
11643+ u8 mbox_netw_ue_mask; /* DWORD 0 */
11644+ u8 mbox_stor_ue_mask; /* DWORD 0 */
11645+ u8 axgmac0_ue_mask; /* DWORD 0 */
11646+ u8 axgmac1_ue_mask; /* DWORD 0 */
11647+ u8 mpu_intpend_ue_mask; /* DWORD 0 */
11648+} __packed;
11649+struct PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP {
11650+ u32 dw[1];
11651+};
11652+
11653+/* Unrecoverable Error Mask (High) Register. Each bit, when set to one,
11654+ * will mask the associated Unrecoverable Error status bit from notification
11655+ * of Unrecoverable Error to the host CPU and/or Server Management chip and the
11656+ * transitioning of all BladeEngine units to an Offline state.
11657+ */
11658+struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP {
11659+ u8 jtag_ue_mask; /* DWORD 0 */
11660+ u8 lpcmemhost_ue_mask; /* DWORD 0 */
11661+ u8 mgmt_mac_ue_mask; /* DWORD 0 */
11662+ u8 mpu_iram_ue_mask; /* DWORD 0 */
11663+ u8 pcs0online_ue_mask; /* DWORD 0 */
11664+ u8 pcs1online_ue_mask; /* DWORD 0 */
11665+ u8 pctl0_ue_mask; /* DWORD 0 */
11666+ u8 pctl1_ue_mask; /* DWORD 0 */
11667+ u8 pmem_ue_mask; /* DWORD 0 */
11668+ u8 rr_ue_mask; /* DWORD 0 */
11669+ u8 rxpp_ue_mask; /* DWORD 0 */
11670+ u8 txpb_ue_mask; /* DWORD 0 */
11671+ u8 txp_ue_mask; /* DWORD 0 */
11672+ u8 xaui_ue_mask; /* DWORD 0 */
11673+ u8 arm_ue_mask; /* DWORD 0 */
11674+ u8 ipc_ue_mask; /* DWORD 0 */
11675+ u8 rsvd0[16]; /* DWORD 0 */
11676+} __packed;
11677+struct PCICFG_UE_STATUS_HI_MASK_CSR_AMAP {
11678+ u32 dw[1];
11679+};
11680+
11681+/* Online Control Register 0. This register controls various units within
11682+ * BladeEngine being in an Online or Offline state.
11683+ */
11684+struct BE_PCICFG_ONLINE0_CSR_AMAP {
11685+ u8 cev_online; /* DWORD 0 */
11686+ u8 ctx_online; /* DWORD 0 */
11687+ u8 dbuf_online; /* DWORD 0 */
11688+ u8 erx_online; /* DWORD 0 */
11689+ u8 host_online; /* DWORD 0 */
11690+ u8 mpu_online; /* DWORD 0 */
11691+ u8 ndma_online; /* DWORD 0 */
11692+ u8 ptc_online; /* DWORD 0 */
11693+ u8 rdma_online; /* DWORD 0 */
11694+ u8 rxf_online; /* DWORD 0 */
11695+ u8 rxips_online; /* DWORD 0 */
11696+ u8 rxulp0_online; /* DWORD 0 */
11697+ u8 rxulp1_online; /* DWORD 0 */
11698+ u8 rxulp2_online; /* DWORD 0 */
11699+ u8 tim_online; /* DWORD 0 */
11700+ u8 tpost_online; /* DWORD 0 */
11701+ u8 tpre_online; /* DWORD 0 */
11702+ u8 txips_online; /* DWORD 0 */
11703+ u8 txulp0_online; /* DWORD 0 */
11704+ u8 txulp1_online; /* DWORD 0 */
11705+ u8 uc_online; /* DWORD 0 */
11706+ u8 wdma_online; /* DWORD 0 */
11707+ u8 txulp2_online; /* DWORD 0 */
11708+ u8 host1_online; /* DWORD 0 */
11709+ u8 p0_ob_link_online; /* DWORD 0 */
11710+ u8 p1_ob_link_online; /* DWORD 0 */
11711+ u8 host_gpio_online; /* DWORD 0 */
11712+ u8 mbox_netw_online; /* DWORD 0 */
11713+ u8 mbox_stor_online; /* DWORD 0 */
11714+ u8 axgmac0_online; /* DWORD 0 */
11715+ u8 axgmac1_online; /* DWORD 0 */
11716+ u8 mpu_intpend_online; /* DWORD 0 */
11717+} __packed;
11718+struct PCICFG_ONLINE0_CSR_AMAP {
11719+ u32 dw[1];
11720+};
11721+
11722+/* Online Control Register 1. This register controls various units within
11723+ * BladeEngine being in an Online or Offline state.
11724+ */
11725+struct BE_PCICFG_ONLINE1_CSR_AMAP {
11726+ u8 jtag_online; /* DWORD 0 */
11727+ u8 lpcmemhost_online; /* DWORD 0 */
11728+ u8 mgmt_mac_online; /* DWORD 0 */
11729+ u8 mpu_iram_online; /* DWORD 0 */
11730+ u8 pcs0online_online; /* DWORD 0 */
11731+ u8 pcs1online_online; /* DWORD 0 */
11732+ u8 pctl0_online; /* DWORD 0 */
11733+ u8 pctl1_online; /* DWORD 0 */
11734+ u8 pmem_online; /* DWORD 0 */
11735+ u8 rr_online; /* DWORD 0 */
11736+ u8 rxpp_online; /* DWORD 0 */
11737+ u8 txpb_online; /* DWORD 0 */
11738+ u8 txp_online; /* DWORD 0 */
11739+ u8 xaui_online; /* DWORD 0 */
11740+ u8 arm_online; /* DWORD 0 */
11741+ u8 ipc_online; /* DWORD 0 */
11742+ u8 rsvd0[16]; /* DWORD 0 */
11743+} __packed;
11744+struct PCICFG_ONLINE1_CSR_AMAP {
11745+ u32 dw[1];
11746+};
11747+
11748+/* Host Timer Register. */
11749+struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP {
11750+ u8 hosttimer[24]; /* DWORD 0 */
11751+ u8 hostintr; /* DWORD 0 */
11752+ u8 rsvd0[7]; /* DWORD 0 */
11753+} __packed;
11754+struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP {
11755+ u32 dw[1];
11756+};
11757+
11758+/* Scratchpad Register (for software use). */
11759+struct BE_PCICFG_SCRATCHPAD_CSR_AMAP {
11760+ u8 scratchpad[32]; /* DWORD 0 */
11761+} __packed;
11762+struct PCICFG_SCRATCHPAD_CSR_AMAP {
11763+ u32 dw[1];
11764+};
11765+
11766+/* PCI Express Capabilities Register. */
11767+struct BE_PCICFG_PCIE_CAP_CSR_AMAP {
11768+ u8 capid[8]; /* DWORD 0 */
11769+ u8 nextcap[8]; /* DWORD 0 */
11770+ u8 capver[4]; /* DWORD 0 */
11771+ u8 devport[4]; /* DWORD 0 */
11772+ u8 rsvd0[6]; /* DWORD 0 */
11773+ u8 rsvd1[2]; /* DWORD 0 */
11774+} __packed;
11775+struct PCICFG_PCIE_CAP_CSR_AMAP {
11776+ u32 dw[1];
11777+};
11778+
11779+/* PCI Express Device Capabilities Register. */
11780+struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP {
11781+ u8 payload[3]; /* DWORD 0 */
11782+ u8 rsvd0[3]; /* DWORD 0 */
11783+ u8 lo_lat[3]; /* DWORD 0 */
11784+ u8 l1_lat[3]; /* DWORD 0 */
11785+ u8 rsvd1[3]; /* DWORD 0 */
11786+ u8 rsvd2[3]; /* DWORD 0 */
11787+ u8 pwr_value[8]; /* DWORD 0 */
11788+ u8 pwr_scale[2]; /* DWORD 0 */
11789+ u8 rsvd3[4]; /* DWORD 0 */
11790+} __packed;
11791+struct PCICFG_PCIE_DEVCAP_CSR_AMAP {
11792+ u32 dw[1];
11793+};
11794+
11795+/* PCI Express Device Control/Status Registers. */
11796+struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP {
11797+ u8 CorrErrReportEn; /* DWORD 0 */
11798+ u8 NonFatalErrReportEn; /* DWORD 0 */
11799+ u8 FatalErrReportEn; /* DWORD 0 */
11800+ u8 UnsuppReqReportEn; /* DWORD 0 */
11801+ u8 EnableRelaxOrder; /* DWORD 0 */
11802+ u8 Max_Payload_Size[3]; /* DWORD 0 */
11803+ u8 ExtendTagFieldEnable; /* DWORD 0 */
11804+ u8 PhantomFnEnable; /* DWORD 0 */
11805+ u8 AuxPwrPMEnable; /* DWORD 0 */
11806+ u8 EnableNoSnoop; /* DWORD 0 */
11807+ u8 Max_Read_Req_Size[3]; /* DWORD 0 */
11808+ u8 rsvd0; /* DWORD 0 */
11809+ u8 CorrErrDetect; /* DWORD 0 */
11810+ u8 NonFatalErrDetect; /* DWORD 0 */
11811+ u8 FatalErrDetect; /* DWORD 0 */
11812+ u8 UnsuppReqDetect; /* DWORD 0 */
11813+ u8 AuxPwrDetect; /* DWORD 0 */
11814+ u8 TransPending; /* DWORD 0 */
11815+ u8 rsvd1[10]; /* DWORD 0 */
11816+} __packed;
11817+struct PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP {
11818+ u32 dw[1];
11819+};
11820+
11821+/* PCI Express Link Capabilities Register. */
11822+struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP {
11823+ u8 MaxLinkSpeed[4]; /* DWORD 0 */
11824+ u8 MaxLinkWidth[6]; /* DWORD 0 */
11825+ u8 ASPMSupport[2]; /* DWORD 0 */
11826+ u8 L0sExitLat[3]; /* DWORD 0 */
11827+ u8 L1ExitLat[3]; /* DWORD 0 */
11828+ u8 rsvd0[6]; /* DWORD 0 */
11829+ u8 PortNum[8]; /* DWORD 0 */
11830+} __packed;
11831+struct PCICFG_PCIE_LINK_CAP_CSR_AMAP {
11832+ u32 dw[1];
11833+};
11834+
11835+/* PCI Express Link Status Register. */
11836+struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP {
11837+ u8 ASPMCtl[2]; /* DWORD 0 */
11838+ u8 rsvd0; /* DWORD 0 */
11839+ u8 ReadCmplBndry; /* DWORD 0 */
11840+ u8 LinkDisable; /* DWORD 0 */
11841+ u8 RetrainLink; /* DWORD 0 */
11842+ u8 CommonClkConfig; /* DWORD 0 */
11843+ u8 ExtendSync; /* DWORD 0 */
11844+ u8 rsvd1[8]; /* DWORD 0 */
11845+ u8 LinkSpeed[4]; /* DWORD 0 */
11846+ u8 NegLinkWidth[6]; /* DWORD 0 */
11847+ u8 LinkTrainErr; /* DWORD 0 */
11848+ u8 LinkTrain; /* DWORD 0 */
11849+ u8 SlotClkConfig; /* DWORD 0 */
11850+ u8 rsvd2[3]; /* DWORD 0 */
11851+} __packed;
11852+struct PCICFG_PCIE_LINK_STATUS_CSR_AMAP {
11853+ u32 dw[1];
11854+};
11855+
11856+/* PCI Express MSI Configuration Register. */
11857+struct BE_PCICFG_MSI_CSR_AMAP {
11858+ u8 capid[8]; /* DWORD 0 */
11859+ u8 nextptr[8]; /* DWORD 0 */
11860+ u8 tablesize[11]; /* DWORD 0 */
11861+ u8 rsvd0[3]; /* DWORD 0 */
11862+ u8 funcmask; /* DWORD 0 */
11863+ u8 en; /* DWORD 0 */
11864+} __packed;
11865+struct PCICFG_MSI_CSR_AMAP {
11866+ u32 dw[1];
11867+};
11868+
11869+/* MSI-X Table Offset Register. */
11870+struct BE_PCICFG_MSIX_TABLE_CSR_AMAP {
11871+ u8 tablebir[3]; /* DWORD 0 */
11872+ u8 offset[29]; /* DWORD 0 */
11873+} __packed;
11874+struct PCICFG_MSIX_TABLE_CSR_AMAP {
11875+ u32 dw[1];
11876+};
11877+
11878+/* MSI-X PBA Offset Register. */
11879+struct BE_PCICFG_MSIX_PBA_CSR_AMAP {
11880+ u8 pbabir[3]; /* DWORD 0 */
11881+ u8 offset[29]; /* DWORD 0 */
11882+} __packed;
11883+struct PCICFG_MSIX_PBA_CSR_AMAP {
11884+ u32 dw[1];
11885+};
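/*
 * The two structs above encode the standard MSI-X split: the low three
 * bits of each register select the BAR (the BIR), and the remaining 29
 * bits, shifted left by 3, give the byte offset within that BAR. A
 * hypothetical sketch of resolving the table address:
 */
static inline u64 be_msix_table_addr(const u64 *bar_base, u32 table_dw)
{
	u32 bir = amap_read_bits(&table_dw, 0, 3);
	u64 offset = (u64)amap_read_bits(&table_dw, 3, 29) << 3;

	return bar_base[bir] + offset;
}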
11886+
11887+/* PCI Express MSI-X Message Vector Control Register. */
11888+struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP {
11889+ u8 vector_control; /* DWORD 0 */
11890+ u8 rsvd0[31]; /* DWORD 0 */
11891+} __packed;
11892+struct PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP {
11893+ u32 dw[1];
11894+};
11895+
11896+/* PCI Express MSI-X Message Data Register. */
11897+struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP {
11898+ u8 data[16]; /* DWORD 0 */
11899+ u8 rsvd0[16]; /* DWORD 0 */
11900+} __packed;
11901+struct PCICFG_MSIX_MSG_DATA_CSR_AMAP {
11902+ u32 dw[1];
11903+};
11904+
11905+/* PCI Express MSI-X Message Address Register - High Part. */
11906+struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP {
11907+ u8 addr[32]; /* DWORD 0 */
11908+} __packed;
11909+struct PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP {
11910+ u32 dw[1];
11911+};
11912+
11913+/* PCI Express MSI-X Message Address Register - Low Part. */
11914+struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP {
11915+ u8 rsvd0[2]; /* DWORD 0 */
11916+ u8 addr[30]; /* DWORD 0 */
11917+} __packed;
11918+struct PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP {
11919+ u32 dw[1];
11920+};
11921+
11922+struct BE_PCICFG_ANON_18_RSVD_AMAP {
11923+ u8 rsvd0[32]; /* DWORD 0 */
11924+} __packed;
11925+struct PCICFG_ANON_18_RSVD_AMAP {
11926+ u32 dw[1];
11927+};
11928+
11929+struct BE_PCICFG_ANON_19_RSVD_AMAP {
11930+ u8 rsvd0[32]; /* DWORD 0 */
11931+} __packed;
11932+struct PCICFG_ANON_19_RSVD_AMAP {
11933+ u32 dw[1];
11934+};
11935+
11936+struct BE_PCICFG_ANON_20_RSVD_AMAP {
11937+ u8 rsvd0[32]; /* DWORD 0 */
11938+ u8 rsvd1[25][32]; /* DWORD 1 */
11939+} __packed;
11940+struct PCICFG_ANON_20_RSVD_AMAP {
11941+ u32 dw[26];
11942+};
11943+
11944+struct BE_PCICFG_ANON_21_RSVD_AMAP {
11945+ u8 rsvd0[32]; /* DWORD 0 */
11946+ u8 rsvd1[1919][32]; /* DWORD 1 */
11947+} __packed;
11948+struct PCICFG_ANON_21_RSVD_AMAP {
11949+ u32 dw[1920];
11950+};
11951+
11952+struct BE_PCICFG_ANON_22_MESSAGE_AMAP {
11953+ struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl;
11954+ struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data;
11955+ struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi;
11956+ struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low;
11957+} __packed;
11958+struct PCICFG_ANON_22_MESSAGE_AMAP {
11959+ u32 dw[4];
11960+};
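/*
 * Each of the 32 MSI-X table entries is this 4-DWORD group, in the order
 * declared above (vector control, message data, address high, address
 * low). An illustrative fill -- in practice a driver lets the kernel's
 * MSI-X core program the table rather than writing it by hand:
 */
static inline void be_fill_msix_entry(u32 entry[4], u64 addr, u16 data,
				      bool masked)
{
	entry[0] = masked ? 1u : 0u;		/* vec_ctrl: bit 0 masks */
	entry[1] = data;			/* data in the low 16 bits */
	entry[2] = (u32)(addr >> 32);		/* addr_hi */
	entry[3] = (u32)addr & ~3u;		/* addr_low, DWORD aligned */
}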
11961+
11962+struct BE_PCICFG_ANON_23_RSVD_AMAP {
11963+ u8 rsvd0[32]; /* DWORD 0 */
11964+ u8 rsvd1[895][32]; /* DWORD 1 */
11965+} __packed;
11966+struct PCICFG_ANON_23_RSVD_AMAP {
11967+ u32 dw[896];
11968+};
11969+
11970+/* These PCI Configuration Space registers are for the Storage Function of
11971+ * BladeEngine (Function 0). The memory map of these registers is laid
11972+ * out in the structure below.
11973+ */
11974+struct BE_PCICFG0_CSRMAP_AMAP {
11975+ struct BE_PCICFG_ID_CSR_AMAP id;
11976+ u8 rsvd0[32]; /* DWORD 1 */
11977+ u8 rsvd1[32]; /* DWORD 2 */
11978+ u8 rsvd2[32]; /* DWORD 3 */
11979+ struct BE_PCICFG_IOBAR_CSR_AMAP iobar;
11980+ struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0;
11981+ struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo;
11982+ struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi;
11983+ struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo;
11984+ struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi;
11985+ u8 rsvd3[32]; /* DWORD 10 */
11986+ struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP subsystem_id;
11987+ u8 rsvd4[32]; /* DWORD 12 */
11988+ u8 rsvd5[32]; /* DWORD 13 */
11989+ u8 rsvd6[32]; /* DWORD 14 */
11990+ u8 rsvd7[32]; /* DWORD 15 */
11991+ struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4];
11992+ struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
11993+ u8 rsvd8[32]; /* DWORD 21 */
11994+ struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad;
11995+ u8 rsvd9[32]; /* DWORD 23 */
11996+ u8 rsvd10[32]; /* DWORD 24 */
11997+ u8 rsvd11[32]; /* DWORD 25 */
11998+ u8 rsvd12[32]; /* DWORD 26 */
11999+ u8 rsvd13[32]; /* DWORD 27 */
12000+ u8 rsvd14[2][32]; /* DWORD 28 */
12001+ u8 rsvd15[32]; /* DWORD 30 */
12002+ u8 rsvd16[32]; /* DWORD 31 */
12003+ u8 rsvd17[8][32]; /* DWORD 32 */
12004+ struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low;
12005+ struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi;
12006+ struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask;
12007+ struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask;
12008+ struct BE_PCICFG_ONLINE0_CSR_AMAP online0;
12009+ struct BE_PCICFG_ONLINE1_CSR_AMAP online1;
12010+ u8 rsvd18[32]; /* DWORD 46 */
12011+ u8 rsvd19[32]; /* DWORD 47 */
12012+ u8 rsvd20[32]; /* DWORD 48 */
12013+ u8 rsvd21[32]; /* DWORD 49 */
12014+ struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl;
12015+ u8 rsvd22[32]; /* DWORD 51 */
12016+ struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap;
12017+ struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap;
12018+ struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status;
12019+ struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap;
12020+ struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status;
12021+ struct BE_PCICFG_MSI_CSR_AMAP msi;
12022+ struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset;
12023+ struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset;
12024+ u8 rsvd23[32]; /* DWORD 60 */
12025+ u8 rsvd24[32]; /* DWORD 61 */
12026+ u8 rsvd25[32]; /* DWORD 62 */
12027+ u8 rsvd26[32]; /* DWORD 63 */
12028+ u8 rsvd27[32]; /* DWORD 64 */
12029+ u8 rsvd28[32]; /* DWORD 65 */
12030+ u8 rsvd29[32]; /* DWORD 66 */
12031+ u8 rsvd30[32]; /* DWORD 67 */
12032+ u8 rsvd31[32]; /* DWORD 68 */
12033+ u8 rsvd32[32]; /* DWORD 69 */
12034+ u8 rsvd33[32]; /* DWORD 70 */
12035+ u8 rsvd34[32]; /* DWORD 71 */
12036+ u8 rsvd35[32]; /* DWORD 72 */
12037+ u8 rsvd36[32]; /* DWORD 73 */
12038+ u8 rsvd37[32]; /* DWORD 74 */
12039+ u8 rsvd38[32]; /* DWORD 75 */
12040+ u8 rsvd39[32]; /* DWORD 76 */
12041+ u8 rsvd40[32]; /* DWORD 77 */
12042+ u8 rsvd41[32]; /* DWORD 78 */
12043+ u8 rsvd42[32]; /* DWORD 79 */
12044+ u8 rsvd43[32]; /* DWORD 80 */
12045+ u8 rsvd44[32]; /* DWORD 81 */
12046+ u8 rsvd45[32]; /* DWORD 82 */
12047+ u8 rsvd46[32]; /* DWORD 83 */
12048+ u8 rsvd47[32]; /* DWORD 84 */
12049+ u8 rsvd48[32]; /* DWORD 85 */
12050+ u8 rsvd49[32]; /* DWORD 86 */
12051+ u8 rsvd50[32]; /* DWORD 87 */
12052+ u8 rsvd51[32]; /* DWORD 88 */
12053+ u8 rsvd52[32]; /* DWORD 89 */
12054+ u8 rsvd53[32]; /* DWORD 90 */
12055+ u8 rsvd54[32]; /* DWORD 91 */
12056+ u8 rsvd55[32]; /* DWORD 92 */
12057+ u8 rsvd56[832]; /* DWORD 93 */
12058+ u8 rsvd57[32]; /* DWORD 119 */
12059+ u8 rsvd58[32]; /* DWORD 120 */
12060+ u8 rsvd59[32]; /* DWORD 121 */
12061+ u8 rsvd60[32]; /* DWORD 122 */
12062+ u8 rsvd61[32]; /* DWORD 123 */
12063+ u8 rsvd62[32]; /* DWORD 124 */
12064+ u8 rsvd63[32]; /* DWORD 125 */
12065+ u8 rsvd64[32]; /* DWORD 126 */
12066+ u8 rsvd65[32]; /* DWORD 127 */
12067+ u8 rsvd66[61440]; /* DWORD 128 */
12068+ struct BE_PCICFG_ANON_22_MESSAGE_AMAP message[32];
12069+ u8 rsvd67[28672]; /* DWORD 2176 */
12070+ u8 rsvd68[32]; /* DWORD 3072 */
12071+ u8 rsvd69[1023][32]; /* DWORD 3073 */
12072+} __packed;
12073+struct PCICFG0_CSRMAP_AMAP {
12074+ u32 dw[4096];
12075+};
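/*
 * Byte-offset sketch: because every "u8 field[n]" contributes n bytes to
 * the packed BE_ struct, offsetof() there is really a *bit* offset into
 * the register file, and dividing by 8 yields the config-space byte
 * offset. The macro name is illustrative, not necessarily the driver's.
 */
#include <linux/stddef.h>	/* offsetof */

#define PCICFG0_BYTE_OFFSET(field) \
	(offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field) / 8)

/* e.g. PCICFG0_BYTE_OFFSET(soft_reset) == 0x50 (DWORD 20 in the map) */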
12076+
12077+struct BE_PCICFG_ANON_24_RSVD_AMAP {
12078+ u8 rsvd0[32]; /* DWORD 0 */
12079+} __packed;
12080+struct PCICFG_ANON_24_RSVD_AMAP {
12081+ u32 dw[1];
12082+};
12083+
12084+struct BE_PCICFG_ANON_25_RSVD_AMAP {
12085+ u8 rsvd0[32]; /* DWORD 0 */
12086+} __packed;
12087+struct PCICFG_ANON_25_RSVD_AMAP {
12088+ u32 dw[1];
12089+};
12090+
12091+struct BE_PCICFG_ANON_26_RSVD_AMAP {
12092+ u8 rsvd0[32]; /* DWORD 0 */
12093+} __packed;
12094+struct PCICFG_ANON_26_RSVD_AMAP {
12095+ u32 dw[1];
12096+};
12097+
12098+struct BE_PCICFG_ANON_27_RSVD_AMAP {
12099+ u8 rsvd0[32]; /* DWORD 0 */
12100+ u8 rsvd1[32]; /* DWORD 1 */
12101+} __packed;
12102+struct PCICFG_ANON_27_RSVD_AMAP {
12103+ u32 dw[2];
12104+};
12105+
12106+struct BE_PCICFG_ANON_28_RSVD_AMAP {
12107+ u8 rsvd0[32]; /* DWORD 0 */
12108+ u8 rsvd1[3][32]; /* DWORD 1 */
12109+} __packed;
12110+struct PCICFG_ANON_28_RSVD_AMAP {
12111+ u32 dw[4];
12112+};
12113+
12114+struct BE_PCICFG_ANON_29_RSVD_AMAP {
12115+ u8 rsvd0[32]; /* DWORD 0 */
12116+ u8 rsvd1[36][32]; /* DWORD 1 */
12117+} __packed;
12118+struct PCICFG_ANON_29_RSVD_AMAP {
12119+ u32 dw[37];
12120+};
12121+
12122+struct BE_PCICFG_ANON_30_RSVD_AMAP {
12123+ u8 rsvd0[32]; /* DWORD 0 */
12124+ u8 rsvd1[1930][32]; /* DWORD 1 */
12125+} __packed;
12126+struct PCICFG_ANON_30_RSVD_AMAP {
12127+ u32 dw[1931];
12128+};
12129+
12130+struct BE_PCICFG_ANON_31_MESSAGE_AMAP {
12131+ struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl;
12132+ struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data;
12133+ struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi;
12134+ struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low;
12135+} __packed;
12136+struct PCICFG_ANON_31_MESSAGE_AMAP {
12137+ u32 dw[4];
12138+};
12139+
12140+struct BE_PCICFG_ANON_32_RSVD_AMAP {
12141+ u8 rsvd0[32]; /* DWORD 0 */
12142+ u8 rsvd1[895][32]; /* DWORD 1 */
12143+} __packed;
12144+struct PCICFG_ANON_32_RSVD_AMAP {
12145+ u32 dw[896];
12146+};
12147+
12148+/* This PCI configuration space register map is for the Networking Function of
12149+ * BladeEngine (Function 1).
12150+ */
12151+struct BE_PCICFG1_CSRMAP_AMAP {
12152+ struct BE_PCICFG_ID_CSR_AMAP id;
12153+ u8 rsvd0[32]; /* DWORD 1 */
12154+ u8 rsvd1[32]; /* DWORD 2 */
12155+ u8 rsvd2[32]; /* DWORD 3 */
12156+ struct BE_PCICFG_IOBAR_CSR_AMAP iobar;
12157+ struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0;
12158+ struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo;
12159+ struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi;
12160+ struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo;
12161+ struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi;
12162+ u8 rsvd3[32]; /* DWORD 10 */
12163+ struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP subsystem_id;
12164+ u8 rsvd4[32]; /* DWORD 12 */
12165+ u8 rsvd5[32]; /* DWORD 13 */
12166+ u8 rsvd6[32]; /* DWORD 14 */
12167+ u8 rsvd7[32]; /* DWORD 15 */
12168+ struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4];
12169+ struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
12170+ u8 rsvd8[32]; /* DWORD 21 */
12171+ struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad;
12172+ u8 rsvd9[32]; /* DWORD 23 */
12173+ u8 rsvd10[32]; /* DWORD 24 */
12174+ u8 rsvd11[32]; /* DWORD 25 */
12175+ u8 rsvd12[32]; /* DWORD 26 */
12176+ u8 rsvd13[32]; /* DWORD 27 */
12177+ u8 rsvd14[2][32]; /* DWORD 28 */
12178+ u8 rsvd15[32]; /* DWORD 30 */
12179+ u8 rsvd16[32]; /* DWORD 31 */
12180+ u8 rsvd17[8][32]; /* DWORD 32 */
12181+ struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low;
12182+ struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi;
12183+ struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask;
12184+ struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask;
12185+ struct BE_PCICFG_ONLINE0_CSR_AMAP online0;
12186+ struct BE_PCICFG_ONLINE1_CSR_AMAP online1;
12187+ u8 rsvd18[32]; /* DWORD 46 */
12188+ u8 rsvd19[32]; /* DWORD 47 */
12189+ u8 rsvd20[32]; /* DWORD 48 */
12190+ u8 rsvd21[32]; /* DWORD 49 */
12191+ struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl;
12192+ u8 rsvd22[32]; /* DWORD 51 */
12193+ struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap;
12194+ struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap;
12195+ struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status;
12196+ struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap;
12197+ struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status;
12198+ struct BE_PCICFG_MSI_CSR_AMAP msi;
12199+ struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset;
12200+ struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset;
12201+ u8 rsvd23[64]; /* DWORD 60 */
12202+ u8 rsvd24[32]; /* DWORD 62 */
12203+ u8 rsvd25[32]; /* DWORD 63 */
12204+ u8 rsvd26[32]; /* DWORD 64 */
12205+ u8 rsvd27[32]; /* DWORD 65 */
12206+ u8 rsvd28[32]; /* DWORD 66 */
12207+ u8 rsvd29[32]; /* DWORD 67 */
12208+ u8 rsvd30[32]; /* DWORD 68 */
12209+ u8 rsvd31[32]; /* DWORD 69 */
12210+ u8 rsvd32[32]; /* DWORD 70 */
12211+ u8 rsvd33[32]; /* DWORD 71 */
12212+ u8 rsvd34[32]; /* DWORD 72 */
12213+ u8 rsvd35[32]; /* DWORD 73 */
12214+ u8 rsvd36[32]; /* DWORD 74 */
12215+ u8 rsvd37[128]; /* DWORD 75 */
12216+ u8 rsvd38[32]; /* DWORD 79 */
12217+ u8 rsvd39[1184]; /* DWORD 80 */
12218+ u8 rsvd40[61792]; /* DWORD 117 */
12219+ struct BE_PCICFG_ANON_31_MESSAGE_AMAP message[32];
12220+ u8 rsvd41[28672]; /* DWORD 2176 */
12221+ u8 rsvd42[32]; /* DWORD 3072 */
12222+ u8 rsvd43[1023][32]; /* DWORD 3073 */
12223+} __packed;
12224+struct PCICFG1_CSRMAP_AMAP {
12225+ u32 dw[4096];
12226+};
12227+
12228+#endif /* __pcicfg_amap_h__ */
12229--- /dev/null
12230+++ b/drivers/staging/benet/post_codes.h
12231@@ -0,0 +1,111 @@
12232+/*
12233+ * Copyright (C) 2005 - 2008 ServerEngines
12234+ * All rights reserved.
12235+ *
12236+ * This program is free software; you can redistribute it and/or
12237+ * modify it under the terms of the GNU General Public License version 2
12238+ * as published by the Free Software Foundation. The full GNU General
12239+ * Public License is included in this distribution in the file called COPYING.
12240+ *
12241+ * Contact Information:
12242+ * linux-drivers@serverengines.com
12243+ *
12244+ * ServerEngines
12245+ * 209 N. Fair Oaks Ave
12246+ * Sunnyvale, CA 94085
12247+ */
12248+/*
12249+ * Autogenerated by srcgen version: 0127
12250+ */
12251+#ifndef __post_codes_amap_h__
12252+#define __post_codes_amap_h__
12253+
12254+/* --- MGMT_HBA_POST_STAGE_ENUM --- */
12255+#define POST_STAGE_POWER_ON_RESET (0) /* State after a cold or warm boot. */
12256+#define POST_STAGE_AWAITING_HOST_RDY (1) /* ARM boot code awaiting a
12257+ go-ahead from the host. */
12258+#define POST_STAGE_HOST_RDY (2) /* Host has given go-ahead to ARM. */
12259+#define POST_STAGE_BE_RESET (3) /* Host wants to reset the chip; this is
12260+ a chip workaround. */
12261+#define POST_STAGE_SEEPROM_CS_START (256) /* SEEPROM checksum
12262+ test start. */
12263+#define POST_STAGE_SEEPROM_CS_DONE (257) /* SEEPROM checksum test
12264+ done. */
12265+#define POST_STAGE_DDR_CONFIG_START (512) /* DDR configuration start. */
12266+#define POST_STAGE_DDR_CONFIG_DONE (513) /* DDR configuration done. */
12267+#define POST_STAGE_DDR_CALIBRATE_START (768) /* DDR calibration start. */
12268+#define POST_STAGE_DDR_CALIBRATE_DONE (769) /* DDR calibration done. */
12269+#define POST_STAGE_DDR_TEST_START (1024) /* DDR memory test start. */
12270+#define POST_STAGE_DDR_TEST_DONE (1025) /* DDR memory test done. */
12271+#define POST_STAGE_REDBOOT_INIT_START (1536) /* Redboot starts execution. */
12272+#define POST_STAGE_REDBOOT_INIT_DONE (1537) /* Redboot finished execution. */
12273+#define POST_STAGE_FW_IMAGE_LOAD_START (1792) /* Firmware image load to
12274+ DDR start. */
12275+#define POST_STAGE_FW_IMAGE_LOAD_DONE (1793) /* Firmware image load
12276+ to DDR done. */
12277+#define POST_STAGE_ARMFW_START (2048) /* ARMfw runtime code
12278+ starts execution. */
12279+#define POST_STAGE_DHCP_QUERY_START (2304) /* DHCP server query start. */
12280+#define POST_STAGE_DHCP_QUERY_DONE (2305) /* DHCP server query done. */
12281+#define POST_STAGE_BOOT_TARGET_DISCOVERY_START (2560) /* Boot Target
12282+ Discovery Start. */
12283+#define POST_STAGE_BOOT_TARGET_DISCOVERY_DONE (2561) /* Boot Target
12284+ Discovery Done. */
12285+#define POST_STAGE_RC_OPTION_SET (2816) /* Remote configuration
12286+ option is set in SEEPROM */
12287+#define POST_STAGE_SWITCH_LINK (2817) /* Wait for link up on switch */
12288+#define POST_STAGE_SEND_ICDS_MESSAGE (2818) /* Send the ICDS message
12289+ to switch */
12290+#define POST_STAGE_PERFROM_TFTP (2819) /* Download XML file using TFTP */
12291+#define POST_STAGE_PARSE_XML (2820) /* Parse XML file */
12292+#define POST_STAGE_DOWNLOAD_IMAGE (2821) /* Download IMAGE from
12293+ TFTP server */
12294+#define POST_STAGE_FLASH_IMAGE (2822) /* Flash the IMAGE */
12295+#define POST_STAGE_RC_DONE (2823) /* Remote configuration
12296+ complete */
12297+#define POST_STAGE_REBOOT_SYSTEM (2824) /* Upgrade IMAGE done,
12298+ reboot required */
12299+#define POST_STAGE_MAC_ADDRESS (3072) /* MAC Address Check */
12300+#define POST_STAGE_ARMFW_READY (49152) /* ARMfw is done with POST
12301+ and ready. */
12302+#define POST_STAGE_ARMFW_UE (61440) /* ARMfw has asserted an
12303+ unrecoverable error. The
12304+ lower 3 hex digits of the
12305+ stage code identify the
12306+ unique error code.
12307+ */
12308+
12309+/* This structure defines the format of the MPU semaphore
12310+ * register when used for POST.
12311+ */
12312+struct BE_MGMT_HBA_POST_STATUS_STRUCT_AMAP {
12313+ u8 stage[16]; /* DWORD 0 */
12314+ u8 rsvd0[10]; /* DWORD 0 */
12315+ u8 iscsi_driver_loaded; /* DWORD 0 */
12316+ u8 option_rom_installed; /* DWORD 0 */
12317+ u8 iscsi_ip_conflict; /* DWORD 0 */
12318+ u8 iscsi_no_ip; /* DWORD 0 */
12319+ u8 backup_fw; /* DWORD 0 */
12320+ u8 error; /* DWORD 0 */
12321+} __packed;
12322+struct MGMT_HBA_POST_STATUS_STRUCT_AMAP {
12323+ u32 dw[1];
12324+};
12325+
12326+/* --- MGMT_HBA_POST_DUMMY_BITS_ENUM --- */
12327+#define POST_BIT_ISCSI_LOADED (26)
12328+#define POST_BIT_OPTROM_INST (27)
12329+#define POST_BIT_BAD_IP_ADDR (28)
12330+#define POST_BIT_NO_IP_ADDR (29)
12331+#define POST_BIT_BACKUP_FW (30)
12332+#define POST_BIT_ERROR (31)
12333+
12334+/* --- MGMT_HBA_POST_DUMMY_VALUES_ENUM --- */
12335+#define POST_ISCSI_DRIVER_LOADED (67108864)
12336+#define POST_OPTROM_INSTALLED (134217728)
12337+#define POST_ISCSI_IP_ADDRESS_CONFLICT (268435456)
12338+#define POST_ISCSI_NO_IP_ADDRESS (536870912)
12339+#define POST_BACKUP_FW_LOADED (1073741824)
12340+#define POST_FATAL_ERROR (2147483648)
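/*
 * Decode sketch tying the three groups above together: the POST value
 * read from the MPU semaphore carries the stage in its low 16 bits, with
 * the POST_BIT_* flags above it; the DUMMY_VALUES constants are those
 * bits pre-shifted (POST_FATAL_ERROR == 1u << POST_BIT_ERROR).
 * Hypothetical helper:
 */
static inline bool be_post_fw_ready(u32 post_dw)
{
	u32 stage = post_dw & 0xffff;

	if (post_dw & POST_FATAL_ERROR)
		return false;	/* low 3 hex digits carry the error code */
	return stage == POST_STAGE_ARMFW_READY;
}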
12341+
12342+#endif /* __post_codes_amap_h__ */
12343--- /dev/null
12344+++ b/drivers/staging/benet/regmap.h
12345@@ -0,0 +1,68 @@
12346+/*
12347+ * Copyright (C) 2005 - 2008 ServerEngines
12348+ * All rights reserved.
12349+ *
12350+ * This program is free software; you can redistribute it and/or
12351+ * modify it under the terms of the GNU General Public License version 2
12352+ * as published by the Free Software Foundation. The full GNU General
12353+ * Public License is included in this distribution in the file called COPYING.
12354+ *
12355+ * Contact Information:
12356+ * linux-drivers@serverengines.com
12357+ *
12358+ * ServerEngines
12359+ * 209 N. Fair Oaks Ave
12360+ * Sunnyvale, CA 94085
12361+ */
12362+/*
12363+ * Autogenerated by srcgen version: 0127
12364+ */
12365+#ifndef __regmap_amap_h__
12366+#define __regmap_amap_h__
12367+#include "pcicfg.h"
12368+#include "ep.h"
12369+#include "cev.h"
12370+#include "mpu.h"
12371+#include "doorbells.h"
12372+
12373+/*
12374+ * This is the control and status register map for BladeEngine, showing
12375+ * the relative size and offset of each sub-module. The CSR registers
12376+ * are identical for the network and storage PCI functions. The
12377+ * CSR map is shown below, followed by details of each block,
12378+ * in sub-sections. The sub-sections begin with a description
12379+ * of CSRs that are instantiated in multiple blocks.
12380+ */
12381+struct BE_BLADE_ENGINE_CSRMAP_AMAP {
12382+ struct BE_MPU_CSRMAP_AMAP mpu;
12383+ u8 rsvd0[8192]; /* DWORD 256 */
12384+ u8 rsvd1[8192]; /* DWORD 512 */
12385+ struct BE_CEV_CSRMAP_AMAP cev;
12386+ u8 rsvd2[8192]; /* DWORD 1024 */
12387+ u8 rsvd3[8192]; /* DWORD 1280 */
12388+ u8 rsvd4[8192]; /* DWORD 1536 */
12389+ u8 rsvd5[8192]; /* DWORD 1792 */
12390+ u8 rsvd6[8192]; /* DWORD 2048 */
12391+ u8 rsvd7[8192]; /* DWORD 2304 */
12392+ u8 rsvd8[8192]; /* DWORD 2560 */
12393+ u8 rsvd9[8192]; /* DWORD 2816 */
12394+ u8 rsvd10[8192]; /* DWORD 3072 */
12395+ u8 rsvd11[8192]; /* DWORD 3328 */
12396+ u8 rsvd12[8192]; /* DWORD 3584 */
12397+ u8 rsvd13[8192]; /* DWORD 3840 */
12398+ u8 rsvd14[8192]; /* DWORD 4096 */
12399+ u8 rsvd15[8192]; /* DWORD 4352 */
12400+ u8 rsvd16[8192]; /* DWORD 4608 */
12401+ u8 rsvd17[8192]; /* DWORD 4864 */
12402+ u8 rsvd18[8192]; /* DWORD 5120 */
12403+ u8 rsvd19[8192]; /* DWORD 5376 */
12404+ u8 rsvd20[8192]; /* DWORD 5632 */
12405+ u8 rsvd21[8192]; /* DWORD 5888 */
12406+ u8 rsvd22[8192]; /* DWORD 6144 */
12407+ u8 rsvd23[17152][32]; /* DWORD 6400 */
12408+} __packed;
12409+struct BLADE_ENGINE_CSRMAP_AMAP {
12410+ u32 dw[23552];
12411+};
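/*
 * The same byte-offset trick works at the top-level map, assuming the
 * convention sketched earlier: each rsvdN[8192] run above is one
 * 256-DWORD (1 KB) hole, so for example the CEV block begins 0xc00 bytes
 * into the CSR BAR. Illustrative macro, not the driver's own:
 */
#define CSR_BLOCK_BYTE_OFFSET(blk) \
	(offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, blk) / 8)

/* CSR_BLOCK_BYTE_OFFSET(cev) == 0xc00 with this layout */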
12412+
12413+#endif /* __regmap_amap_h__ */
12414--- /dev/null
12415+++ b/drivers/staging/benet/TODO
12416@@ -0,0 +1,7 @@
12417+TODO:
12418+ - fix minor checkpatch.pl issues
12419+ - remove wrappers around common iowrite functions
12420+ - full netdev audit of common problems/issues
12421+
12422+Please send all patches and questions to Subbu Seetharaman
12423+<subbus@serverengines.com> and Greg Kroah-Hartman <greg@kroah.com>
12424--- a/drivers/staging/Kconfig
12425+++ b/drivers/staging/Kconfig
12426@@ -49,4 +49,6 @@ source "drivers/staging/otus/Kconfig"
12427
12428 source "drivers/staging/rt2860/Kconfig"
12429
12430+source "drivers/staging/benet/Kconfig"
12431+
12432 endif # STAGING
12433--- a/drivers/staging/Makefile
12434+++ b/drivers/staging/Makefile
12435@@ -16,3 +16,4 @@ obj-$(CONFIG_USB_ATMEL) += at76_usb/
12436 obj-$(CONFIG_AGNX) += agnx/
12437 obj-$(CONFIG_OTUS) += otus/
12438 obj-$(CONFIG_RT2860) += rt2860/
12439+obj-$(CONFIG_BENET) += benet/