* SOFTWARE.
*
*/
-#include <stdio.h>
#include <strings.h>
#include <errno.h>
#include <gpxe/malloc.h>
/*
- mtnic.c - gPXE driver for Mellanox 10Gig ConnectX EN
+ mtnic.c - gPXE driver for Mellanox 10Gig ConnectX EN
*/
-/* (mcb30) - The Mellanox driver used "1" as a universal error code;
- * this at least makes it a valid error number.
- */
-#define MTNIC_ERROR -EIO
-
-
-/** Set port number to use
- *
- * 0 - port 1
- * 1 - port 2
- */
-#define MTNIC_PORT_NUM 0
-/* Note: for verbose printing do Make ... DEBUG=mtnic */
-
-
-
/********************************************************************
*
{
*va = alloc_memblock(size, alignment);
if (!*va) {
- return MTNIC_ERROR;
+ return -EADDRINUSE;
}
*pa = (u32)virt_to_bus(*va);
return 0;
*
*/
static int
-mtnic_alloc_cmdif(struct mtnic_priv *priv)
+mtnic_alloc_cmdif(struct mtnic *mtnic)
{
- u32 bar = mtnic_pci_dev.dev.bar[0];
+ u32 bar = mtnic_pci_dev.dev.bar[0];
- priv->hcr = ioremap(bar + MTNIC_HCR_BASE, MTNIC_HCR_SIZE);
- if (!priv->hcr) {
- DBG("Couldn't map command register.");
- return MTNIC_ERROR;
+ mtnic->hcr = ioremap(bar + MTNIC_HCR_BASE, MTNIC_HCR_SIZE);
+ if ( !mtnic->hcr ) {
+ DBG("Couldn't map command register\n");
+ return -EADDRINUSE;
}
- mtnic_alloc_aligned(PAGE_SIZE, (void *)&priv->cmd.buf, &priv->cmd.mapping, PAGE_SIZE);
- if (!priv->cmd.buf) {
+ mtnic_alloc_aligned(PAGE_SIZE, (void *)&mtnic->cmd.buf, &mtnic->cmd.mapping, PAGE_SIZE);
+ if ( !mtnic->cmd.buf ) {
DBG("Error in allocating buffer for command interface\n");
- return MTNIC_ERROR;
+ return -EADDRINUSE;
}
- return 0;
+ return 0;
}
/**
static void
mtnic_free_io_buffers(struct mtnic_ring *ring)
{
- int index;
+ int index;
for (; ring->cons <= ring->prod; ++ring->cons) {
index = ring->cons & ring->size_mask;
- if (ring->iobuf[index])
+ if ( ring->iobuf[index] ) {
free_iob(ring->iobuf[index]);
+ }
}
}
*
*/
static int
-mtnic_alloc_iobuf(struct mtnic_priv *priv, struct mtnic_ring *ring,
+mtnic_alloc_iobuf(struct mtnic_port *priv, struct mtnic_ring *ring,
unsigned int size)
{
struct mtnic_rx_desc *rx_desc_ptr = ring->buf;
while ((u32)(ring->prod - ring->cons) < UNITS_BUFFER_SIZE) {
index = ring->prod & ring->size_mask;
ring->iobuf[index] = alloc_iob(size);
- if (!&ring->iobuf[index]) {
+ if (!ring->iobuf[index]) {
if (ring->prod <= (ring->cons + 1)) {
- DBG("Error allocating Rx io "
- "buffer number %x", index);
- /* In case of error freeing io buffer */
- mtnic_free_io_buffers(ring);
- return MTNIC_ERROR;
+ DBG ( "Dropping packet, buffer is full\n" );
}
-
break;
}
/* Attach io_buffer to descriptor */
rx_desc_ptr = ring->buf +
- (sizeof(struct mtnic_rx_desc) * index);
+ (sizeof(struct mtnic_rx_desc) * index);
rx_desc_ptr->data.count = cpu_to_be32(size);
- rx_desc_ptr->data.mem_type = priv->fw.mem_type_snoop_be;
+ rx_desc_ptr->data.mem_type = priv->mtnic->fw.mem_type_snoop_be;
rx_desc_ptr->data.addr_l = cpu_to_be32(
- virt_to_bus(ring->iobuf[index]->data));
+ virt_to_bus(ring->iobuf[index]->data));
++ ring->prod;
}
*
*/
static int
-mtnic_alloc_ring(struct mtnic_priv *priv, struct mtnic_ring *ring,
- u32 size, u16 stride, u16 cq, u8 is_rx)
+mtnic_alloc_ring(struct mtnic_port *priv, struct mtnic_ring *ring,
+ u32 size, u16 stride, u16 cq, u8 is_rx)
{
unsigned int i;
int err;
/* Alloc descriptors buffer */
ring->buf_size = ring->size * ((is_rx) ? sizeof(struct mtnic_rx_desc) :
- sizeof(struct mtnic_tx_desc));
+ sizeof(struct mtnic_tx_desc));
err = mtnic_alloc_aligned(ring->buf_size, (void *)&ring->buf,
- &ring->dma, PAGE_SIZE);
- if (err) {
+ &ring->dma, PAGE_SIZE);
+ if (err) {
DBG("Failed allocating descriptor ring sizeof %x\n",
ring->buf_size);
- return MTNIC_ERROR;
+ return -EADDRINUSE;
}
- memset(ring->buf, 0, ring->buf_size);
+ memset(ring->buf, 0, ring->buf_size);
DBG("Allocated %s ring (addr:%p) - buf:%p size:%x"
"buf_size:%x dma:%lx\n",
if (is_rx) { /* RX ring */
/* Alloc doorbell */
err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record),
- (void *)&ring->db, &ring->db_dma, 32);
+ (void *)&ring->db, &ring->db_dma, 32);
if (err) {
DBG("Failed allocating Rx ring doorbell record\n");
- free(ring->buf);
- return MTNIC_ERROR;
+ free_memblock(ring->buf, ring->buf_size);
+ return -EADDRINUSE;
}
/* ==- Configure Descriptor -== */
/*The last ctrl descriptor is '0' and points to the first one*/
/* Alloc IO_BUFFERS */
- err = mtnic_alloc_iobuf(priv, ring, DEF_IOBUF_SIZE);
+ err = mtnic_alloc_iobuf ( priv, ring, DEF_IOBUF_SIZE );
if (err) {
- DBG("ERROR Allocating io buffer");
- free(ring->buf);
- return MTNIC_ERROR;
+ DBG("ERROR Allocating io buffer\n");
+ free_memblock(ring->buf, ring->buf_size);
+ return -EADDRINUSE;
}
- } else { /* TX ring */
+ } else { /* TX ring */
/* Set initial ownership of all Tx Desc' to SW (1) */
for (i = 0; i < ring->size; i++) {
tx_desc = ring->buf + ring->stride * i;
}
/* DB */
ring->db_offset = cpu_to_be32(
- ((u32) priv->fw.tx_offset[priv->port]) << 8);
+ ((u32) priv->mtnic->fw.tx_offset[priv->port]) << 8);
/* Map Tx+CQ doorbells */
DBG("Mapping TxCQ doorbell at offset:0x%x\n",
- priv->fw.txcq_db_offset);
+ priv->mtnic->fw.txcq_db_offset);
ring->txcq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
- priv->fw.txcq_db_offset, PAGE_SIZE);
+ priv->mtnic->fw.txcq_db_offset, PAGE_SIZE);
if (!ring->txcq_db) {
DBG("Couldn't map txcq doorbell, aborting...\n");
- free(ring->buf);
- return MTNIC_ERROR;
+ free_memblock(ring->buf, ring->buf_size);
+ return -EADDRINUSE;
}
}
*/
static int
mtnic_alloc_cq(struct net_device *dev, int num, struct mtnic_cq *cq,
- u8 is_rx, u32 size, u32 offset_ind)
+ u8 is_rx, u32 size, u32 offset_ind)
{
int err ;
unsigned int i;
/* Alloc doorbell */
err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record),
- (void *)&cq->db, &cq->db_dma, 32);
+ (void *)&cq->db, &cq->db_dma, 32);
if (err) {
DBG("Failed allocating CQ doorbell record\n");
- return MTNIC_ERROR;
+ return -EADDRINUSE;
}
memset(cq->db, 0, sizeof(struct mtnic_cq_db_record));
/* Alloc CQEs buffer */
cq->buf_size = size * sizeof(struct mtnic_cqe);
err = mtnic_alloc_aligned(cq->buf_size,
- (void *)&cq->buf, &cq->dma, PAGE_SIZE);
+ (void *)&cq->buf, &cq->dma, PAGE_SIZE);
if (err) {
DBG("Failed allocating CQ buffer\n");
- free(cq->db);
- return MTNIC_ERROR;
+ free_memblock(cq->db, sizeof(struct mtnic_cq_db_record));
+ return -EADDRINUSE;
}
- memset(cq->buf, 0, cq->buf_size);
- DBG("Allocated CQ (addr:%p) - size:%x buf:%p buf_size:%x "
+ memset(cq->buf, 0, cq->buf_size);
+ DBG("Allocated CQ (addr:%p) - size:%x buf:%p buf_size:%x "
"dma:%lx db:%p db_dma:%lx\n"
"cqn offset:%x \n", cq, cq->size, cq->buf,
cq->buf_size, cq->dma, cq->db,
unsigned int
mtnic_alloc_resources(struct net_device *dev)
{
- struct mtnic_priv *priv = netdev_priv(dev);
- int err;
+ struct mtnic_port *priv = netdev_priv(dev);
+ int err;
int cq_ind = 0;
- int cq_offset = priv->fw.cq_offset;
+ int cq_offset = priv->mtnic->fw.cq_offset;
/* Alloc 1st CQ */
- err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 1 /* RX */,
+ err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 1 /* RX */,
UNITS_BUFFER_SIZE, cq_offset + cq_ind);
if (err) {
DBG("Failed allocating Rx CQ\n");
- return MTNIC_ERROR;
+ return -EADDRINUSE;
}
+
/* Alloc RX */
err = mtnic_alloc_ring(priv, &priv->rx_ring, UNITS_BUFFER_SIZE,
sizeof(struct mtnic_rx_desc), cq_ind, /* RX */1);
goto cq0_error;
}
- ++cq_ind;
+
+ ++cq_ind;
/* alloc 2nd CQ */
err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 0 /* TX */,
return 0;
cq1_error:
- free(priv->cq[1].buf);
- free(priv->cq[1].db);
+ free_memblock(priv->cq[1].buf, priv->cq[1].buf_size);
+ free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
+
rx_error:
- free(priv->rx_ring.buf);
- free(priv->rx_ring.db);
+ free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size);
+ free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
mtnic_free_io_buffers(&priv->rx_ring);
cq0_error:
- free(priv->cq[0].buf);
- free(priv->cq[0].db);
+ free_memblock(priv->cq[0].buf, priv->cq[0].buf_size);
+ free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));
- return MTNIC_ERROR;
+ return -EADDRINUSE;
}
* Note: EQ is not used by the driver but must be allocated
*/
static int
-mtnic_alloc_eq(struct mtnic_priv *priv)
+mtnic_alloc_eq(struct mtnic *mtnic)
{
int err;
unsigned int i;
struct mtnic_eqe *eqe_desc = NULL;
/* Allocating doorbell */
- priv->eq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
- priv->fw.eq_db_offset, sizeof(u32));
- if (!priv->eq_db) {
+ mtnic->eq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
+ mtnic->fw.eq_db_offset, sizeof(u32));
+ if (!mtnic->eq_db) {
DBG("Couldn't map EQ doorbell, aborting...\n");
- return MTNIC_ERROR;
+ return -EADDRINUSE;
}
/* Allocating buffer */
- priv->eq.size = NUM_EQES;
- priv->eq.buf_size = priv->eq.size * sizeof(struct mtnic_eqe);
- err = mtnic_alloc_aligned(priv->eq.buf_size, (void *)&priv->eq.buf,
- &priv->eq.dma, PAGE_SIZE);
+ mtnic->eq.size = NUM_EQES;
+ mtnic->eq.buf_size = mtnic->eq.size * sizeof(struct mtnic_eqe);
+ err = mtnic_alloc_aligned(mtnic->eq.buf_size, (void *)&mtnic->eq.buf,
+ &mtnic->eq.dma, PAGE_SIZE);
if (err) {
DBG("Failed allocating EQ buffer\n");
- iounmap(priv->eq_db);
- return MTNIC_ERROR;
+ iounmap(mtnic->eq_db);
+ return -EADDRINUSE;
}
- memset(priv->eq.buf, 0, priv->eq.buf_size);
+ memset(mtnic->eq.buf, 0, mtnic->eq.buf_size);
- for (i = 0; i < priv->eq.size; i++)
- eqe_desc = priv->eq.buf + (sizeof(struct mtnic_eqe) * i);
- eqe_desc->own |= MTNIC_BIT_EQE_OWN;
+ for (i = 0; i < mtnic->eq.size; i++)
+ eqe_desc = mtnic->eq.buf + (sizeof(struct mtnic_eqe) * i);
+ eqe_desc->own |= MTNIC_BIT_EQE_OWN;
mdelay(20);
return 0;
*
*********************************************************************/
static inline int
-cmdif_go_bit(struct mtnic_priv *priv)
+cmdif_go_bit(struct mtnic *mtnic)
{
- struct mtnic_if_cmd_reg *hcr = priv->hcr;
+ struct mtnic_if_cmd_reg *hcr = mtnic->hcr;
u32 status;
int i;
for (i = 0; i < TBIT_RETRIES; i++) {
status = be32_to_cpu(readl(&hcr->status_go_opcode));
if ((status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_T_BIT)) ==
- (priv->cmd.tbit << MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT))) {
+ (mtnic->cmd.tbit << MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT))) {
/* Read expected t-bit - now return go-bit value */
return status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT);
}
}
DBG("Invalid tbit after %d retries!\n", TBIT_RETRIES);
- return 1; /* Return busy... */
+ return -EBUSY; /* Return busy... */
}
/* Base Command interface */
static int
-mtnic_cmd(struct mtnic_priv *priv, void *in_imm,
+mtnic_cmd(struct mtnic *mtnic, void *in_imm,
void *out_imm, u32 in_modifier, u16 op)
{
- struct mtnic_if_cmd_reg *hcr = priv->hcr;
+ struct mtnic_if_cmd_reg *hcr = mtnic->hcr;
int err = 0;
u32 out_param_h = 0;
u32 out_param_l = 0;
token++;
- if (cmdif_go_bit(priv)) {
+ if ( cmdif_go_bit ( mtnic ) ) {
DBG("GO BIT BUSY:%p.\n", hcr + 6);
- err = MTNIC_ERROR;
+ err = -EBUSY;
goto out;
}
if (in_imm) {
in_param_h = *((u32*)in_imm);
in_param_l = *((u32*)in_imm + 1);
} else {
- in_param_l = cpu_to_be32(priv->cmd.mapping);
+ in_param_l = cpu_to_be32(mtnic->cmd.mapping);
}
- out_param_l = cpu_to_be32(priv->cmd.mapping);
+ out_param_l = cpu_to_be32(mtnic->cmd.mapping);
/* writing to MCR */
- writel(in_param_h, &hcr->in_param_h);
- writel(in_param_l, &hcr->in_param_l);
- writel((u32) cpu_to_be32(in_modifier), &hcr->input_modifier);
- writel(out_param_h, &hcr->out_param_h);
- writel(out_param_l, &hcr->out_param_l);
- writel((u32)cpu_to_be32(token << 16), &hcr->token);
+ writel(in_param_h, &hcr->in_param_h);
+ writel(in_param_l, &hcr->in_param_l);
+ writel((u32) cpu_to_be32(in_modifier), &hcr->input_modifier);
+ writel(out_param_h, &hcr->out_param_h);
+ writel(out_param_l, &hcr->out_param_l);
+ writel((u32)cpu_to_be32(token << 16), &hcr->token);
wmb();
/* flip toggle bit before each write to the HCR */
- priv->cmd.tbit = !priv->cmd.tbit;
- writel((u32)
+ mtnic->cmd.tbit = !mtnic->cmd.tbit;
+ writel( ( u32 )
cpu_to_be32(MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT) |
- (priv->cmd.tbit << MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT)) | op),
+ ( mtnic->cmd.tbit << MTNIC_BC_OFF ( MTNIC_MASK_CMD_REG_T_BIT ) ) | op ),
&hcr->status_go_opcode);
- while (cmdif_go_bit(priv) && (timeout <= GO_BIT_TIMEOUT)) {
- mdelay(1);
+ while ( cmdif_go_bit ( mtnic ) && ( timeout <= GO_BIT_TIMEOUT ) ) {
+ mdelay ( 1 );
++timeout;
}
- if (cmdif_go_bit(priv)) {
+ if ( cmdif_go_bit ( mtnic ) ) {
DBG("Command opcode:0x%x token:0x%x TIMEOUT.\n", op, token);
- err = MTNIC_ERROR;
+ err = -EBUSY;
goto out;
}
}
status = be32_to_cpu((u32)readl(&hcr->status_go_opcode)) >> 24;
- /*DBG("Command opcode:0x%x token:0x%x returned:0x%lx\n",
- op, token, status);*/
if (status) {
+ DBG("Command opcode:0x%x token:0x%x returned:0x%x\n",
+ op, token, status);
return status;
}
/* MAP PAGES wrapper */
static int
-mtnic_map_cmd(struct mtnic_priv *priv, u16 op, struct mtnic_pages pages)
+mtnic_map_cmd(struct mtnic *mtnic, u16 op, struct mtnic_pages pages)
{
- unsigned int j;
+ unsigned int j;
u32 addr;
unsigned int len;
- u32 *page_arr = priv->cmd.buf;
+ u32 *page_arr = mtnic->cmd.buf;
int nent = 0;
int err = 0;
if (addr & (PAGE_MASK)) {
DBG("Got FW area not aligned to %d (%llx/%x)\n",
PAGE_SIZE, (u64) addr, len);
- return MTNIC_ERROR;
+ return -EADDRINUSE;
}
/* Function maps each PAGE seperately */
for (j = 0; j < len; j+= PAGE_SIZE) {
page_arr[nent * 4 + 3] = cpu_to_be32(addr + j);
if (++nent == MTNIC_MAILBOX_SIZE / 16) {
- err = mtnic_cmd(priv, NULL, NULL, nent, op);
+ err = mtnic_cmd(mtnic, NULL, NULL, nent, op);
if (err)
- return MTNIC_ERROR;
- nent = 0;
+ return -EIO;
+ nent = 0;
}
}
- if (nent)
- err = mtnic_cmd(priv, NULL, NULL, nent, op);
-
+ if (nent) {
+ err = mtnic_cmd(mtnic, NULL, NULL, nent, op);
+ }
return err;
}
* Query FW
*/
static int
-mtnic_QUERY_FW(struct mtnic_priv *priv)
+mtnic_QUERY_FW ( struct mtnic *mtnic )
{
int err;
- struct mtnic_if_query_fw_out_mbox *cmd = priv->cmd.buf;
+ struct mtnic_if_query_fw_out_mbox *cmd = mtnic->cmd.buf;
- err = mtnic_cmd(priv, NULL, NULL, 0, MTNIC_IF_CMD_QUERY_FW);
+ err = mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_QUERY_FW);
if (err)
- return MTNIC_ERROR;
+ return -EIO;
/* Get FW and interface versions */
- priv->fw_ver = ((u64) be16_to_cpu(cmd->rev_maj) << 32) |
- ((u64) be16_to_cpu(cmd->rev_min) << 16) |
- (u64) be16_to_cpu(cmd->rev_smin);
- priv->fw.ifc_rev = be16_to_cpu(cmd->ifc_rev);
+ mtnic->fw_ver = ((u64) be16_to_cpu(cmd->rev_maj) << 32) |
+ ((u64) be16_to_cpu(cmd->rev_min) << 16) |
+ (u64) be16_to_cpu(cmd->rev_smin);
+ mtnic->fw.ifc_rev = be16_to_cpu(cmd->ifc_rev);
/* Get offset for internal error reports (debug) */
- priv->fw.err_buf.offset = be64_to_cpu(cmd->err_buf_start);
- priv->fw.err_buf.size = be32_to_cpu(cmd->err_buf_size);
+ mtnic->fw.err_buf.offset = be64_to_cpu(cmd->err_buf_start);
+ mtnic->fw.err_buf.size = be32_to_cpu(cmd->err_buf_size);
- DBG("Error buf offset is %llx\n", priv->fw.err_buf.offset);
+ DBG("Error buf offset is %llx\n", mtnic->fw.err_buf.offset);
/* Get number of required FW (4k) pages */
- priv->fw.fw_pages.num = be16_to_cpu(cmd->fw_pages);
+ mtnic->fw.fw_pages.num = be16_to_cpu(cmd->fw_pages);
return 0;
}
static int
-mtnic_OPEN_NIC(struct mtnic_priv *priv)
+mtnic_OPEN_NIC(struct mtnic *mtnic)
{
-
- struct mtnic_if_open_nic_in_mbox *open_nic = priv->cmd.buf;
+ struct mtnic_if_open_nic_in_mbox *open_nic = mtnic->cmd.buf;
u32 extra_pages[2] = {0};
int err;
memset(open_nic, 0, sizeof *open_nic);
- /* port 1 */
+ /* port 1 */
open_nic->log_rx_p1 = 0;
open_nic->log_cq_p1 = 1;
open_nic->steer_p2 = MTNIC_IF_STEER_RSS;
/* MAC + VLAN - leave reserved */
- err = mtnic_cmd(priv, NULL, extra_pages, 0, MTNIC_IF_CMD_OPEN_NIC);
- priv->fw.extra_pages.num = be32_to_cpu(*(extra_pages+1));
- DBG("Extra pages num is %x\n", priv->fw.extra_pages.num);
+ err = mtnic_cmd(mtnic, NULL, extra_pages, 0, MTNIC_IF_CMD_OPEN_NIC);
+
+ mtnic->fw.extra_pages.num = be32_to_cpu(*(extra_pages+1));
+ DBG("Extra pages num is %x\n", mtnic->fw.extra_pages.num);
return err;
}
static int
-mtnic_CONFIG_RX(struct mtnic_priv *priv)
+mtnic_CONFIG_RX(struct mtnic *mtnic)
{
struct mtnic_if_config_rx_in_imm config_rx;
memset(&config_rx, 0, sizeof config_rx);
- return mtnic_cmd(priv, &config_rx, NULL, 0, MTNIC_IF_CMD_CONFIG_RX);
+ return mtnic_cmd(mtnic, &config_rx, NULL, 0, MTNIC_IF_CMD_CONFIG_RX);
}
static int
-mtnic_CONFIG_TX(struct mtnic_priv *priv)
+mtnic_CONFIG_TX(struct mtnic *mtnic)
{
struct mtnic_if_config_send_in_imm config_tx;
config_tx.enph_gpf = 0;
- return mtnic_cmd(priv, &config_tx, NULL, 0, MTNIC_IF_CMD_CONFIG_TX);
+ return mtnic_cmd(mtnic, &config_tx, NULL, 0, MTNIC_IF_CMD_CONFIG_TX);
}
static int
-mtnic_HEART_BEAT(struct mtnic_priv *priv, u32 *link_state)
+mtnic_HEART_BEAT(struct mtnic_port *priv, u32 *link_state)
{
struct mtnic_if_heart_beat_out_imm heart_beat;
int err;
u32 flags;
- err = mtnic_cmd(priv, NULL, &heart_beat, 0, MTNIC_IF_CMD_HEART_BEAT);
+ err = mtnic_cmd(priv->mtnic, NULL, &heart_beat, 0, MTNIC_IF_CMD_HEART_BEAT);
if (!err) {
flags = be32_to_cpu(heart_beat.flags);
if (flags & MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR)) {
DBG("Internal error detected\n");
- return MTNIC_ERROR;
+ return -EIO;
}
*link_state = flags &
- ~((u32) MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR));
+ ~((u32) MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR));
}
return err;
}
*/
static int
-mtnic_SET_PORT_DEFAULT_RING(struct mtnic_priv *priv, u8 port, u16 ring)
+mtnic_SET_PORT_DEFAULT_RING(struct mtnic_port *priv, u8 port, u16 ring)
{
struct mtnic_if_set_port_default_ring_in_imm def_ring;
memset(&def_ring, 0, sizeof(def_ring));
def_ring.ring = ring;
- return mtnic_cmd(priv, &def_ring, NULL, port + 1,
+ return mtnic_cmd(priv->mtnic, &def_ring, NULL, port + 1,
MTNIC_IF_CMD_SET_PORT_DEFAULT_RING);
}
static int
-mtnic_CONFIG_PORT_RSS_STEER(struct mtnic_priv *priv, int port)
+mtnic_CONFIG_PORT_RSS_STEER(struct mtnic_port *priv, int port)
{
- memset(priv->cmd.buf, 0, PAGE_SIZE);
- return mtnic_cmd(priv, NULL, NULL, port + 1,
- MTNIC_IF_CMD_CONFIG_PORT_RSS_STEER);
+ memset(priv->mtnic->cmd.buf, 0, PAGE_SIZE);
+ return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
+ MTNIC_IF_CMD_CONFIG_PORT_RSS_STEER);
}
static int
-mtnic_SET_PORT_RSS_INDIRECTION(struct mtnic_priv *priv, int port)
+mtnic_SET_PORT_RSS_INDIRECTION(struct mtnic_port *priv, int port)
{
- memset(priv->cmd.buf, 0, PAGE_SIZE);
- return mtnic_cmd(priv, NULL, NULL, port + 1,
- MTNIC_IF_CMD_SET_PORT_RSS_INDIRECTION);
+ memset(priv->mtnic->cmd.buf, 0, PAGE_SIZE);
+ return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
+ MTNIC_IF_CMD_SET_PORT_RSS_INDIRECTION);
}
* Config commands
*/
static int
-mtnic_CONFIG_CQ(struct mtnic_priv *priv, int port,
- u16 cq_ind, struct mtnic_cq *cq)
+mtnic_CONFIG_CQ(struct mtnic_port *priv, int port,
+ u16 cq_ind, struct mtnic_cq *cq)
{
- struct mtnic_if_config_cq_in_mbox *config_cq = priv->cmd.buf;
+ struct mtnic_if_config_cq_in_mbox *config_cq = priv->mtnic->cmd.buf;
memset(config_cq, 0, sizeof *config_cq);
config_cq->cq = cq_ind;
config_cq->size = fls(UNITS_BUFFER_SIZE - 1);
config_cq->offset = ((cq->dma) & (PAGE_MASK)) >> 6;
config_cq->db_record_addr_l = cpu_to_be32(cq->db_dma);
- config_cq->page_address[1] = cpu_to_be32(cq->dma);
+ config_cq->page_address[1] = cpu_to_be32(cq->dma);
DBG("config cq address: %x dma_address: %lx"
- "offset: %d size %d index: %d "
+ "offset: %d size %d index: %d\n"
, config_cq->page_address[1],cq->dma,
config_cq->offset, config_cq->size, config_cq->cq );
- return mtnic_cmd(priv, NULL, NULL, port + 1,
- MTNIC_IF_CMD_CONFIG_CQ);
+ return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
+ MTNIC_IF_CMD_CONFIG_CQ);
}
static int
-mtnic_CONFIG_TX_RING(struct mtnic_priv *priv, u8 port,
- u16 ring_ind, struct mtnic_ring *ring)
+mtnic_CONFIG_TX_RING(struct mtnic_port *priv, u8 port,
+ u16 ring_ind, struct mtnic_ring *ring)
{
- struct mtnic_if_config_send_ring_in_mbox *config_tx_ring = priv->cmd.buf;
+ struct mtnic_if_config_send_ring_in_mbox *config_tx_ring = priv->mtnic->cmd.buf;
memset(config_tx_ring, 0, sizeof *config_tx_ring);
config_tx_ring->ring = cpu_to_be16(ring_ind);
config_tx_ring->size = fls(UNITS_BUFFER_SIZE - 1);
config_tx_ring->cq = cpu_to_be16(ring->cq);
config_tx_ring->page_address[1] = cpu_to_be32(ring->dma);
- return mtnic_cmd(priv, NULL, NULL, port + 1,
+ return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
MTNIC_IF_CMD_CONFIG_TX_RING);
}
static int
-mtnic_CONFIG_RX_RING(struct mtnic_priv *priv, u8 port,
- u16 ring_ind, struct mtnic_ring *ring)
+mtnic_CONFIG_RX_RING(struct mtnic_port *priv, u8 port,
+ u16 ring_ind, struct mtnic_ring *ring)
{
- struct mtnic_if_config_rx_ring_in_mbox *config_rx_ring = priv->cmd.buf;
+ struct mtnic_if_config_rx_ring_in_mbox *config_rx_ring = priv->mtnic->cmd.buf;
memset(config_rx_ring, 0, sizeof *config_rx_ring);
config_rx_ring->ring = ring_ind;
- MTNIC_BC_PUT(config_rx_ring->stride_size, fls(UNITS_BUFFER_SIZE - 1),
- MTNIC_MASK_CONFIG_RX_RING_SIZE);
- MTNIC_BC_PUT(config_rx_ring->stride_size, 1,
- MTNIC_MASK_CONFIG_RX_RING_STRIDE);
+ MTNIC_BC_PUT(config_rx_ring->stride_size, fls(UNITS_BUFFER_SIZE - 1),
+ MTNIC_MASK_CONFIG_RX_RING_SIZE);
+ MTNIC_BC_PUT(config_rx_ring->stride_size, 1,
+ MTNIC_MASK_CONFIG_RX_RING_STRIDE);
config_rx_ring->cq = cpu_to_be16(ring->cq);
config_rx_ring->db_record_addr_l = cpu_to_be32(ring->db_dma);
- DBG("Config RX ring starting at address:%lx\n", ring->dma);
+ DBG("Config RX ring starting at address:%lx\n", ring->dma);
config_rx_ring->page_address[1] = cpu_to_be32(ring->dma);
- return mtnic_cmd(priv, NULL, NULL, port + 1,
- MTNIC_IF_CMD_CONFIG_RX_RING);
+ return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
+ MTNIC_IF_CMD_CONFIG_RX_RING);
}
static int
-mtnic_CONFIG_EQ(struct mtnic_priv *priv)
+mtnic_CONFIG_EQ(struct mtnic *mtnic)
{
- struct mtnic_if_config_eq_in_mbox *eq = priv->cmd.buf;
+ struct mtnic_if_config_eq_in_mbox *eq = mtnic->cmd.buf;
- if (priv->eq.dma & (PAGE_MASK)) {
+ if (mtnic->eq.dma & (PAGE_MASK)) {
DBG("misalligned eq buffer:%lx\n",
- priv->eq.dma);
- return MTNIC_ERROR;
- }
+ mtnic->eq.dma);
+ return -EADDRINUSE;
+ }
- memset(eq, 0, sizeof *eq);
- MTNIC_BC_PUT(eq->offset, priv->eq.dma >> 6, MTNIC_MASK_CONFIG_EQ_OFFSET);
- MTNIC_BC_PUT(eq->size, fls(priv->eq.size - 1) - 1, MTNIC_MASK_CONFIG_EQ_SIZE);
+ memset(eq, 0, sizeof *eq);
+ MTNIC_BC_PUT(eq->offset, mtnic->eq.dma >> 6, MTNIC_MASK_CONFIG_EQ_OFFSET);
+ MTNIC_BC_PUT(eq->size, fls(mtnic->eq.size - 1) - 1, MTNIC_MASK_CONFIG_EQ_SIZE);
MTNIC_BC_PUT(eq->int_vector, 0, MTNIC_MASK_CONFIG_EQ_INT_VEC);
- eq->page_address[1] = cpu_to_be32(priv->eq.dma);
+ eq->page_address[1] = cpu_to_be32(mtnic->eq.dma);
- return mtnic_cmd(priv, NULL, NULL, 0, MTNIC_IF_CMD_CONFIG_EQ);
+ return mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_CONFIG_EQ);
}
static int
-mtnic_SET_RX_RING_ADDR(struct mtnic_priv *priv, u8 port, u64* mac)
+mtnic_SET_RX_RING_ADDR(struct mtnic_port *priv, u8 port, u64* mac)
{
struct mtnic_if_set_rx_ring_addr_in_imm ring_addr;
u32 modifier = ((u32) port + 1) << 16;
ring_addr.mac_31_0 = cpu_to_be32(*mac & 0xffffffff);
ring_addr.mac_47_32 = cpu_to_be16((*mac >> 32) & 0xffff);
ring_addr.flags_vlan_id |= cpu_to_be16(
- MTNIC_BC_MASK(MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC));
+ MTNIC_BC_MASK(MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC));
- return mtnic_cmd(priv, &ring_addr, NULL, modifier, MTNIC_IF_CMD_SET_RX_RING_ADDR);
+ return mtnic_cmd(priv->mtnic, &ring_addr, NULL, modifier, MTNIC_IF_CMD_SET_RX_RING_ADDR);
}
static int
-mtnic_SET_PORT_STATE(struct mtnic_priv *priv, u8 port, u8 state)
+mtnic_SET_PORT_STATE(struct mtnic_port *priv, u8 port, u8 state)
{
struct mtnic_if_set_port_state_in_imm port_state;
port_state.state = state ? cpu_to_be32(
- MTNIC_BC_MASK(MTNIC_MASK_CONFIG_PORT_STATE)) : 0;
+ MTNIC_BC_MASK(MTNIC_MASK_CONFIG_PORT_STATE)) : 0;
port_state.reserved = 0;
- return mtnic_cmd(priv, &port_state, NULL, port + 1,
+ return mtnic_cmd(priv->mtnic, &port_state, NULL, port + 1,
MTNIC_IF_CMD_SET_PORT_STATE);
}
static int
-mtnic_SET_PORT_MTU(struct mtnic_priv *priv, u8 port, u16 mtu)
+mtnic_SET_PORT_MTU(struct mtnic_port *priv, u8 port, u16 mtu)
{
struct mtnic_if_set_port_mtu_in_imm set_mtu;
memset(&set_mtu, 0, sizeof(set_mtu));
set_mtu.mtu = cpu_to_be16(mtu);
- return mtnic_cmd(priv, &set_mtu, NULL, port + 1,
- MTNIC_IF_CMD_SET_PORT_MTU);
+ return mtnic_cmd(priv->mtnic, &set_mtu, NULL, port + 1,
+ MTNIC_IF_CMD_SET_PORT_MTU);
}
-
+/*
static int
-mtnic_CONFIG_PORT_VLAN_FILTER(struct mtnic_priv *priv, int port)
+mtnic_CONFIG_PORT_VLAN_FILTER(struct mtnic_port *priv, int port)
{
- struct mtnic_if_config_port_vlan_filter_in_mbox *vlan_filter = priv->cmd.buf;
+ struct mtnic_if_config_port_vlan_filter_in_mbox *vlan_filter = priv->mtnic->cmd.buf;
- /* When no vlans are configured we disable the filter
- * (i.e., pass all vlans) because we ignore them anyhow */
+ // When no vlans are configured we disable the filter
+ // (i.e., pass all vlans) because we ignore them anyhow
memset(vlan_filter, 0xff, sizeof(*vlan_filter));
- return mtnic_cmd(priv, NULL, NULL, port + 1,
- MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER);
+ return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
+ MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER);
}
+*/
static int
-mtnic_RELEASE_RESOURCE(struct mtnic_priv *priv, u8 port, u8 type, u8 index)
+mtnic_RELEASE_RESOURCE(struct mtnic_port *priv, u8 port, u8 type, u8 index)
{
struct mtnic_if_release_resource_in_imm rel;
memset(&rel, 0, sizeof rel);
rel.index = index;
rel.type = type;
- return mtnic_cmd(priv,
- &rel, NULL, (type == MTNIC_IF_RESOURCE_TYPE_EQ) ?
- 0 : port + 1, MTNIC_IF_CMD_RELEASE_RESOURCE);
+ return mtnic_cmd ( priv->mtnic,
+ &rel, NULL, ( type == MTNIC_IF_RESOURCE_TYPE_EQ ) ?
+ 0 : port + 1, MTNIC_IF_CMD_RELEASE_RESOURCE );
}
static int
-mtnic_QUERY_CAP(struct mtnic_priv *priv, u8 index, u8 mod, u64 *result)
+mtnic_QUERY_CAP(struct mtnic *mtnic, u8 index, u8 mod, u64 *result)
{
struct mtnic_if_query_cap_in_imm cap;
u32 out_imm[2];
memset(&cap, 0, sizeof cap);
cap.cap_index = index;
cap.cap_modifier = mod;
- err = mtnic_cmd(priv, &cap, &out_imm, 0, MTNIC_IF_CMD_QUERY_CAP);
+ err = mtnic_cmd(mtnic, &cap, &out_imm, 0, MTNIC_IF_CMD_QUERY_CAP);
*((u32*)result) = be32_to_cpu(*(out_imm+1));
*((u32*)result + 1) = be32_to_cpu(*out_imm);
#define DO_QUERY_CAP(cap, mod, var) \
- err = mtnic_QUERY_CAP(priv, cap, mod, &result); \
+ err = mtnic_QUERY_CAP(mtnic, cap, mod, &result);\
if (err) \
return err; \
(var) = result
static int
-mtnic_query_cap(struct mtnic_priv *priv)
+mtnic_query_num_ports(struct mtnic *mtnic)
+{
+ int err = 0;
+ u64 result;
+
+ DO_QUERY_CAP(MTNIC_IF_CAP_NUM_PORTS, 0, mtnic->fw.num_ports);
+
+ return 0;
+}
+
+static int
+mtnic_query_mac(struct mtnic *mtnic)
{
int err = 0;
int i;
- u64 result;
+ u64 result;
- DO_QUERY_CAP(MTNIC_IF_CAP_NUM_PORTS, 0, priv->fw.num_ports);
- for (i = 0; i < priv->fw.num_ports; i++) {
- DO_QUERY_CAP(MTNIC_IF_CAP_DEFAULT_MAC, i + 1, priv->fw.mac[i]);
+ for (i = 0; i < mtnic->fw.num_ports; i++) {
+ DO_QUERY_CAP(MTNIC_IF_CAP_DEFAULT_MAC, i + 1, mtnic->fw.mac[i]);
}
return 0;
}
static int
-mtnic_query_offsets(struct mtnic_priv *priv)
+mtnic_query_offsets(struct mtnic *mtnic)
{
int err;
int i;
DO_QUERY_CAP(MTNIC_IF_CAP_MEM_KEY,
MTNIC_IF_MEM_TYPE_SNOOP,
- priv->fw.mem_type_snoop_be);
- priv->fw.mem_type_snoop_be = cpu_to_be32(priv->fw.mem_type_snoop_be);
- DO_QUERY_CAP(MTNIC_IF_CAP_TX_CQ_DB_OFFSET, 0, priv->fw.txcq_db_offset);
- DO_QUERY_CAP(MTNIC_IF_CAP_EQ_DB_OFFSET, 0, priv->fw.eq_db_offset);
+ mtnic->fw.mem_type_snoop_be);
+ mtnic->fw.mem_type_snoop_be = cpu_to_be32(mtnic->fw.mem_type_snoop_be);
+ DO_QUERY_CAP(MTNIC_IF_CAP_TX_CQ_DB_OFFSET, 0, mtnic->fw.txcq_db_offset);
+ DO_QUERY_CAP(MTNIC_IF_CAP_EQ_DB_OFFSET, 0, mtnic->fw.eq_db_offset);
- for (i = 0; i < priv->fw.num_ports; i++) {
- DO_QUERY_CAP(MTNIC_IF_CAP_CQ_OFFSET, i + 1, priv->fw.cq_offset);
- DO_QUERY_CAP(MTNIC_IF_CAP_TX_OFFSET, i + 1, priv->fw.tx_offset[i]);
- DO_QUERY_CAP(MTNIC_IF_CAP_RX_OFFSET, i + 1, priv->fw.rx_offset[i]);
- DBG("--> Port %d CQ offset:0x%x\n", i, priv->fw.cq_offset);
- DBG("--> Port %d Tx offset:0x%x\n", i, priv->fw.tx_offset[i]);
- DBG("--> Port %d Rx offset:0x%x\n", i, priv->fw.rx_offset[i]);
+ for (i = 0; i < mtnic->fw.num_ports; i++) {
+ DO_QUERY_CAP(MTNIC_IF_CAP_CQ_OFFSET, i + 1, mtnic->fw.cq_offset);
+ DO_QUERY_CAP(MTNIC_IF_CAP_TX_OFFSET, i + 1, mtnic->fw.tx_offset[i]);
+ DO_QUERY_CAP(MTNIC_IF_CAP_RX_OFFSET, i + 1, mtnic->fw.rx_offset[i]);
+ DBG("--> Port %d CQ offset:0x%x\n", i, mtnic->fw.cq_offset);
+ DBG("--> Port %d Tx offset:0x%x\n", i, mtnic->fw.tx_offset[i]);
+ DBG("--> Port %d Rx offset:0x%x\n", i, mtnic->fw.rx_offset[i]);
}
mdelay(20);
* Reset device
*/
void
-mtnic_reset(void)
+mtnic_reset ( void )
{
- void *reset = ioremap(mtnic_pci_dev.dev.bar[0] + MTNIC_RESET_OFFSET, 4);
- writel(cpu_to_be32(1), reset);
- iounmap(reset);
+ void *reset = ioremap ( mtnic_pci_dev.dev.bar[0] + MTNIC_RESET_OFFSET,
+ 4 );
+ writel ( cpu_to_be32 ( 1 ), reset );
+ iounmap ( reset );
}
int err;
/* save bars */
- DBG("bus=%d devfn=0x%x", dev->bus, dev->devfn);
+ DBG("bus=%d devfn=0x%x\n", dev->bus, dev->devfn);
for (i = 0; i < 6; ++i) {
mtnic_pci_dev.dev.bar[i] =
- pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2));
+ pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2));
DBG("bar[%d]= 0x%08lx \n", i, mtnic_pci_dev.dev.bar[i]);
}
/* save config space */
for (i = 0; i < 64; ++i) {
err = pci_read_config_dword(dev, i << 2,
- &mtnic_pci_dev.dev.
- dev_config_space[i]);
+ &mtnic_pci_dev.dev.
+ dev_config_space[i]);
if (err) {
DBG("Can not save configuration space");
return err;
mtnic_pci_dev.dev.dev = dev;
- return 0;
+ return 0;
}
/**
* Initial hardware
*/
static inline
-int mtnic_init_card(struct net_device *dev)
+int mtnic_init_card(struct mtnic *mtnic)
{
- struct mtnic_priv *priv = netdev_priv(dev);
int err = 0;
- /* Set state */
- priv->state = CARD_DOWN;
- /* Set port */
- priv->port = MTNIC_PORT_NUM;
-
- /* Alloc command interface */
- err = mtnic_alloc_cmdif(priv);
+ /* Alloc command interface */
+ err = mtnic_alloc_cmdif ( mtnic );
if (err) {
- DBG("Failed to init command interface, aborting.\n");
- return MTNIC_ERROR;
+ DBG("Failed to init command interface, aborting\n");
+ return -EADDRINUSE;
}
-
- /**
- * Bring up HW
- */
- err = mtnic_QUERY_FW(priv);
+ /**
+ * Bring up HW
+ */
+ err = mtnic_QUERY_FW ( mtnic );
if (err) {
- DBG("QUERY_FW command failed, aborting.\n");
+ DBG("QUERY_FW command failed, aborting\n");
goto cmd_error;
}
-
- DBG("Command interface revision:%d\n", priv->fw.ifc_rev);
+ DBG("Command interface revision:%d\n", mtnic->fw.ifc_rev);
/* Allocate memory for FW and start it */
- err = mtnic_map_cmd(priv, MTNIC_IF_CMD_MAP_FW, priv->fw.fw_pages);
+ err = mtnic_map_cmd(mtnic, MTNIC_IF_CMD_MAP_FW, mtnic->fw.fw_pages);
if (err) {
DBG("Eror In MAP_FW\n");
- if (priv->fw.fw_pages.buf)
- free(priv->fw.fw_pages.buf);
+ if (mtnic->fw.fw_pages.buf)
+ ufree((intptr_t)mtnic->fw.fw_pages.buf);
goto cmd_error;
}
/* Run firmware */
- err = mtnic_cmd(priv, NULL, NULL, 0, MTNIC_IF_CMD_RUN_FW);
+ err = mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_RUN_FW);
if (err) {
DBG("Eror In RUN FW\n");
goto map_fw_error;
}
- DBG("FW version:%d.%d.%d\n",
- (u16) (priv->fw_ver >> 32),
- (u16) ((priv->fw_ver >> 16) & 0xffff),
- (u16) (priv->fw_ver & 0xffff));
+ DBG("FW version:%d.%d.%d\n",
+ (u16) (mtnic->fw_ver >> 32),
+ (u16) ((mtnic->fw_ver >> 16) & 0xffff),
+ (u16) (mtnic->fw_ver & 0xffff));
- /* Get device information */
- err = mtnic_query_cap(priv);
+ /* Query num ports */
+ err = mtnic_query_num_ports(mtnic);
if (err) {
- DBG("Insufficient resources, aborting.\n");
+ DBG("Insufficient resources, aborting\n");
goto map_fw_error;
}
/* Open NIC */
- err = mtnic_OPEN_NIC(priv);
+ err = mtnic_OPEN_NIC(mtnic);
if (err) {
- DBG("Failed opening NIC, aborting.\n");
+ DBG("Failed opening NIC, aborting\n");
goto map_fw_error;
}
/* Allocate and map pages worksace */
- err = mtnic_map_cmd(priv, MTNIC_IF_CMD_MAP_PAGES, priv->fw.extra_pages);
+ err = mtnic_map_cmd(mtnic, MTNIC_IF_CMD_MAP_PAGES, mtnic->fw.extra_pages);
+ if (err) {
+ DBG("Couldn't allocate %x FW extra pages, aborting\n",
+ mtnic->fw.extra_pages.num);
+ if (mtnic->fw.extra_pages.buf)
+ ufree((intptr_t)mtnic->fw.extra_pages.buf);
+ goto map_fw_error;
+ }
+
+
+ /* Get device information */
+ err = mtnic_query_mac(mtnic);
if (err) {
- DBG("Couldn't allocate %x FW extra pages, aborting.\n",
- priv->fw.extra_pages.num);
- if (priv->fw.extra_pages.buf)
- free(priv->fw.extra_pages.buf);
+		DBG("Insufficient resources in query mac, aborting\n");
goto map_fw_error;
}
/* Get device offsets */
- err = mtnic_query_offsets(priv);
+ err = mtnic_query_offsets(mtnic);
if (err) {
- DBG("Failed retrieving resource offests, aborting.\n");
- free(priv->fw.extra_pages.buf);
+		DBG("Failed retrieving resource offsets, aborting\n");
+ ufree((intptr_t)mtnic->fw.extra_pages.buf);
goto map_extra_error;
}
- /* Alloc EQ */
- err = mtnic_alloc_eq(priv);
+ /* Alloc EQ */
+ err = mtnic_alloc_eq(mtnic);
if (err) {
DBG("Failed init shared resources. error: %d\n", err);
goto map_extra_error;
- }
+ }
/* Configure HW */
- err = mtnic_CONFIG_EQ(priv);
+ err = mtnic_CONFIG_EQ(mtnic);
if (err) {
DBG("Failed configuring EQ\n");
goto eq_error;
}
- err = mtnic_CONFIG_RX(priv);
+ err = mtnic_CONFIG_RX(mtnic);
if (err) {
DBG("Failed Rx configuration\n");
goto eq_error;
}
- err = mtnic_CONFIG_TX(priv);
+ err = mtnic_CONFIG_TX(mtnic);
if (err) {
DBG("Failed Tx configuration\n");
goto eq_error;
}
- DBG("Activating port:%d\n", MTNIC_PORT_NUM + 1);
-
- priv->state = CARD_INITIALIZED;
return 0;
eq_error:
- iounmap(priv->eq_db);
- free(priv->eq.buf);
+ iounmap(mtnic->eq_db);
+ free_memblock(mtnic->eq.buf, mtnic->eq.buf_size);
map_extra_error:
- free(priv->fw.extra_pages.buf);
+ ufree((intptr_t)mtnic->fw.extra_pages.buf);
map_fw_error:
- free(priv->fw.fw_pages.buf);
+ ufree((intptr_t)mtnic->fw.fw_pages.buf);
cmd_error:
- iounmap(priv->hcr);
- free(priv->cmd.buf);
- free(priv);
+ iounmap(mtnic->hcr);
+ free_memblock(mtnic->cmd.buf, PAGE_SIZE);
- return MTNIC_ERROR;
+ return -EADDRINUSE;
}
*
*
********************************************************************/
-void mtnic_process_tx_cq(struct mtnic_priv *priv, struct net_device *dev,
+void mtnic_process_tx_cq(struct mtnic_port *priv, struct net_device *dev,
struct mtnic_cq *cq)
{
struct mtnic_cqe *cqe = cq->buf;
/* Owner bit changes every round */
while (XNOR(cqe->op_tr_own & MTNIC_BIT_CQ_OWN, cq->last & cq->size)) {
netdev_tx_complete (dev, ring->iobuf[index]);
- ++cq->last;
- index = cq->last & (cq->size-1);
+ ++cq->last;
+ index = cq->last & (cq->size-1);
cqe = &cq->buf[index];
- }
+ }
/* Update consumer index */
cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
}
-int mtnic_process_rx_cq(struct mtnic_priv *priv, struct net_device *dev, struct mtnic_cq *cq)
+int mtnic_process_rx_cq(struct mtnic_port *priv,
+ struct net_device *dev,
+ struct mtnic_cq *cq)
{
struct mtnic_cqe *cqe;
struct mtnic_ring *ring = &priv->rx_ring;
int index;
int err;
struct io_buffer *rx_iob;
+ unsigned int length;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
/*
* Packet is OK - process it.
*/
- rx_iob = ring->iobuf[index];
- iob_put(rx_iob, DEF_IOBUF_SIZE);
+ length = be32_to_cpu(cqe->byte_cnt);
+ rx_iob = ring->iobuf[index];
+ iob_put(rx_iob, length);
+
/* Add this packet to the receive queue. */
netdev_rx(dev, rx_iob);
- ring->iobuf[index] = NULL;
+ ring->iobuf[index] = NULL;
next:
++cq->last;
index = cq->last & (cq->size-1);
cqe = &cq->buf[index];
+
+
+
}
/* Update consumer index */
err = mtnic_alloc_iobuf(priv, &priv->rx_ring, DEF_IOBUF_SIZE);
if (err) {
DBG("ERROR Allocating io buffer");
- return MTNIC_ERROR;
+ return -EADDRINUSE;
}
}
static int
mtnic_open(struct net_device *dev)
{
- struct mtnic_priv *priv = netdev_priv(dev);
+ struct mtnic_port *priv = netdev_priv(dev);
+
int err = 0;
struct mtnic_ring *ring;
struct mtnic_cq *cq;
int cq_ind = 0;
u32 dev_link_state;
+ int link_check;
- DBG("starting port:%d", priv->port);
+ DBG("starting port:%d, MAC Address: 0x%12llx\n",
+ priv->port, priv->mtnic->fw.mac[priv->port]);
/* Alloc and configure CQs, TX, RX */
- err = mtnic_alloc_resources(dev);
+ err = mtnic_alloc_resources ( dev );
if (err) {
DBG("Error allocating resources\n");
- return MTNIC_ERROR;
+ return -EADDRINUSE;
}
/* Pass CQs configuration to HW */
- for (cq_ind = 0; cq_ind < NUM_CQS; ++cq_ind) {
+ for (cq_ind = 0; cq_ind < NUM_CQS; ++cq_ind) {
cq = &priv->cq[cq_ind];
err = mtnic_CONFIG_CQ(priv, priv->port, cq_ind, cq);
if (err) {
if (cq_ind)
goto cq_error;
else
- return MTNIC_ERROR;
- }
+ goto allocation_error;
+ }
/* Update consumer index */
cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
}
+
/* Pass Tx configuration to HW */
ring = &priv->tx_ring;
- err = mtnic_CONFIG_TX_RING(priv, priv->port, 0, ring);
+ err = mtnic_CONFIG_TX_RING(priv, priv->port, 0, ring);
if (err) {
DBG("Failed configuring Tx ring:0\n");
- goto cq_error;
+ goto cq_error;
}
/* Pass RX configuration to HW */
- ring = &priv->rx_ring;
- err = mtnic_CONFIG_RX_RING(priv, priv->port, 0, ring);
+ ring = &priv->rx_ring;
+ err = mtnic_CONFIG_RX_RING(priv, priv->port, 0, ring);
if (err) {
DBG("Failed configuring Rx ring:0\n");
goto tx_error;
goto rx_error;
}
+
/* Set the port default ring to ring 0 */
err = mtnic_SET_PORT_DEFAULT_RING(priv, priv->port, 0);
if (err) {
}
/* Set Mac address */
- err = mtnic_SET_RX_RING_ADDR(priv, priv->port, &priv->fw.mac[priv->port]);
+ err = mtnic_SET_RX_RING_ADDR(priv, priv->port, &priv->mtnic->fw.mac[priv->port]);
if (err) {
DBG("Failed setting default MAC address\n");
goto rx_error;
}
/* Configure VLAN filter */
+ /* By adding this function, the second port won't accept packets
err = mtnic_CONFIG_PORT_VLAN_FILTER(priv, priv->port);
- if (err) {
+ if (err) {
DBG("Failed configuring VLAN filter\n");
goto rx_error;
}
+ */
+
/* Bring up physical link */
err = mtnic_SET_PORT_STATE(priv, priv->port, 1);
DBG("Failed bringing up port\n");
goto rx_error;
}
- mdelay(300); /* Let link state stabilize if cable was connected */
+ /* PORT IS UP */
priv->state = CARD_UP;
- err = mtnic_HEART_BEAT(priv, &dev_link_state);
- if (err) {
- DBG("Failed getting device link state\n");
- return MTNIC_ERROR;
+
+ /* Checking Link is up */
+ DBG ( "Checking if link is up\n" );
+
+
+ for ( link_check = 0; link_check < CHECK_LINK_TIMES; link_check ++ ) {
+ /* Let link state stabilize if cable was connected */
+ mdelay ( DELAY_LINK_CHECK );
+
+ err = mtnic_HEART_BEAT(priv, &dev_link_state);
+ if (err) {
+ DBG("Failed getting device link state\n");
+ return -ENETDOWN;
+ }
+
+ if ( dev_link_state & priv->port ) {
+ /* Link is up */
+ break;
+ }
}
- if (!(dev_link_state & 0x3)) {
+
+
+ if ( ! ( dev_link_state & 0x3 ) ) {
DBG("Link down, check cables and restart\n");
- return MTNIC_ERROR;
+ netdev_link_down ( dev );
+ return -ENETDOWN;
}
- return 0;
+ DBG ( "Link is up!\n" );
+ /* Mark as link up */
+ netdev_link_up ( dev );
+
+ return 0;
rx_error:
err = mtnic_RELEASE_RESOURCE(priv, priv->port,
- MTNIC_IF_RESOURCE_TYPE_RX_RING, 0);
+ MTNIC_IF_RESOURCE_TYPE_RX_RING, 0);
tx_error:
err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
MTNIC_IF_RESOURCE_TYPE_TX_RING, 0);
+
cq_error:
while (cq_ind) {
err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
if (err)
DBG("Eror Releasing resources\n");
- return MTNIC_ERROR;
+allocation_error:
+
+ free_memblock(priv->tx_ring.buf, priv->tx_ring.buf_size);
+ iounmap(priv->tx_ring.txcq_db);
+ free_memblock(priv->cq[1].buf, priv->cq[1].buf_size);
+ free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
+ free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size);
+ free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
+ free_memblock(priv->cq[0].buf, priv->cq[0].buf_size);
+ free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));
+
+ mtnic_free_io_buffers(&priv->rx_ring);
+
+ return -ENETDOWN;
}
+
/** Check if we got completion for receive and transmit and
* check the line with heart_bit command */
static void
-mtnic_poll(struct net_device *dev)
+mtnic_poll ( struct net_device *dev )
{
- struct mtnic_priv *priv = netdev_priv(dev);
+ struct mtnic_port *priv = netdev_priv(dev);
struct mtnic_cq *cq;
u32 dev_link_state;
int err;
unsigned int i;
- /* In case of an old error then return */
+ /* In case of an old error then return */
if (priv->state != CARD_UP)
return;
/* We do not check the device every call _poll call,
- since it will slow it down */
+ since it will slow it down */
if ((priv->poll_counter % ROUND_TO_CHECK) == 0) {
/* Check device */
err = mtnic_HEART_BEAT(priv, &dev_link_state);
if (err) {
DBG("Device has internal error\n");
- priv->state = CARD_DOWN;
+ priv->state = CARD_LINK_DOWN;
return;
}
if (!(dev_link_state & 0x3)) {
DBG("Link down, check cables and restart\n");
- priv->state = CARD_DOWN;
+ priv->state = CARD_LINK_DOWN;
return;
}
}
-
/* Polling CQ */
for (i = 0; i < NUM_CQS; i++) {
cq = &priv->cq[i]; //Passing on the 2 cqs.
if (cq->is_rx) {
- err = mtnic_process_rx_cq(priv, cq->dev, cq);
+ err = mtnic_process_rx_cq(priv, cq->dev, cq);
if (err) {
- priv->state = CARD_DOWN;
+ priv->state = CARD_LINK_DOWN;
DBG(" Error allocating RX buffers\n");
return;
}
- } else {
- mtnic_process_tx_cq(priv, cq->dev, cq);
+ } else {
+ mtnic_process_tx_cq(priv, cq->dev, cq);
}
}
++ priv->poll_counter;
}
+
+
static int
mtnic_transmit( struct net_device *dev, struct io_buffer *iobuf )
{
- struct mtnic_priv *priv = netdev_priv(dev);
+ struct mtnic_port *priv = netdev_priv(dev);
struct mtnic_ring *ring;
struct mtnic_tx_desc *tx_desc;
struct mtnic_data_seg *data;
/* In case of an error then return */
if (priv->state != CARD_UP)
- return MTNIC_ERROR;
+ return -ENETDOWN;
ring = &priv->tx_ring;
- index = ring->prod & ring->size_mask;
+ index = ring->prod & ring->size_mask;
if ((ring->prod - ring->cons) >= ring->size) {
DBG("No space left for descriptors!!! cons: %x prod: %x\n",
ring->cons, ring->prod);
mdelay(5);
- return MTNIC_ERROR;/* no space left */
+ return -EAGAIN;/* no space left */
}
- /* get current descriptor */
+ /* get current descriptor */
tx_desc = ring->buf + (index * sizeof(struct mtnic_tx_desc));
- /* Prepare ctrl segement */
- tx_desc->ctrl.size_vlan = cpu_to_be32(2);
- tx_desc->ctrl.flags = cpu_to_be32(MTNIC_BIT_TX_COMP |
- MTNIC_BIT_NO_ICRC);
- tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_OPCODE_SEND) |
- ((ring->prod & ring->size) ?
- cpu_to_be32(MTNIC_BIT_DESC_OWN) : 0);
-
/* Prepare Data Seg */
data = &tx_desc->data;
data->addr_l = cpu_to_be32((u32)virt_to_bus(iobuf->data));
data->count = cpu_to_be32(iob_len(iobuf));
- data->mem_type = priv->fw.mem_type_snoop_be;
+ data->mem_type = priv->mtnic->fw.mem_type_snoop_be;
+
+ /* Prepare ctrl segment */
+ tx_desc->ctrl.size_vlan = cpu_to_be32(2);
+ tx_desc->ctrl.flags = cpu_to_be32(MTNIC_BIT_TX_COMP |
+ MTNIC_BIT_NO_ICRC);
+ tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_OPCODE_SEND) |
+ ((ring->prod & ring->size) ?
+ cpu_to_be32(MTNIC_BIT_DESC_OWN) : 0);
/* Attach io_buffer */
ring->iobuf[index] = iobuf;
static void
mtnic_close(struct net_device *dev)
{
- struct mtnic_priv *priv = netdev_priv(dev);
+ struct mtnic_port *priv = netdev_priv(dev);
int err = 0;
DBG("Close called for port:%d\n", priv->port);
- if (priv->state == CARD_UP) {
+ if ( ( priv->state == CARD_UP ) ||
+ ( priv->state == CARD_LINK_DOWN ) ) {
+
/* Disable port */
err |= mtnic_SET_PORT_STATE(priv, priv->port, 0);
/*
/* Stop CQs */
err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
- MTNIC_IF_RESOURCE_TYPE_CQ, 0);
+ MTNIC_IF_RESOURCE_TYPE_CQ, 0);
err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
- MTNIC_IF_RESOURCE_TYPE_CQ, 1);
+ MTNIC_IF_RESOURCE_TYPE_CQ, 1);
if (err) {
- DBG("Close reported error %d", err);
+ DBG("Close reported error %d\n", err);
}
- /* Free memory */
- free(priv->tx_ring.buf);
+ mdelay ( 10 );
+
+ /* free memory */
+ free_memblock(priv->tx_ring.buf, priv->tx_ring.buf_size);
iounmap(priv->tx_ring.txcq_db);
- free(priv->cq[1].buf);
- free(priv->cq[1].db);
+ free_memblock(priv->cq[1].buf, priv->cq[1].buf_size);
+ free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
+ free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size);
+ free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
+ free_memblock(priv->cq[0].buf, priv->cq[0].buf_size);
+ free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));
/* Free RX buffers */
mtnic_free_io_buffers(&priv->rx_ring);
- free(priv->rx_ring.buf);
- free(priv->rx_ring.db);
- free(priv->cq[0].buf);
- free(priv->cq[0].db);
- priv->state = CARD_INITIALIZED;
}
+ priv->state = CARD_INITIALIZED;
}
{
int err;
- struct net_device *dev = pci_get_drvdata(pci);
- struct mtnic_priv *priv = netdev_priv(dev);
-
- /* Should NOT happen! but just in case */
- if (priv->state == CARD_UP)
- mtnic_close(dev);
-
- if (priv->state == CARD_INITIALIZED) {
- err = mtnic_RELEASE_RESOURCE(priv, 0,
- MTNIC_IF_RESOURCE_TYPE_EQ, 0);
- DBG("Calling MTNIC_CLOSE command\n");
- err |= mtnic_cmd(priv, NULL, NULL, 0,
- MTNIC_IF_CMD_CLOSE_NIC);
- if (err) {
- DBG("Error Releasing resources %d\n", err);
- }
+ int i;
+ struct mtnic *mtnic = pci_get_drvdata(pci);
+
+
+ struct net_device *dev;
+ struct mtnic_port *priv;
- free(priv->cmd.buf);
- iounmap(priv->hcr);
- ufree((intptr_t)priv->fw.fw_pages.buf);
- ufree((intptr_t)priv->fw.extra_pages.buf);
- free(priv->eq.buf);
- iounmap(priv->eq_db);
- priv->state = CARD_DOWN;
+ for ( i = ( mtnic->fw.num_ports - 1 ); i >= 0; i-- ) {
+
+ dev = mtnic->netdev[i];
+
+ priv = netdev_priv(dev);
+
+ /* Just in case */
+ if ( ( priv->state == CARD_UP ) ||
+ ( priv->state == CARD_LINK_DOWN ) )
+ mtnic_close ( dev );
+ }
+
+ /* Releasing EQ */
+ priv = netdev_priv ( mtnic->netdev[0] );
+ err = mtnic_RELEASE_RESOURCE(priv, 1,
+ MTNIC_IF_RESOURCE_TYPE_EQ, 0);
+
+ DBG("Calling MTNIC_CLOSE command\n");
+ err |= mtnic_cmd(mtnic, NULL, NULL, 0,
+ MTNIC_IF_CMD_CLOSE_NIC);
+ if (err) {
+ DBG("Error Releasing resources %d\n", err);
+ }
+
+ free_memblock(mtnic->cmd.buf, PAGE_SIZE);
+ iounmap(mtnic->hcr);
+ ufree((intptr_t)mtnic->fw.fw_pages.buf);
+ ufree((intptr_t)mtnic->fw.extra_pages.buf);
+ free_memblock(mtnic->eq.buf, mtnic->eq.buf_size);
+ iounmap(mtnic->eq_db);
+
+
+ for ( i = ( mtnic->fw.num_ports - 1 ); i >= 0; i-- ) {
+ dev = mtnic->netdev[i];
+ unregister_netdev ( dev );
+ netdev_nullify ( dev );
+ netdev_put ( dev );
}
- unregister_netdev(dev);
- netdev_nullify(dev);
- netdev_put(dev);
+ free ( mtnic );
+
+
+ mtnic_reset ();
+ mdelay ( 1000 );
+ /* Restore config, if we would like to retry booting */
+ restore_config ();
+
+
}
/** mtnic net device operations */
static struct net_device_operations mtnic_operations = {
- .open = mtnic_open,
- .close = mtnic_close,
- .transmit = mtnic_transmit,
- .poll = mtnic_poll,
- .irq = mtnic_irq,
+ .open = mtnic_open,
+ .close = mtnic_close,
+ .transmit = mtnic_transmit,
+ .poll = mtnic_poll,
+ .irq = mtnic_irq,
};
static int
mtnic_probe(struct pci_device *pci,
- const struct pci_device_id *id __unused)
+ const struct pci_device_id *id __unused)
{
- struct net_device *dev;
- struct mtnic_priv *priv;
+ struct mtnic_port *priv;
+ struct mtnic *mtnic;
int err;
u64 mac;
- u32 result = 0;
- void *dev_id;
- int i;
+ int port_index;
+
- adjust_pci_device(pci);
+ adjust_pci_device(pci);
- err = mtnic_init_pci(pci);
+ err = mtnic_init_pci(pci);
if (err) {
DBG("Error in pci_init\n");
- return MTNIC_ERROR;
+ return -EIO;
}
mtnic_reset();
- mdelay(1000);
+ mdelay(1000);
- err = restore_config();
+ err = restore_config();
if (err) {
- DBG("Error restoring config\n");
+ DBG("Error in restoring config\n");
return err;
}
- /* Checking MTNIC device ID */
- dev_id = ioremap(mtnic_pci_dev.dev.bar[0] +
- MTNIC_DEVICE_ID_OFFSET, 4);
- result = ntohl(readl(dev_id));
- iounmap(dev_id);
- if (result != MTNIC_DEVICE_ID) {
- DBG("Wrong Devie ID (0x%x) !!!", result);
- return MTNIC_ERROR;
+ mtnic = zalloc ( sizeof ( *mtnic ) );
+ if ( ! mtnic ) {
+ DBG ( "Error Allocating mtnic buffer\n" );
+ return -EADDRINUSE;
}
- /* Initializing net device */
- dev = alloc_etherdev(sizeof(struct mtnic_priv));
- if (dev == NULL) {
- DBG("Net device allocation failed\n");
- return MTNIC_ERROR;
- }
- /*
- * Initialize driver private data
- */
- priv = netdev_priv(dev);
- memset(priv, 0, sizeof(struct mtnic_priv));
- priv->dev = dev;
- priv->pdev = pci;
- priv->dev->dev = &pci->dev;
- /* Attach pci device */
- pci_set_drvdata(pci, priv->dev);
- netdev_init(dev, &mtnic_operations);
+ pci_set_drvdata(pci, mtnic);
+
+ mtnic->pdev = pci;
/* Initialize hardware */
- err = mtnic_init_card(dev);
+ err = mtnic_init_card ( mtnic );
if (err) {
DBG("Error in init_card\n");
- return MTNIC_ERROR;
+ goto err_init_card;
}
- /* Program the MAC address */
- mac = priv->fw.mac[priv->port];
- printf("Port %d Mac address: 0x%12llx\n", MTNIC_PORT_NUM + 1, mac);
- for (i = 0;i < MAC_ADDRESS_SIZE; ++i) {
- dev->ll_addr[MAC_ADDRESS_SIZE - i - 1] = mac & 0xFF;
- mac = mac >> 8;
+ for ( port_index = 0; port_index < mtnic->fw.num_ports; port_index ++ ) {
+ /* Initializing net device */
+ mtnic->netdev[port_index] = alloc_etherdev( sizeof ( struct mtnic_port ) );
+ if ( mtnic->netdev[port_index] == NULL ) {
+ DBG("Net device allocation failed\n");
+ goto err_alloc_mtnic;
+ }
+
+ /*
+ * Initialize driver private data
+ */
+
+ mtnic->netdev[port_index]->dev = &pci->dev;
+ priv = netdev_priv ( mtnic->netdev[port_index] );
+ memset ( priv, 0, sizeof ( struct mtnic_port ) );
+ priv->mtnic = mtnic;
+ priv->netdev = mtnic->netdev[port_index];
+
+ /* Attach pci device */
+ netdev_init(mtnic->netdev[port_index], &mtnic_operations);
+
+ /* Set port number */
+ priv->port = port_index;
+
+ /* Set state */
+ priv->state = CARD_DOWN;
}
- /* Mark as link up; we don't yet handle link state */
- netdev_link_up ( dev );
- if (register_netdev(dev)) {
- DBG("Netdev registration failed\n");
- return MTNIC_ERROR;
+ int mac_idx;
+ for ( port_index = 0; port_index < mtnic->fw.num_ports; port_index ++ ) {
+ priv = netdev_priv ( mtnic->netdev[port_index] );
+ /* Program the MAC address */
+ mac = priv->mtnic->fw.mac[port_index];
+ for (mac_idx = 0; mac_idx < MAC_ADDRESS_SIZE; ++mac_idx) {
+ mtnic->netdev[port_index]->ll_addr[MAC_ADDRESS_SIZE - mac_idx - 1] = mac & 0xFF;
+ mac = mac >> 8;
+ }
+
+ if ( register_netdev ( mtnic->netdev[port_index] ) ) {
+ DBG("Netdev registration failed\n");
+ priv->state = CARD_INITIALIZED;
+ goto err_alloc_mtnic;
+ }
}
return 0;
-}
-
+err_alloc_mtnic:
+ free ( mtnic );
+err_init_card:
+ return -EIO;
+}
static struct pci_device_id mtnic_nics[] = {
- PCI_ROM(0x15b3, 0x6368, "mtnic", "Mellanox MTNIC driver"),
+ PCI_ROM ( 0x15b3, 0x6368, "mt25448", "Mellanox ConnectX EN driver" ),
+ PCI_ROM ( 0x15b3, 0x6372, "mt25458", "Mellanox ConnectX ENt driver" ),
+ PCI_ROM ( 0x15b3, 0x6750, "mt26448", "Mellanox ConnectX EN GEN2 driver" ),
+ PCI_ROM ( 0x15b3, 0x675a, "mt26458", "Mellanox ConnectX ENt GEN2 driver" ),
};
struct pci_driver mtnic_driver __pci_driver = {
.ids = mtnic_nics,
.id_count = sizeof(mtnic_nics) / sizeof(mtnic_nics[0]),
- .probe = mtnic_probe,
+ .probe = mtnic_probe,
.remove = mtnic_disable,
};
/*
* Device setup
*/
-
-/*
- Note port number can be changed under mtnic.c !
-*/
#define MTNIC_MAX_PORTS 2
+#define MTNIC_PORT1 0
+#define MTNIC_PORT2 1
#define NUM_TX_RINGS 1
#define NUM_RX_RINGS 1
#define NUM_CQS (NUM_RX_RINGS + NUM_TX_RINGS)
#define GO_BIT_TIMEOUT 6000
#define TBIT_RETRIES 100
#define UNITS_BUFFER_SIZE 8 /* can be configured to 4/8/16 */
-#define MAX_GAP_PROD_CONS (UNITS_BUFFER_SIZE/4)
-#define DEF_MTU 1600
-#define DEF_IOBUF_SIZE 1600
+#define MAX_GAP_PROD_CONS ( UNITS_BUFFER_SIZE / 4 )
+#define ETH_DEF_LEN 1540 /* 40 bytes used by the card */
+#define ETH_FCS_LEN 14
+#define DEF_MTU ETH_DEF_LEN + ETH_FCS_LEN
+#define DEF_IOBUF_SIZE ETH_DEF_LEN
+
#define MAC_ADDRESS_SIZE 6
#define NUM_EQES 16
#define ROUND_TO_CHECK 0x400
+#define DELAY_LINK_CHECK 300
+#define CHECK_LINK_TIMES 7
+
#define XNOR(x,y) (!(x) == !(y))
#define dma_addr_t unsigned long
MTNIC_IF_CMD_CONFIG_RX = 0x005, /* general receive configuration */
MTNIC_IF_CMD_CONFIG_TX = 0x006, /* general transmit configuration */
MTNIC_IF_CMD_CONFIG_INT_FREQ = 0x007, /* interrupt timers freq limits */
- MTNIC_IF_CMD_HEART_BEAT = 0x008, /* NOP command testing liveliness */
+ MTNIC_IF_CMD_HEART_BEAT = 0x008, /* NOP command testing liveliness */
MTNIC_IF_CMD_CLOSE_NIC = 0x009, /* release memory and stop the NIC */
/* Port commands: */
MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER = 0x14, /* configure VLAN filter */
MTNIC_IF_CMD_CONFIG_PORT_MCAST_FILTER = 0x15, /* configure mcast filter */
MTNIC_IF_CMD_ENABLE_PORT_MCAST_FILTER = 0x16, /* enable/disable */
- MTNIC_IF_CMD_SET_PORT_MTU = 0x17, /* set port MTU */
+ MTNIC_IF_CMD_SET_PORT_MTU = 0x17, /* set port MTU */
MTNIC_IF_CMD_SET_PORT_PROMISCUOUS_MODE = 0x18, /* enable/disable promisc */
MTNIC_IF_CMD_SET_PORT_DEFAULT_RING = 0x19, /* set the default ring */
- MTNIC_IF_CMD_SET_PORT_STATE = 0x1a, /* set link up/down */
- MTNIC_IF_CMD_DUMP_STAT = 0x1b, /* dump statistics */
+ MTNIC_IF_CMD_SET_PORT_STATE = 0x1a, /* set link up/down */
+ MTNIC_IF_CMD_DUMP_STAT = 0x1b, /* dump statistics */
MTNIC_IF_CMD_ARM_PORT_STATE_EVENT = 0x1c, /* arm the port state event */
/* Ring / Completion queue commands: */
- MTNIC_IF_CMD_CONFIG_CQ = 0x20, /* set up completion queue */
- MTNIC_IF_CMD_CONFIG_RX_RING = 0x21, /* setup Rx ring */
- MTNIC_IF_CMD_SET_RX_RING_ADDR = 0x22, /* set Rx ring filter by address */
+ MTNIC_IF_CMD_CONFIG_CQ = 0x20, /* set up completion queue */
+ MTNIC_IF_CMD_CONFIG_RX_RING = 0x21, /* setup Rx ring */
+ MTNIC_IF_CMD_SET_RX_RING_ADDR = 0x22, /* set Rx ring filter by address */
MTNIC_IF_CMD_SET_RX_RING_MCAST = 0x23, /* set Rx ring mcast filter */
- MTNIC_IF_CMD_ARM_RX_RING_WM = 0x24, /* one-time low-watermark INT */
- MTNIC_IF_CMD_CONFIG_TX_RING = 0x25, /* set up Tx ring */
+ MTNIC_IF_CMD_ARM_RX_RING_WM = 0x24, /* one-time low-watermark INT */
+ MTNIC_IF_CMD_CONFIG_TX_RING = 0x25, /* set up Tx ring */
MTNIC_IF_CMD_ENFORCE_TX_RING_ADDR = 0x26, /* setup anti spoofing */
- MTNIC_IF_CMD_CONFIG_EQ = 0x27, /* config EQ ring */
+ MTNIC_IF_CMD_CONFIG_EQ = 0x27, /* config EQ ring */
MTNIC_IF_CMD_RELEASE_RESOURCE = 0x28, /* release internal ref to resource */
}
mtnic_if_cmd_t;
typedef enum mtnic_if_caps {
MTNIC_IF_CAP_MAX_TX_RING_PER_PORT = 0x0,
MTNIC_IF_CAP_MAX_RX_RING_PER_PORT = 0x1,
- MTNIC_IF_CAP_MAX_CQ_PER_PORT = 0x2,
- MTNIC_IF_CAP_NUM_PORTS = 0x3,
- MTNIC_IF_CAP_MAX_TX_DESC = 0x4,
- MTNIC_IF_CAP_MAX_RX_DESC = 0x5,
- MTNIC_IF_CAP_MAX_CQES = 0x6,
- MTNIC_IF_CAP_MAX_TX_SG_ENTRIES = 0x7,
- MTNIC_IF_CAP_MAX_RX_SG_ENTRIES = 0x8,
- MTNIC_IF_CAP_MEM_KEY = 0x9, /* key to mem (after map_pages) */
- MTNIC_IF_CAP_RSS_HASH_TYPE = 0xa, /* one of mtnic_if_rss_types_t */
+ MTNIC_IF_CAP_MAX_CQ_PER_PORT = 0x2,
+ MTNIC_IF_CAP_NUM_PORTS = 0x3,
+ MTNIC_IF_CAP_MAX_TX_DESC = 0x4,
+ MTNIC_IF_CAP_MAX_RX_DESC = 0x5,
+ MTNIC_IF_CAP_MAX_CQES = 0x6,
+ MTNIC_IF_CAP_MAX_TX_SG_ENTRIES = 0x7,
+ MTNIC_IF_CAP_MAX_RX_SG_ENTRIES = 0x8,
+ MTNIC_IF_CAP_MEM_KEY = 0x9, /* key to mem (after map_pages) */
+ MTNIC_IF_CAP_RSS_HASH_TYPE = 0xa, /* one of mtnic_if_rss_types_t */
MTNIC_IF_CAP_MAX_PORT_UCAST_ADDR = 0xc,
MTNIC_IF_CAP_MAX_RING_UCAST_ADDR = 0xd, /* only for ADDR steer */
MTNIC_IF_CAP_MAX_PORT_MCAST_ADDR = 0xe,
MTNIC_IF_CAP_EQ_DB_OFFSET = 0x14, /* offset in bytes for EQ doorbell record */
/* These are per port - using port number from cap modifier field */
- MTNIC_IF_CAP_SPEED = 0x20,
- MTNIC_IF_CAP_DEFAULT_MAC = 0x21,
- MTNIC_IF_CAP_EQ_OFFSET = 0x22,
- MTNIC_IF_CAP_CQ_OFFSET = 0x23,
+ MTNIC_IF_CAP_SPEED = 0x20,
+ MTNIC_IF_CAP_DEFAULT_MAC = 0x21,
+ MTNIC_IF_CAP_EQ_OFFSET = 0x22,
+ MTNIC_IF_CAP_CQ_OFFSET = 0x23,
MTNIC_IF_CAP_TX_OFFSET = 0x24,
MTNIC_IF_CAP_RX_OFFSET = 0x25,
} mtnic_if_caps_t;
typedef enum mtnic_if_steer_types {
- MTNIC_IF_STEER_NONE = 0,
- MTNIC_IF_STEER_PRIORITY = 1,
- MTNIC_IF_STEER_RSS = 2,
- MTNIC_IF_STEER_ADDRESS = 3,
+ MTNIC_IF_STEER_NONE = 0,
+ MTNIC_IF_STEER_PRIORITY = 1,
+ MTNIC_IF_STEER_RSS = 2,
+ MTNIC_IF_STEER_ADDRESS = 3,
} mtnic_if_steer_types_t;
/** types of memory access modes */
enum {
- MTNIC_HCR_BASE = 0x1f000,
- MTNIC_HCR_SIZE = 0x0001c,
- MTNIC_CLR_INT_SIZE = 0x00008,
+ MTNIC_HCR_BASE = 0x1f000,
+ MTNIC_HCR_SIZE = 0x0001c,
+ MTNIC_CLR_INT_SIZE = 0x00008,
};
-#define MELLANOX_VENDOR_ID 0x15b3
-#define MTNIC_DEVICE_ID 0x00a00190
#define MTNIC_RESET_OFFSET 0xF0010
-#define MTNIC_DEVICE_ID_OFFSET 0xF0014
-
-
-
-
/* Buffers */
u32 buf_size; /* ring buffer size in bytes */
- dma_addr_t dma;
+ dma_addr_t dma;
void *buf;
struct io_buffer *iobuf[UNITS_BUFFER_SIZE];
u32 db_offset;
/* Rx ring only */
- dma_addr_t iobuf_dma;
+ dma_addr_t iobuf_dma;
struct mtnic_rx_db_record *db;
dma_addr_t db_dma;
};
struct mtnic_eq {
u32 size; /* number of EQEs in ring */
- u32 buf_size; /* EQ size in bytes */
+ u32 buf_size; /* EQ size in bytes */
void *buf;
dma_addr_t dma;
};
enum mtnic_state {
CARD_DOWN,
- CARD_INITIALIZED,
- CARD_UP
+ CARD_INITIALIZED,
+ CARD_UP,
+ CARD_LINK_DOWN,
};
/* FW */
struct mtnic_cmd {
- void *buf;
- unsigned long mapping;
- u32 tbit;
+ void *buf;
+ unsigned long mapping;
+ u32 tbit;
};
* Device private data
*
*/
-struct mtnic_priv {
- struct net_device *dev;
- struct pci_device *pdev;
- u8 port;
+struct mtnic {
+ struct net_device *netdev[MTNIC_MAX_PORTS];
+ struct mtnic_if_cmd_reg *hcr;
+ struct mtnic_cmd cmd;
+ struct pci_device *pdev;
- enum mtnic_state state;
- /* Firmware and board info */
- u64 fw_ver;
+ struct mtnic_eq eq;
+ u32 *eq_db;
+
+ /* Firmware and board info */
+ u64 fw_ver;
struct {
- struct mtnic_pages fw_pages;
- struct mtnic_pages extra_pages;
- struct mtnic_err_buf err_buf;
- u16 ifc_rev;
- u8 num_ports;
- u64 mac[MTNIC_MAX_PORTS];
- u16 cq_offset;
- u16 tx_offset[MTNIC_MAX_PORTS];
- u16 rx_offset[MTNIC_MAX_PORTS];
- u32 mem_type_snoop_be;
- u32 txcq_db_offset;
- u32 eq_db_offset;
- } fw;
-
-
- struct mtnic_if_cmd_reg *hcr;
- struct mtnic_cmd cmd;
+ struct mtnic_pages fw_pages;
+ struct mtnic_pages extra_pages;
+ struct mtnic_err_buf err_buf;
+ u16 ifc_rev;
+ u8 num_ports;
+ u64 mac[MTNIC_MAX_PORTS];
+ u16 cq_offset;
+ u16 tx_offset[MTNIC_MAX_PORTS];
+ u16 rx_offset[MTNIC_MAX_PORTS];
+ u32 mem_type_snoop_be;
+ u32 txcq_db_offset;
+ u32 eq_db_offset;
+ } fw;
+};
+
+
+
+
+
+struct mtnic_port {
+
+ struct mtnic *mtnic;
+ u8 port;
+
+ enum mtnic_state state;
/* TX, RX, CQs, EQ */
- struct mtnic_ring tx_ring;
- struct mtnic_ring rx_ring;
- struct mtnic_cq cq[NUM_CQS];
- struct mtnic_eq eq;
- u32 *eq_db;
- u32 poll_counter;
+ struct mtnic_ring tx_ring;
+ struct mtnic_ring rx_ring;
+ struct mtnic_cq cq[NUM_CQS];
+ u32 poll_counter;
+ struct net_device *netdev;
+
+
};
/* CMD MTNIC_IF_CMD_QUERY_CAP */
struct mtnic_if_query_cap_in_imm {
u16 reserved1;
- u8 cap_modifier; /* a modifier for the particular capability */
- u8 cap_index; /* the index of the capability queried */
+ u8 cap_modifier; /* a modifier for the particular capability */
+ u8 cap_index; /* the index of the capability queried */
u32 reserved2;
};
/* CMD OPEN_NIC */
struct mtnic_if_open_nic_in_mbox {
- u16 reserved1;
- u16 mkey; /* number of mem keys for all chip*/
- u32 mkey_entry; /* mem key entries for each key*/
- u8 log_rx_p1; /* log2 rx rings for port1 */
- u8 log_cq_p1; /* log2 cq for port1 */
- u8 log_tx_p1; /* log2 tx rings for port1 */
- u8 steer_p1; /* port 1 steering mode */
- u16 reserved2;
- u8 log_vlan_p1; /* log2 vlan per rx port1 */
- u8 log_mac_p1; /* log2 mac per rx port1 */
-
- u8 log_rx_p2; /* log2 rx rings for port1 */
- u8 log_cq_p2; /* log2 cq for port1 */
- u8 log_tx_p2; /* log2 tx rings for port1 */
- u8 steer_p2; /* port 1 steering mode */
- u16 reserved3;
- u8 log_vlan_p2; /* log2 vlan per rx port1 */
- u8 log_mac_p2; /* log2 mac per rx port1 */
+ u16 reserved1;
+ u16 mkey; /* number of mem keys for all chip*/
+ u32 mkey_entry; /* mem key entries for each key*/
+ u8 log_rx_p1; /* log2 rx rings for port1 */
+ u8 log_cq_p1; /* log2 cq for port1 */
+ u8 log_tx_p1; /* log2 tx rings for port1 */
+ u8 steer_p1; /* port 1 steering mode */
+ u16 reserved2;
+ u8 log_vlan_p1; /* log2 vlan per rx port1 */
+ u8 log_mac_p1; /* log2 mac per rx port1 */
+
+ u8 log_rx_p2; /* log2 rx rings for port1 */
+ u8 log_cq_p2; /* log2 cq for port1 */
+ u8 log_tx_p2; /* log2 tx rings for port1 */
+ u8 steer_p2; /* port 1 steering mode */
+ u16 reserved3;
+ u8 log_vlan_p2; /* log2 vlan per rx port1 */
+ u8 log_mac_p2; /* log2 mac per rx port1 */
};
+
/* CMD CONFIG_RX */
struct mtnic_if_config_rx_in_imm {
u16 spkt_size; /* size of small packets interrupts enabled on CQ */
/* CMD HEART_BEAT */
struct mtnic_if_heart_beat_out_imm {
- u32 flags; /* several flags */
+ u32 flags; /* several flags */
#define MTNIC_MASK_HEAR_BEAT_INT_ERROR MTNIC_BC(31,1)
- u32 reserved;
+ u32 reserved;
};
/* CMD CONFIG_PORT_VLAN_FILTER */
/* in mbox is a 4K bits mask - bit per VLAN */
struct mtnic_if_config_port_vlan_filter_in_mbox {
- u64 filter[64]; /* vlans[63:0] sit in filter[0], vlans[127:64] sit in filter[1] .. */
+ u64 filter[64]; /* vlans[63:0] sit in filter[0], vlans[127:64] sit in filter[1] .. */
};
/* CMD SET_PORT_MTU */
struct mtnic_if_set_port_mtu_in_imm {
u16 reserved1;
- u16 mtu; /* The MTU of the port in bytes */
+ u16 mtu; /* The MTU of the port in bytes */
u32 reserved2;
};
/* CMD CONFIG_CQ */
struct mtnic_if_config_cq_in_mbox {
- u8 reserved1;
- u8 cq;
- u8 size; /* Num CQs is 2^size (size <= 22) */
- u8 offset; /* start address of CQE in first page (11:6) */
- u16 tlast; /* interrupt moderation timer from last completion usec */
+ u8 reserved1;
+ u8 cq;
+ u8 size; /* Num CQs is 2^size (size <= 22) */
+ u8 offset; /* start address of CQE in first page (11:6) */
+ u16 tlast; /* interrupt moderation timer from last completion usec */
u8 flags; /* flags */
- u8 int_vector; /* MSI index if MSI is enabled, otherwise reserved */
+ u8 int_vector; /* MSI index if MSI is enabled, otherwise reserved */
u16 reserved2;
u16 max_cnt; /* interrupt moderation counter */
- u8 page_size; /* each mapped page is 2^(12+page_size) bytes */
- u8 reserved4[3];
+ u8 page_size; /* each mapped page is 2^(12+page_size) bytes */
+ u8 reserved4[3];
u32 db_record_addr_h; /*physical address of CQ doorbell record */
u32 db_record_addr_l; /*physical address of CQ doorbell record */
u32 page_address[0]; /* 64 bit page addresses of CQ buffer */
/* CMD CONFIG_RX_RING */
struct mtnic_if_config_rx_ring_in_mbox {
- u8 reserved1;
- u8 ring; /* The ring index (with offset) */
- u8 stride_size; /* stride and size */
+ u8 reserved1;
+ u8 ring; /* The ring index (with offset) */
+ u8 stride_size; /* stride and size */
/* Entry size = 16* (2^stride) bytes */
#define MTNIC_MASK_CONFIG_RX_RING_STRIDE MTNIC_BC(4,3)
/* Rx ring size is 2^size entries */
#define MTNIC_MASK_CONFIG_RX_RING_SIZE MTNIC_BC(0,4)
- u8 flags; /* Bit0 - header separation */
- u8 page_size; /* Each mapped page is 2^(12+page_size) bytes */
- u8 reserved2[2];
- u8 cq; /* CQ associated with this ring */
- u32 db_record_addr_h;
- u32 db_record_addr_l;
- u32 page_address[0];/* Array of 2^size 64b page descriptor addresses */
- /* Must hold all Rx descriptors + doorbell record. */
+ u8 flags; /* Bit0 - header separation */
+ u8 page_size; /* Each mapped page is 2^(12+page_size) bytes */
+ u8 reserved2[2];
+ u8 cq; /* CQ associated with this ring */
+ u32 db_record_addr_h;
+ u32 db_record_addr_l;
+ u32 page_address[0];/* Array of 2^size 64b page descriptor addresses */
+ /* Must hold all Rx descriptors + doorbell record. */
};
/* The modifier for SET_RX_RING_ADDR */
/* CMD SET_RX_RING_ADDR */
struct mtnic_if_set_rx_ring_addr_in_imm {
- u16 mac_47_32; /* UCAST MAC Address bits 47:32 */
+ u16 mac_47_32; /* UCAST MAC Address bits 47:32 */
u16 flags_vlan_id; /* MAC/VLAN flags and vlan id */
#define MTNIC_MASK_SET_RX_RING_ADDR_VLAN_ID MTNIC_BC(0,12)
#define MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC MTNIC_BC(12,1)
#define MTNIC_MASK_SET_RX_RING_ADDR_BY_VLAN MTNIC_BC(13,1)
- u32 mac_31_0; /* UCAST MAC Address bits 31:0 */
+ u32 mac_31_0; /* UCAST MAC Address bits 31:0 */
};
/* CMD CONFIG_TX_RING */
struct mtnic_if_config_send_ring_in_mbox {
- u16 ring; /* The ring index (with offset) */
+ u16 ring; /* The ring index (with offset) */
#define MTNIC_MASK_CONFIG_TX_RING_INDEX MTNIC_BC(0,8)
- u8 size; /* Tx ring size is 32*2^size bytes */
+ u8 size; /* Tx ring size is 32*2^size bytes */
#define MTNIC_MASK_CONFIG_TX_RING_SIZE MTNIC_BC(0,4)
- u8 reserved;
- u8 page_size; /* Each mapped page is 2^(12+page_size) bytes */
- u8 qos_class; /* The COS used for this Tx */
- u16 cq; /* CQ associated with this ring */
+ u8 reserved;
+ u8 page_size; /* Each mapped page is 2^(12+page_size) bytes */
+ u8 qos_class; /* The COS used for this Tx */
+ u16 cq; /* CQ associated with this ring */
#define MTNIC_MASK_CONFIG_TX_CQ_INDEX MTNIC_BC(0,8)
u32 page_address[0]; /* 64 bit page addresses of descriptor buffer. */
- /* The buffer must accommodate all Tx descriptors */
+ /* The buffer must accommodate all Tx descriptors */
};
/* CMD CONFIG_EQ */
u8 reserved1;
u8 int_vector; /* MSI index if MSI enabled; otherwise reserved */
#define MTNIC_MASK_CONFIG_EQ_INT_VEC MTNIC_BC(0,6)
- u8 size; /* Num CQs is 2^size entries (size <= 22) */
+ u8 size; /* Num CQs is 2^size entries (size <= 22) */
#define MTNIC_MASK_CONFIG_EQ_SIZE MTNIC_BC(0,5)
- u8 offset; /* Start address of CQE in first page (11:6) */
+ u8 offset; /* Start address of CQE in first page (11:6) */
#define MTNIC_MASK_CONFIG_EQ_OFFSET MTNIC_BC(0,6)
u8 page_size; /* Each mapped page is 2^(12+page_size) bytes*/
u8 reserved[3];