******************************************************************************
*/
+/**
+ * Disable descriptor ring
+ *
+ * @v intel Intel device
+ * @v reg Register block
+ * @ret rc Return status code
+ */
+static int intel_disable_ring ( struct intel_nic *intel, unsigned int reg ) {
+ uint32_t dctl;
+ unsigned int i;
+
+ /* Disable ring */
+ writel ( 0, ( intel->regs + reg + INTEL_xDCTL ) );
+
+ /* Wait for disable to complete */
+ for ( i = 0 ; i < INTEL_DISABLE_MAX_WAIT_MS ; i++ ) {
+
+ /* Check if ring is disabled */
+ dctl = readl ( intel->regs + reg + INTEL_xDCTL );
+ if ( ! ( dctl & INTEL_xDCTL_ENABLE ) )
+ return 0;
+
+ /* Delay */
+ mdelay ( 1 );
+ }
+
+ DBGC ( intel, "INTEL %p ring %05x timed out waiting for disable "
+ "(dctl %08x)\n", intel, reg, dctl );
+ return -ETIMEDOUT;
+}
+
+/**
+ * Reset descriptor ring
+ *
+ * @v intel Intel device
+ * @v reg Register block
+ */
+void intel_reset_ring ( struct intel_nic *intel, unsigned int reg ) {
+
+ /* Disable ring. Ignore errors and continue to reset the ring anyway */
+ intel_disable_ring ( intel, reg );
+
+ /* Clear ring length */
+ writel ( 0, ( intel->regs + reg + INTEL_xDLEN ) );
+
+ /* Clear ring address */
+ writel ( 0, ( intel->regs + reg + INTEL_xDBAH ) );
+ writel ( 0, ( intel->regs + reg + INTEL_xDBAL ) );
+
+ /* Reset head and tail pointers */
+ writel ( 0, ( intel->regs + reg + INTEL_xDH ) );
+ writel ( 0, ( intel->regs + reg + INTEL_xDT ) );
+}
+
/**
* Create descriptor ring
*
*/
void intel_destroy_ring ( struct intel_nic *intel, struct intel_ring *ring ) {
- /* Clear ring length */
- writel ( 0, ( intel->regs + ring->reg + INTEL_xDLEN ) );
-
- /* Clear ring address */
- writel ( 0, ( intel->regs + ring->reg + INTEL_xDBAL ) );
- writel ( 0, ( intel->regs + ring->reg + INTEL_xDBAH ) );
+ /* Reset ring */
+ intel_reset_ring ( intel, ring->reg );
/* Free descriptor ring */
free_dma ( ring->desc, ring->len );
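As a usage illustration only (the function name below is hypothetical and not part of this change), a caller that keeps its rings in struct intel_ring could hand the new helper each ring's register block offset when shutting down:

/* Hypothetical caller, for illustration: reset both rings on shutdown
 * using the register block offset stored in each struct intel_ring.
 */
static void example_shutdown_rings ( struct intel_nic *intel ) {

	/* Disable and reset transmit ring registers */
	intel_reset_ring ( intel, intel->tx.reg );

	/* Disable and reset receive ring registers */
	intel_reset_ring ( intel, intel->rx.reg );
}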
#define INTEL_xDCTL 0x28
#define INTEL_xDCTL_ENABLE 0x02000000UL /**< Queue enable */
+/** Maximum time to wait for queue disable, in milliseconds */
+#define INTEL_DISABLE_MAX_WAIT_MS 100
+
/** Receive Address Low */
#define INTEL_RAL0 0x05400UL
physaddr_t addr, size_t len );
extern void intel_describe_rx ( struct intel_descriptor *rx,
physaddr_t addr, size_t len );
+extern void intel_reset_ring ( struct intel_nic *intel, unsigned int reg );
extern int intel_create_ring ( struct intel_nic *intel,
struct intel_ring *ring );
extern void intel_destroy_ring ( struct intel_nic *intel,
uint32_t rxdctl;
uint32_t srrctl;
uint32_t dca_rxctrl;
+ unsigned int i;
int vlan_thing;
int rc;
goto err_mbox_set_mtu;
}
+ /* Reset all descriptor rings */
+ for ( i = 0 ; i < INTELXVF_NUM_RINGS ; i++ ) {
+ intel_reset_ring ( intel, INTELXVF_TD ( i ) );
+ intel_reset_ring ( intel, INTELXVF_RD ( i ) );
+ }
+
+ /* Reset packet split receive type register */
+ writel ( 0, intel->regs + INTELXVF_PSRTYPE );
+
/* Get queue configuration. Ignore failures, since the host
* may not support this message.
*/
if ( vlan_thing ) {
DBGC ( intel, "INTEL %p stripping VLAN tags (thing=%d)\n",
intel, vlan_thing );
- rxdctl = readl ( intel->regs + INTELXVF_RD + INTEL_xDCTL );
+ rxdctl = readl ( intel->regs + INTELXVF_RD(0) + INTEL_xDCTL );
rxdctl |= INTELX_RXDCTL_VME;
- writel ( rxdctl, intel->regs + INTELXVF_RD + INTEL_xDCTL );
+ writel ( rxdctl, intel->regs + INTELXVF_RD(0) + INTEL_xDCTL );
}
/* Create transmit descriptor ring */
/* Configure receive buffer sizes and set receive descriptor type */
srrctl = readl ( intel->regs + INTELXVF_SRRCTL );
srrctl &= ~( INTELXVF_SRRCTL_BSIZE_MASK |
+ INTELXVF_SRRCTL_BHDRSIZE_MASK |
INTELXVF_SRRCTL_DESCTYPE_MASK );
srrctl |= ( INTELXVF_SRRCTL_BSIZE_DEFAULT |
- INTELXVF_SRRCTL_DESCTYPE_DEFAULT );
+ INTELXVF_SRRCTL_BHDRSIZE_DEFAULT |
+ INTELXVF_SRRCTL_DESCTYPE_DEFAULT |
+ INTELXVF_SRRCTL_DROP_EN );
writel ( srrctl, intel->regs + INTELXVF_SRRCTL );
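For reference, a small illustrative check (not part of the patch; a standard assert() and the field macros from intelxvf.h are assumed) of what the bits ORed into SRRCTL above evaluate to:

#include <assert.h>
#include <stdint.h>

/* Illustration only: the SRRCTL bits set above evaluate to 0x10000402
 * (2kB buffer size, default header split size, default descriptor type,
 * and drop-enable) before being merged with the existing register value.
 */
static void example_srrctl_bits ( void ) {
	uint32_t bits;

	bits = ( INTELXVF_SRRCTL_BSIZE_DEFAULT |	/* 0x00000002 */
		 INTELXVF_SRRCTL_BHDRSIZE_DEFAULT |	/* 0x00000400 */
		 INTELXVF_SRRCTL_DESCTYPE_DEFAULT |	/* 0x00000000 */
		 INTELXVF_SRRCTL_DROP_EN );		/* 0x10000000 */
	assert ( bits == 0x10000402UL );
}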
/* Clear "must-be-zero" bit for direct cache access (DCA). We
netdev->dev = &pci->dev;
memset ( intel, 0, sizeof ( *intel ) );
intel_init_mbox ( &intel->mbox, INTELXVF_MBCTRL, INTELXVF_MBMEM );
- intel_init_ring ( &intel->tx, INTEL_NUM_TX_DESC, INTELXVF_TD,
+ intel_init_ring ( &intel->tx, INTEL_NUM_TX_DESC, INTELXVF_TD(0),
intel_describe_tx_adv );
- intel_init_ring ( &intel->rx, INTEL_NUM_RX_DESC, INTELXVF_RD,
+ intel_init_ring ( &intel->rx, INTEL_NUM_RX_DESC, INTELXVF_RD(0),
intel_describe_rx );
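To make the new per-queue addressing concrete (illustration only, not part of the patch; the function name is hypothetical), the INTELXVF_TD(n) and INTELXVF_RD(n) macros place each ring's register block 0x40 bytes after the previous one, with queue 0 keeping the old fixed offsets:

#include <assert.h>

/* Illustration only: per-queue register block offsets */
static void example_ring_offsets ( void ) {

	/* Queue 0 uses the legacy fixed offsets */
	assert ( INTELXVF_TD ( 0 ) == 0x2000UL );
	assert ( INTELXVF_RD ( 0 ) == 0x1000UL );

	/* Each further queue's block follows at a 0x40-byte stride */
	assert ( INTELXVF_TD ( 1 ) == 0x2040UL );
	assert ( INTELXVF_RD ( 7 ) == 0x11c0UL );
}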
/* Fix up PCI device */
/** Mailbox Control Register */
#define INTELXVF_MBCTRL 0x02fcUL
+/** Packet Split Receive Type */
+#define INTELXVF_PSRTYPE 0x0300UL
+
/** Receive Descriptor register block */
-#define INTELXVF_RD 0x1000UL
+#define INTELXVF_RD(n) ( 0x1000UL + ( 0x40 * (n) ) )
/** RX DCA Control Register */
#define INTELXVF_DCA_RXCTRL 0x100cUL
#define INTELXVF_SRRCTL_BSIZE(kb) ( (kb) << 0 ) /**< Receive buffer size */
#define INTELXVF_SRRCTL_BSIZE_DEFAULT INTELXVF_SRRCTL_BSIZE ( 0x02 )
#define INTELXVF_SRRCTL_BSIZE_MASK INTELXVF_SRRCTL_BSIZE ( 0x1f )
+#define INTELXVF_SRRCTL_BHDRSIZE(kb) ( (kb) << 8 ) /**< Header size */
+#define INTELXVF_SRRCTL_BHDRSIZE_DEFAULT INTELXVF_SRRCTL_BHDRSIZE ( 0x04 )
+#define INTELXVF_SRRCTL_BHDRSIZE_MASK INTELXVF_SRRCTL_BHDRSIZE ( 0x0f )
#define INTELXVF_SRRCTL_DESCTYPE(typ) ( (typ) << 25 ) /**< Descriptor type */
#define INTELXVF_SRRCTL_DESCTYPE_DEFAULT INTELXVF_SRRCTL_DESCTYPE ( 0x00 )
#define INTELXVF_SRRCTL_DESCTYPE_MASK INTELXVF_SRRCTL_DESCTYPE ( 0x07 )
+#define INTELXVF_SRRCTL_DROP_EN 0x10000000UL /**< Drop packets when no descriptors are available */
/** Good Packets Received Count */
#define INTELXVF_GPRC 0x101c
#define INTELXVF_MPRC 0x1034
/** Transmit Descriptor register block */
-#define INTELXVF_TD 0x2000UL
+#define INTELXVF_TD(n) ( 0x2000UL + ( 0x40 * (n) ) )
/** Good Packets Transmitted Count */
#define INTELXVF_GPTC 0x201c
/** API version 1.1 */
#define INTELXVF_MSG_VERSION_1_1 0x00000002UL
+/** Number of descriptor rings */
+#define INTELXVF_NUM_RINGS 8
+
#endif /* _INTELXVF_H */