#include "wx_type.h"
#include "wx_lib.h"
#include "wx_sriov.h"
+#include "wx_vf.h"
#include "wx_hw.h"
void wx_intr_enable(struct wx *wx, u64 qmask)
{
u32 mask;
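+ /* VF has no PX_IMC banks; a single VXIMC (mask clear) write enables the vectors */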
+ if (wx->pdev->is_virtfn) {
+ wr32(wx, WX_VXIMC, qmask);
+ return;
+ }
+
mask = (qmask & U32_MAX);
if (mask)
wr32(wx, WX_PX_IMC(0), mask);
/* We will try to get MSI-X interrupts first */
ret = wx_acquire_msix_vectors(wx);
- if (ret == 0 || (ret == -ENOMEM))
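+ /* VFs are MSI-X only, so do not fall through to MSI/INTx setup */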
+ if (ret == 0 || (ret == -ENOMEM) || pdev->is_virtfn)
return ret;
/* Disable VMDq support */
int ret;
/* Number of supported queues */
- wx_set_num_queues(wx);
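+ /* VF drivers hook this to fetch queue counts from the PF over the mailbox */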
+ if (wx->pdev->is_virtfn) {
+ if (wx->set_num_queues)
+ wx->set_num_queues(wx);
+ } else {
+ wx_set_num_queues(wx);
+ }
/* Set interrupt mode */
ret = wx_set_interrupt_capability(wx);
int (*setup_tc)(struct net_device *netdev, u8 tc);
void (*do_reset)(struct net_device *netdev);
int (*ptp_setup_sdp)(struct wx *wx);
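+ /* VF only: query supported queue configuration from the PF */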
+ void (*set_num_queues)(struct wx *wx);
bool pps_enabled;
u64 pps_width;
struct wx *wx = pci_get_drvdata(pdev);
netif_device_detach(wx->netdev);
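+ /* tear down q_vectors and MSI-X vectors while the device is down */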
+ wx_clear_interrupt_scheme(wx);
pci_disable_device(pdev);
return 0;
struct wx *wx = pci_get_drvdata(pdev);
pci_set_master(pdev);
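+ /* rebuild the interrupt scheme that suspend tore down */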
+ wx_init_interrupt_scheme(wx);
netif_device_attach(wx->netdev);
return 0;
kfree(wx->vfinfo);
kfree(wx->rss_key);
kfree(wx->mac_table);
+ wx_clear_interrupt_scheme(wx);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
pci_disable_device(pdev);
}
}
- err = request_threaded_irq(wx->msix_entry->vector, NULL,
- wx_msix_misc_vf, IRQF_ONESHOT,
- netdev->name, wx);
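+ /* run wx_msix_misc_vf in hard-IRQ context instead of a threaded handler */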
+ err = request_threaded_irq(wx->msix_entry->vector, wx_msix_misc_vf,
+ NULL, IRQF_ONESHOT, netdev->name, wx);
if (err) {
wx_err(wx, "request_irq for msix_other failed: %d\n", err);
goto free_queue_irqs;
}
EXPORT_SYMBOL(wx_set_mac_vf);
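+/* a VXIMC (mask clear) write unmasks, i.e. enables, the selected vectors */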
+static void wxvf_irq_enable(struct wx *wx)
+{
+ wr32(wx, WX_VXIMC, wx->eims_enable_mask);
+}
+
+static void wxvf_up_complete(struct wx *wx)
+{
+ wx_configure_msix_vf(wx);
+
+ /* clear any pending interrupts, may auto mask */
+ wr32(wx, WX_VXICR, U32_MAX);
+ wxvf_irq_enable(wx);
+}
+
int wxvf_open(struct net_device *netdev)
{
+ struct wx *wx = netdev_priv(netdev);
+ int err;
+
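+ /* request the queue vectors and the misc (mailbox) vector */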
+ err = wx_request_msix_irqs_vf(wx);
+ if (err)
+ goto err_reset;
+
+ wxvf_up_complete(wx);
+
return 0;
+err_reset:
+ wx_reset_vf(wx);
+ return err;
}
EXPORT_SYMBOL(wxvf_open);
void wxvf_down(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
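+ /* quiesce Tx and NAPI before resetting the VF and draining the rings */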
+ netif_tx_stop_all_queues(netdev);
netif_tx_disable(netdev);
+ wx_napi_disable_all(wx);
wx_reset_vf(wx);
+
+ wx_clean_all_tx_rings(wx);
+ wx_clean_all_rx_rings(wx);
}
int wxvf_close(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
wxvf_down(wx);
+ wx_free_irq(wx);
return 0;
}
#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_lib.h"
#include "../libwx/wx_mbx.h"
#include "../libwx/wx_vf.h"
#include "../libwx/wx_vf_common.h"
.ndo_set_mac_address = wx_set_mac_vf,
};
+static void txgbevf_set_num_queues(struct wx *wx)
+{
+ u32 def_q = 0, num_tcs = 0;
+ u16 rss, queue;
+ int ret;
+
+ /* Start with base case */
+ wx->num_rx_queues = 1;
+ wx->num_tx_queues = 1;
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ /* fetch queue configuration from the PF */
+ ret = wx_get_queues_vf(wx, &num_tcs, &def_q);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+
+ if (ret)
+ return;
+
+ /* we need as many queues as traffic classes */
+ if (num_tcs > 1) {
+ wx->num_rx_queues = num_tcs;
+ } else {
+ rss = min_t(u16, num_online_cpus(), TXGBEVF_MAX_RSS_NUM);
+ queue = min_t(u16, wx->mac.max_rx_queues, wx->mac.max_tx_queues);
+ rss = min_t(u16, queue, rss);
+
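+ /* multiple RSS queues require mailbox API 1.3 or later */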
+ if (wx->vfinfo->vf_api >= wx_mbox_api_13) {
+ wx->num_rx_queues = rss;
+ wx->num_tx_queues = rss;
+ }
+ }
+}
+
static void txgbevf_init_type_code(struct wx *wx)
{
switch (wx->device_id) {
if (err)
goto err_init_mbx_params;
+ /* max q_vectors */
+ wx->mac.max_msix_vectors = TXGBEVF_MAX_MSIX_VECTORS;
/* Initialize the device type */
txgbevf_init_type_code(wx);
/* lock to protect mailbox accesses */
wx->tx_work_limit = TXGBEVF_DEFAULT_TX_WORK;
wx->rx_work_limit = TXGBEVF_DEFAULT_RX_WORK;
+ wx->set_num_queues = txgbevf_set_num_queues;
+
return 0;
err_reset_hw:
kfree(wx->vfinfo);
eth_hw_addr_set(netdev, wx->mac.perm_addr);
ether_addr_copy(netdev->perm_addr, wx->mac.addr);
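+ /* set up queue counts, MSI-X and q_vectors before the netdev is registered */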
+ err = wx_init_interrupt_scheme(wx);
+ if (err)
+ goto err_free_sw_init;
+
err = register_netdev(netdev);
if (err)
goto err_register;
return 0;
err_register:
+ wx_clear_interrupt_scheme(wx);
+err_free_sw_init:
kfree(wx->vfinfo);
kfree(wx->rss_key);
kfree(wx->mac_table);
#define TXGBEVF_DEV_ID_AML503F 0x503f
#define TXGBEVF_DEV_ID_AML513F 0x513f
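+/* two MSI-X vectors per VF: one queue vector plus the misc/mailbox vector */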
+#define TXGBEVF_MAX_MSIX_VECTORS 2
+#define TXGBEVF_MAX_RSS_NUM 4
#define TXGBEVF_MAX_RX_QUEUES 4
#define TXGBEVF_MAX_TX_QUEUES 4
#define TXGBEVF_DEFAULT_TXD 128