#include <linux/irqdomain.h>
#include <net/mana/mana.h>
+#include <net/mana/hw_channel.h>
struct dentry *mana_debugfs_root;
mana_gd_init_vf_regs(pdev);
}
+/* Suppress error logging when the HWC timeout has been set to zero */
+bool mana_need_log(struct gdma_context *gc, int err)
+{
+ struct hw_channel_context *hwc;
+
+ if (err != -ETIMEDOUT)
+ return true;
+
+ if (!gc)
+ return true;
+
+ hwc = gc->hwc.driver_data;
+ if (hwc && hwc->hwc_timeout == 0)
+ return false;
+
+ return true;
+}
+
static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
- resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
+ resp.hdr.status);
return err ? err : -EPROTO;
}
#define MANA_SERVICE_PERIOD 10
-struct mana_serv_work {
- struct work_struct serv_work;
- struct pci_dev *pdev;
-};
-
-static void mana_serv_func(struct work_struct *w)
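+/* Service an FPGA reconfig request under the PCI rescan/remove lock */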
+static void mana_serv_fpga(struct pci_dev *pdev)
{
- struct mana_serv_work *mns_wk;
struct pci_bus *bus, *parent;
- struct pci_dev *pdev;
-
- mns_wk = container_of(w, struct mana_serv_work, serv_work);
- pdev = mns_wk->pdev;
pci_lock_rescan_remove();
- if (!pdev)
- goto out;
-
bus = pdev->bus;
if (!bus) {
dev_err(&pdev->dev, "MANA service: no bus\n");
out:
pci_unlock_rescan_remove();
+}
+
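+/* Service a HW reset request: suspend the device, wait out the service
+ * period, then resume it.
+ */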
+static void mana_serv_reset(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct hw_channel_context *hwc;
+
+ if (!gc) {
+ dev_err(&pdev->dev, "MANA service: no GC\n");
+ return;
+ }
+
+ hwc = gc->hwc.driver_data;
+ if (!hwc) {
+ dev_err(&pdev->dev, "MANA service: no HWC\n");
+ goto out;
+ }
+
+ /* HWC is not responding in this case, so don't wait */
+ hwc->hwc_timeout = 0;
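+ /* The zero timeout also makes mana_need_log() suppress -ETIMEDOUT errors */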
+
+ dev_info(&pdev->dev, "MANA reset cycle start\n");
+ mana_gd_suspend(pdev, PMSG_SUSPEND);
+
+ msleep(MANA_SERVICE_PERIOD * 1000);
+
+ mana_gd_resume(pdev);
+
+ dev_info(&pdev->dev, "MANA reset cycle completed\n");
+
+out:
+ gc->in_service = false;
+}
+
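+/* Deferred service request: the PCI device and the EQE type that queued it */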
+struct mana_serv_work {
+ struct work_struct serv_work;
+ struct pci_dev *pdev;
+ enum gdma_eqe_type type;
+};
+
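+/* Work handler: dispatch the queued request to the handler for its EQE type */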
+static void mana_serv_func(struct work_struct *w)
+{
+ struct mana_serv_work *mns_wk;
+ struct pci_dev *pdev;
+
+ mns_wk = container_of(w, struct mana_serv_work, serv_work);
+ pdev = mns_wk->pdev;
+
+ if (!pdev)
+ goto out;
+
+ switch (mns_wk->type) {
+ case GDMA_EQE_HWC_FPGA_RECONFIG:
+ mana_serv_fpga(pdev);
+ break;
+
+ case GDMA_EQE_HWC_RESET_REQUEST:
+ mana_serv_reset(pdev);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "MANA service: unknown type %d\n",
+ mns_wk->type);
+ break;
+ }
+
+out:
pci_dev_put(pdev);
kfree(mns_wk);
module_put(THIS_MODULE);
break;
case GDMA_EQE_HWC_FPGA_RECONFIG:
+ case GDMA_EQE_HWC_RESET_REQUEST:
dev_info(gc->dev, "Recv MANA service type:%d\n", type);
if (gc->in_service) {
dev_info(gc->dev, "Start MANA service type:%d\n", type);
gc->in_service = true;
mns_wk->pdev = to_pci_dev(gc->dev);
+ mns_wk->type = type;
pci_dev_get(mns_wk->pdev);
INIT_WORK(&mns_wk->serv_work, mana_serv_func);
schedule_work(&mns_wk->serv_work);
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err) {
- dev_err(dev, "test_eq failed: %d\n", err);
+ if (mana_need_log(gc, err))
+ dev_err(dev, "test_eq failed: %d\n", err);
goto out;
}
if (flush_evenets) {
err = mana_gd_test_eq(gc, queue);
- if (err)
+ if (err && mana_need_log(gc, err))
dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
}
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
- err, resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
+ err, resp.hdr.status);
return -EPROTO;
}
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
- err, resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
+ err, resp.hdr.status);
if (!err)
err = -EPROTO;
}
}
/* The 'state' parameter is not used. */
-static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
+int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
* fail -- if this happens, it's safer to just report an error than try to undo
* what has been done.
*/
-static int mana_gd_resume(struct pci_dev *pdev)
+int mana_gd_resume(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
int err;
.read = mana_dbg_q_read,
};
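+/* Per-port wrapper for mana_need_log(); log as usual when the gdma_context
+ * cannot be reached.
+ */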
+static bool mana_en_need_log(struct mana_port_context *apc, int err)
+{
+ if (apc && apc->ac && apc->ac->gdma_dev &&
+ apc->ac->gdma_dev->gdma_context)
+ return mana_need_log(apc->ac->gdma_dev->gdma_context, err);
+ else
+ return true;
+}
+
/* Microsoft Azure Network Adapter (MANA) functions */
static int mana_open(struct net_device *ndev)
if (err == -EOPNOTSUPP)
return err;
- if (req->req.msg_type != MANA_QUERY_PHY_STAT)
+ if (req->req.msg_type != MANA_QUERY_PHY_STAT &&
+ mana_need_log(gc, err))
dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
err, resp->status);
return err ? err : -EPROTO;
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
- err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
+ err);
+
return;
}
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
- err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
+ err);
+
return;
}
err = mana_send_request(apc->ac, req, req_buf_size, &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+
goto out;
}
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+
return;
}
apc->rss_state = TRI_STATE_FALSE;
err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
- if (err) {
+ if (err && mana_en_need_log(apc, err))
netdev_err(ndev, "Failed to disable vPort: %d\n", err);
- return err;
- }
+ /* Even in the error case, we still need to clean up the vPort */
mana_destroy_vport(apc);
return 0;
GDMA_EQE_HWC_FPGA_RECONFIG = 132,
GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133,
GDMA_EQE_HWC_SOC_SERVICE = 134,
+ GDMA_EQE_HWC_RESET_REQUEST = 135,
GDMA_EQE_RNIC_QP_FATAL = 176,
};
/* Driver supports dynamic MSI-X vector allocation */
#define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13)
+/* Driver can self reset on EQE notification */
+#define GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE BIT(14)
+
/* Driver can self reset on FPGA Reconfig EQE notification */
#define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)
GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
+ GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE)
#define GDMA_DRV_CAP_FLAGS2 0
int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event);
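+/* Suspend/resume are also invoked by the HWC reset service work */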
+int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state);
+int mana_gd_resume(struct pci_dev *pdev);
+
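+/* Returns false when logging should be suppressed: -ETIMEDOUT while the HWC
+ * timeout is set to zero.
+ */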
+bool mana_need_log(struct gdma_context *gc, int err);
+
#endif /* _GDMA_H */