struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+ if (ndev->dev_status <= AIE2_DEV_INIT) {
+ XDNA_ERR(xdna, "device is already stopped");
+ return;
+ }
+
aie2_mgmt_fw_fini(ndev);
xdna_mailbox_stop_channel(ndev->mgmt_chann);
xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+ ndev->mgmt_chann = NULL;
+ drmm_kfree(&xdna->ddev, ndev->mbox);
+ ndev->mbox = NULL;
aie2_psp_stop(ndev->psp_hdl);
aie2_smu_fini(ndev);
pci_disable_device(pdev);
+
+ ndev->dev_status = AIE2_DEV_INIT;
}
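
With the status guard, aie2_hw_stop() is now idempotent, so a power-management path can call it without tracking state itself. As a rough illustration, a suspend hook could simply delegate to it; aie2_hw_suspend() and the use of xdna->dev_lock here are assumptions for the sketch, not part of this patch:

/* Hypothetical suspend hook: calling it twice is harmless because
 * aie2_hw_stop() returns early once dev_status is AIE2_DEV_INIT.
 */
static void aie2_hw_suspend(struct amdxdna_dev *xdna)
{
        mutex_lock(&xdna->dev_lock);    /* assumed serialization lock */
        aie2_hw_stop(xdna);
        mutex_unlock(&xdna->dev_lock);
}
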
static int aie2_hw_start(struct amdxdna_dev *xdna)
u32 xdna_mailbox_intr_reg;
int mgmt_mb_irq, ret;
+ if (ndev->dev_status >= AIE2_DEV_START) {
+ XDNA_INFO(xdna, "device is already started");
+ return 0;
+ }
+
ret = pci_enable_device(pdev);
if (ret) {
XDNA_ERR(xdna, "failed to enable device, ret %d", ret);
return ret;
}
+ ndev->dev_status = AIE2_DEV_START;
+
return 0;
destroy_mgmt_chann:
struct drm_syncobj *syncobj;
};
+enum aie2_dev_status {
+ AIE2_DEV_INIT,
+ AIE2_DEV_START,
+};
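
The guards in aie2_hw_stop() and aie2_hw_start() compare against these values relationally (<= AIE2_DEV_INIT, >= AIE2_DEV_START), so the enumerators must stay declared in lifecycle order. A small helper, sketched here as a hypothetical convenience rather than anything this patch adds, shows the intended reading:

/* Hypothetical helper: relies on the enum being ordered by
 * bring-up stage, i.e. INIT < START.
 */
static inline bool aie2_dev_is_started(struct amdxdna_dev_hdl *ndev)
{
        return ndev->dev_status >= AIE2_DEV_START;
}
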
+
struct amdxdna_dev_hdl {
struct amdxdna_dev *xdna;
const struct amdxdna_dev_priv *priv;
struct mailbox *mbox;
struct mailbox_channel *mgmt_chann;
struct async_events *async_events;
+
+ enum aie2_dev_status dev_status;
};
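
Since AIE2_DEV_INIT is the first enumerator (value 0), a zero-initialized handle already starts in the stopped state, with no explicit assignment needed at allocation time. A minimal sketch, assuming the handle comes from a zeroing managed allocator (the allocation site is not part of this excerpt):

/* dev_status is implicitly AIE2_DEV_INIT (0) after a zeroing alloc */
ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL);
if (!ndev)
        return -ENOMEM;
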
#define DEFINE_BAR_OFFSET(reg_name, bar, reg_addr) \
int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
{
- if (!mb_chann)
- return 0;
-
MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
free_irq(mb_chann->msix_irq, mb_chann);
destroy_workqueue(mb_chann->work_q);
void xdna_mailbox_stop_channel(struct mailbox_channel *mb_chann)
{
- if (!mb_chann)
- return;
-
/* Disable an irq and wait. This might sleep. */
disable_irq(mb_chann->msix_irq);
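
Dropping the NULL checks shifts responsibility to the callers: aie2_hw_stop() now gates on dev_status and clears ndev->mgmt_chann after teardown, so the mailbox helpers can assume a live channel. Any other caller has to uphold the same invariant, along these lines (illustrative pattern, not code from the patch):

/* Caller-side pattern: only invoke the helpers on a valid channel,
 * then clear the pointer so a stale handle cannot be reused.
 */
if (ndev->mgmt_chann) {
        xdna_mailbox_stop_channel(ndev->mgmt_chann);
        xdna_mailbox_destroy_channel(ndev->mgmt_chann);
        ndev->mgmt_chann = NULL;
}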