dev_info(&pdev->dev, "MANA reset cycle completed\n");
out:
- gc->in_service = false;
+ clear_bit(GC_IN_SERVICE, &gc->flags);
}
-struct mana_serv_work {
- struct work_struct serv_work;
- struct pci_dev *pdev;
- enum gdma_eqe_type type;
-};
-
static void mana_do_service(enum gdma_eqe_type type, struct pci_dev *pdev)
{
switch (type) {
module_put(THIS_MODULE);
}
+/* Schedule deferred MANA service work (e.g. a HW reset) of the given type.
+ *
+ * Serializes service requests via the GC_IN_SERVICE bit: only one service
+ * can be outstanding per gdma_context at a time.  Pins the module and the
+ * pci_dev so neither goes away while the work is queued; the work function
+ * (mana_serv_func) presumably drops both references and clears the bit when
+ * it completes — verify against mana_serv_func.
+ *
+ * Safe to call from atomic context (allocation uses GFP_ATOMIC).
+ *
+ * Return: 0 on success, -EBUSY if a service is already in progress,
+ * -ENODEV if the module is unloading, -ENOMEM on allocation failure.
+ */
+int mana_schedule_serv_work(struct gdma_context *gc, enum gdma_eqe_type type)
+{
+	struct mana_serv_work *mns_wk;
+
+	/* Atomically claim the in-service flag; bail out if already set. */
+	if (test_and_set_bit(GC_IN_SERVICE, &gc->flags)) {
+		dev_info(gc->dev, "Already in service\n");
+		return -EBUSY;
+	}
+
+	/* Keep the module alive while the work item is pending. */
+	if (!try_module_get(THIS_MODULE)) {
+		dev_info(gc->dev, "Module is unloading\n");
+		clear_bit(GC_IN_SERVICE, &gc->flags);
+		return -ENODEV;
+	}
+
+	mns_wk = kzalloc(sizeof(*mns_wk), GFP_ATOMIC);
+	if (!mns_wk) {
+		/* Undo everything claimed above on the failure path. */
+		module_put(THIS_MODULE);
+		clear_bit(GC_IN_SERVICE, &gc->flags);
+		return -ENOMEM;
+	}
+
+	dev_info(gc->dev, "Start MANA service type:%d\n", type);
+	mns_wk->pdev = to_pci_dev(gc->dev);
+	mns_wk->type = type;
+	/* Hold a device reference until the work item has run. */
+	pci_dev_get(mns_wk->pdev);
+	INIT_WORK(&mns_wk->serv_work, mana_serv_func);
+	schedule_work(&mns_wk->serv_work);
+	return 0;
+}
+
static void mana_gd_process_eqe(struct gdma_queue *eq)
{
u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
struct gdma_context *gc = eq->gdma_dev->gdma_context;
struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
- struct mana_serv_work *mns_wk;
union gdma_eqe_info eqe_info;
enum gdma_eqe_type type;
struct gdma_event event;
"Service is to be processed in probe\n");
break;
}
-
- if (gc->in_service) {
- dev_info(gc->dev, "Already in service\n");
- break;
- }
-
- if (!try_module_get(THIS_MODULE)) {
- dev_info(gc->dev, "Module is unloading\n");
- break;
- }
-
- mns_wk = kzalloc_obj(*mns_wk, GFP_ATOMIC);
- if (!mns_wk) {
- module_put(THIS_MODULE);
- break;
- }
-
- dev_info(gc->dev, "Start MANA service type:%d\n", type);
- gc->in_service = true;
- mns_wk->pdev = to_pci_dev(gc->dev);
- mns_wk->type = type;
- pci_dev_get(mns_wk->pdev);
- INIT_WORK(&mns_wk->serv_work, mana_serv_func);
- schedule_work(&mns_wk->serv_work);
+ mana_schedule_serv_work(gc, type);
break;
default:
struct gdma_context *gc = ac->gdma_dev->gdma_context;
/* Already in service, hence tx queue reset is not required.*/
- if (gc->in_service)
+ if (test_bit(GC_IN_SERVICE, &gc->flags))
return;
/* Note: If there are pending queue reset work for this port(apc),
{
struct mana_context *ac =
container_of(to_delayed_work(work), struct mana_context, gf_stats_work);
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
int err;
err = mana_query_gf_stats(ac);
/* HWC timeout detected - reset stats and stop rescheduling */
ac->hwc_timeout_occurred = true;
memset(&ac->hc_stats, 0, sizeof(ac->hc_stats));
+ dev_warn(gc->dev,
+ "Gf stats wk handler: gf stats query timed out.\n");
+		/* The HWC timed out, indicating a faulty HW state that
+		 * requires a reset.
+		 */
+ mana_schedule_serv_work(gc, GDMA_EQE_HWC_RESET_REQUEST);
return;
}
schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
#define GDMA_INVALID_DMA_REGION 0
+/* Context for one deferred MANA service request, allocated by
+ * mana_schedule_serv_work() and handed to mana_serv_func() via
+ * schedule_work() — presumably freed by the work function; verify.
+ */
+struct mana_serv_work {
+	struct work_struct serv_work;	/* work item queued on system wq */
+	struct pci_dev *pdev;		/* pinned via pci_dev_get() until serviced */
+	enum gdma_eqe_type type;	/* EQE type that requested the service */
+};
+
struct gdma_mem_info {
struct device *dev;
enum gdma_context_flags {
	GC_PROBE_SUCCEEDED = 0,
+	GC_IN_SERVICE = 1,	/* service/reset work is queued or running */
};
struct gdma_context {
u32 test_event_eq_id;
bool is_pf;
- bool in_service;
phys_addr_t bar0_pa;
void __iomem *bar0_va;
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
+int mana_schedule_serv_work(struct gdma_context *gc, enum gdma_eqe_type type);
+
struct gdma_wqe {
u32 reserved :24;
u32 last_vbytes :8;
/* Driver can handle hardware recovery events during probe */
#define GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY BIT(22)
+/* Driver supports self recovery on Hardware Channel timeouts */
+#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECOVERY BIT(25)
+
#define GDMA_DRV_CAP_FLAGS1 \
(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \
GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE | \
GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY | \
- GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY)
+ GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY | \
+ GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECOVERY)
#define GDMA_DRV_CAP_FLAGS2 0