#include <linux/dma-mapping.h>
#include "hinic3_cmdq.h"
+#include "hinic3_eqs.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
#define CMDQ_BUF_SIZE 2048
#define CMDQ_WQEBB_SIZE 64
+#define CMDQ_WQE_HEAD_LEN 32
#define CMDQ_CMD_TIMEOUT 5000
#define CMDQ_ENABLE_WAIT_TIMEOUT 300
#define CMDQ_WQE_NUM_WQEBBS 1
+/* Dump the head of a command-queue WQE to the debug log, four 32-bit
+ * words per line, to aid diagnosis after a cmdq timeout.
+ */
+static void hinic3_dump_cmdq_wqe_head(struct hinic3_hwdev *hwdev,
+ struct cmdq_wqe *wqe)
+{
+ u32 *data = (u32 *)wqe;
+ u32 i;
+
+ /* CMDQ_WQE_HEAD_LEN / sizeof(u32) = 8 dwords; the index advances by
+ * 4 because each iteration prints 4 consecutive dwords, so exactly
+ * CMDQ_WQE_HEAD_LEN bytes are covered in two lines.
+ */
+ for (i = 0; i < (CMDQ_WQE_HEAD_LEN / sizeof(u32)); i += 0x4) {
+ dev_dbg(hwdev->dev,
+ "wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+ *(data + i), *(data + i + 0x1), *(data + i + 0x2),
+ *(data + i + 0x3));
+ }
+}
+
static struct cmdq_wqe *cmdq_read_wqe(struct hinic3_wq *wq, u16 *ci)
{
if (hinic3_wq_get_used(wq) == 0)
case HINIC3_CMD_TYPE_TIMEOUT:
dev_warn(hwdev->dev, "Cmdq timeout, q_id: %u, ci: %u\n",
cmdq_type, ci);
+ hinic3_dump_cmdq_wqe_head(hwdev, wqe);
fallthrough;
case HINIC3_CMD_TYPE_FAKE_TIMEOUT:
cmdq_clear_cmd_buf(cmd_info, hwdev);
clear_cmd_info(cmd_info, saved_cmd_info);
spin_unlock_bh(&cmdq->cmdq_lock);
+ hinic3_dump_ceq_info(cmdq->hwdev);
+
return err;
}
#define EQ_CI_SIMPLE_INDIR_SET(val, member) \
FIELD_PREP(EQ_CI_SIMPLE_INDIR_##member##_MASK, val)
+#define EQ_CONS_IDX_REG_ADDR(eq) \
+ (((eq)->type == HINIC3_AEQ) ? \
+ HINIC3_CSR_AEQ_CONS_IDX_ADDR : HINIC3_CSR_CEQ_CONS_IDX_ADDR)
+
#define EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \
(((eq)->type == HINIC3_AEQ) ? \
HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \
struct hinic3_eq *ceq = data;
int err;
+ ceq->soft_intr_jif = jiffies;
/* clear resend timer counters */
hinic3_msix_intr_clear_resend_bit(ceq->hwdev, ceq->msix_entry_idx,
EQ_MSIX_RESEND_TIMER_CLEAR);
kfree(aeqs);
}
+/* Log the hardware and software state of every async event queue (AEQ):
+ * per-queue control register, indirection index, consumer/producer
+ * indices, work state, wrap flag and the current element descriptor.
+ * Called from error paths (e.g. mailbox timeout) for diagnostics.
+ */
+void hinic3_dump_aeq_info(struct hinic3_hwdev *hwdev)
+{
+ const struct hinic3_aeq_elem *aeqe_pos;
+ u32 addr, ci, pi, ctrl0, idx;
+ struct hinic3_eq *eq;
+ int q_id;
+
+ for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) {
+ eq = &hwdev->aeqs->aeq[q_id];
+ /* Indirect access should set q_id first */
+ hinic3_hwif_write_reg(eq->hwdev->hwif,
+ HINIC3_EQ_INDIR_IDX_ADDR(eq->type),
+ eq->q_id);
+
+ addr = HINIC3_CSR_AEQ_CTRL_0_ADDR;
+
+ ctrl0 = hinic3_hwif_read_reg(hwdev->hwif, addr);
+
+ /* Read back the indirection index so the log shows which queue
+ * the register values below actually belong to.
+ */
+ idx = hinic3_hwif_read_reg(hwdev->hwif,
+ HINIC3_EQ_INDIR_IDX_ADDR(eq->type));
+
+ addr = EQ_CONS_IDX_REG_ADDR(eq);
+ ci = hinic3_hwif_read_reg(hwdev->hwif, addr);
+ addr = EQ_PROD_IDX_REG_ADDR(eq);
+ pi = hinic3_hwif_read_reg(hwdev->hwif, addr);
+ aeqe_pos = get_curr_aeq_elem(eq);
+ /* swci is the driver's software consumer index (eq->cons_idx);
+ * comparing it against the hardware ci/pi shows how far event
+ * processing has fallen behind.
+ */
+ dev_err(hwdev->dev,
+ "Aeq id: %d, idx: %u, ctrl0: 0x%08x, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %u, desc: 0x%x swci:0x%x\n",
+ q_id, idx, ctrl0, ci, pi, work_busy(&eq->aeq_work),
+ eq->wrapped, be32_to_cpu(aeqe_pos->desc), eq->cons_idx);
+ }
+}
+
int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs,
struct msix_entry *msix_entries)
{
kfree(ceqs);
}
+
+/* Log the state of every completion event queue (CEQ): hardware
+ * consumer/producer indices, the driver's software consumer index,
+ * wrap flag, current element, and how long ago the last CEQ soft
+ * interrupt fired. Called from error paths (e.g. cmdq timeout).
+ */
+void hinic3_dump_ceq_info(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_eq *eq;
+ u32 addr, ci, pi;
+ int q_id;
+
+ for (q_id = 0; q_id < hwdev->ceqs->num_ceqs; q_id++) {
+ eq = &hwdev->ceqs->ceq[q_id];
+ /* Indirect access should set q_id first */
+ hinic3_hwif_write_reg(eq->hwdev->hwif,
+ HINIC3_EQ_INDIR_IDX_ADDR(eq->type),
+ eq->q_id);
+
+ addr = EQ_CONS_IDX_REG_ADDR(eq);
+ ci = hinic3_hwif_read_reg(hwdev->hwif, addr);
+ addr = EQ_PROD_IDX_REG_ADDR(eq);
+ pi = hinic3_hwif_read_reg(hwdev->hwif, addr);
+ dev_err(hwdev->dev,
+ "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, wrap: %u, ceqe: 0x%x\n",
+ q_id, ci, eq->cons_idx, pi,
+ eq->wrapped, be32_to_cpu(*get_curr_ceq_elem(eq)));
+
+ /* soft_intr_jif is stamped in the CEQ interrupt handler; a large
+ * delta here suggests the queue stopped receiving interrupts.
+ */
+ dev_err(hwdev->dev, "Ceq last response soft interrupt time: %u\n",
+ jiffies_to_msecs(jiffies - eq->soft_intr_jif));
+ }
+}
u16 msix_entry_idx;
char irq_name[HINIC3_EQ_IRQ_NAME_LEN];
struct work_struct aeq_work;
+
+ u64 soft_intr_jif;
};
struct hinic3_aeq_elem {
hinic3_aeq_event_cb hwe_cb);
void hinic3_aeq_unregister_cb(struct hinic3_hwdev *hwdev,
enum hinic3_aeq_type event);
+void hinic3_dump_aeq_info(struct hinic3_hwdev *hwdev);
+
int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs,
struct msix_entry *msix_entries);
void hinic3_ceqs_free(struct hinic3_hwdev *hwdev);
hinic3_ceq_event_cb callback);
void hinic3_ceq_unregister_cb(struct hinic3_hwdev *hwdev,
enum hinic3_ceq_event event);
+void hinic3_dump_ceq_info(struct hinic3_hwdev *hwdev);
#endif
#include "hinic3_common.h"
#include "hinic3_csr.h"
+#include "hinic3_eqs.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
mbox_ctrl);
}
+/* Log the mailbox control and interrupt registers of the current
+ * function; used to diagnose mailbox segment-send timeouts.
+ */
+static void hinic3_dump_mbox_reg(struct hinic3_hwdev *hwdev)
+{
+ u32 val;
+
+ val = hinic3_hwif_read_reg(hwdev->hwif,
+ HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF);
+ dev_err(hwdev->dev, "Mailbox control reg: 0x%x\n", val);
+ val = hinic3_hwif_read_reg(hwdev->hwif,
+ HINIC3_FUNC_CSR_MAILBOX_INT_OFF);
+ dev_err(hwdev->dev, "Mailbox interrupt offset: 0x%x\n", val);
+}
+
static u16 get_mbox_status(const struct hinic3_send_mbox *mbox)
{
__be64 *wb_status = mbox->wb_vaddr;
if (err) {
dev_err(hwdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
wb_status);
+ hinic3_dump_mbox_reg(hwdev);
return err;
}
if (wait_mbox_msg_completion(mbox, msg_params->timeout_ms)) {
dev_err(hwdev->dev,
"Send mbox msg timeout, msg_id: %u\n", msg_info.msg_id);
+ hinic3_dump_aeq_info(mbox->hwdev);
err = -ETIMEDOUT;
goto err_send;
}