if (ret)
return ret;
- if (!wait_for_completion_timeout(done, timeout) &&
- hci->io->dequeue_xfer(hci, xfer, n)) {
- dev_err(&hci->master.dev, "%s: timeout error\n", __func__);
- return -ETIMEDOUT;
+ if (!wait_for_completion_timeout(done, timeout)) {
+ if (hci->io->dequeue_xfer(hci, xfer, n)) {
+ dev_err(&hci->master.dev, "%s: timeout error\n", __func__);
+ return -ETIMEDOUT;
+ }
+ return 0;
+ }
+
+ if (hci->io->handle_error) {
+ bool error = false;
+
+ for (int i = 0; i < n && !error; i++)
+ error = RESP_STATUS(xfer[i].response);
+ if (error)
+ return hci->io->handle_error(hci, xfer, n);
}
return 0;
return did_unqueue;
}
+/*
+ * hci_dma_handle_error() - recovery hook invoked when a completed DMA
+ * transfer batch carries an error status (see the RESP_STATUS scan in
+ * the caller).
+ *
+ * Force-dequeues the whole @xfer_list of @n transfers.  Presumably
+ * hci_dma_dequeue_xfer() returns true when at least one transfer was
+ * still queued and had to be removed (cf. "return did_unqueue" in its
+ * tail) -- in that case report -EIO; otherwise everything had already
+ * drained and 0 is returned.  NOTE(review): dequeue semantics inferred
+ * from the visible fragment; confirm against hci_dma_dequeue_xfer().
+ */
+static int hci_dma_handle_error(struct i3c_hci *hci, struct hci_xfer *xfer_list, int n)
+{
+ return hci_dma_dequeue_xfer(hci, xfer_list, n) ? -EIO : 0;
+}
+
static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
{
u32 op1_val, op2_val, resp, *ring_resp;
hci_dma_xfer_done(hci, rh);
if (status & INTR_RING_OP)
complete(&rh->op_done);
-
- if (status & INTR_TRANSFER_ABORT) {
- u32 ring_status;
-
- dev_notice_ratelimited(&hci->master.dev,
- "Ring %d: Transfer Aborted\n", i);
- mipi_i3c_hci_resume(hci);
- ring_status = rh_reg_read(RING_STATUS);
- if (!(ring_status & RING_STATUS_RUNNING) &&
- status & INTR_TRANSFER_COMPLETION &&
- status & INTR_TRANSFER_ERR) {
- /*
- * Ring stop followed by run is an Intel
- * specific required quirk after resuming the
- * halted controller. Do it only when the ring
- * is not in running state after a transfer
- * error.
- */
- rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
- rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
- RING_CTRL_RUN_STOP);
- }
- }
+ if (status & INTR_TRANSFER_ABORT)
+ dev_dbg(&hci->master.dev, "Ring %d: Transfer Aborted\n", i);
if (status & INTR_IBI_RING_FULL)
dev_err_ratelimited(&hci->master.dev,
"Ring %d: IBI Ring Full Condition\n", i);
.cleanup = hci_dma_cleanup,
.queue_xfer = hci_dma_queue_xfer,
.dequeue_xfer = hci_dma_dequeue_xfer,
+ .handle_error = hci_dma_handle_error,
.irq_handler = hci_dma_irq_handler,
.request_ibi = hci_dma_request_ibi,
.free_ibi = hci_dma_free_ibi,
bool (*irq_handler)(struct i3c_hci *hci);
int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
+ int (*handle_error)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req);
void (*free_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev);