// SPDX-License-Identifier: GPL-2.0
/*
 * MHI Endpoint bus stack
 *
 * Copyright (C) 2022 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include "internal.h"

#define M0_WAIT_DELAY_MS	100
#define M0_WAIT_COUNT		100
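
/*
 * Together these bound the M0 wait in mhi_ep_enable(): up to M0_WAIT_COUNT
 * iterations of M0_WAIT_DELAY_MS each, i.e. roughly 100 * 100 ms = 10 s
 * before the endpoint gives up on the host.
 */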

static DEFINE_IDA(mhi_ep_cntrl_ida);

static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
static int mhi_ep_destroy_device(struct device *dev, void *data);

static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
			     struct mhi_ring_element *el, bool bei)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	union mhi_ep_ring_ctx *ctx;
	struct mhi_ep_ring *ring;
	int ret;

	mutex_lock(&mhi_cntrl->event_lock);
	ring = &mhi_cntrl->mhi_event[ring_idx].ring;
	ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
	if (!ring->started) {
		ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
		if (ret) {
			dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
			goto err_unlock;
		}
	}

	/* Add element to the event ring */
	ret = mhi_ep_ring_add_element(ring, el);
	if (ret) {
		dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
		goto err_unlock;
	}

	mutex_unlock(&mhi_cntrl->event_lock);

	/*
	 * As per the MHI specification, section 4.3, Interrupt moderation:
	 *
	 * 1. If BEI flag is not set, cancel any pending intmodt work if started
	 * for the event ring and raise IRQ immediately.
	 *
	 * 2. If both BEI and intmodt are set, and if no IRQ is pending for the
	 * same event ring, start the IRQ delayed work as per the value of
	 * intmodt. If previous IRQ is pending, then do nothing as the pending
	 * IRQ is enough for the host to process the current event ring element.
	 *
	 * 3. If BEI is set and intmodt is not set, no need to raise IRQ.
	 */
	if (!bei) {
		if (READ_ONCE(ring->irq_pending))
			cancel_delayed_work(&ring->intmodt_work);

		mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
	} else if (ring->intmodt && !READ_ONCE(ring->irq_pending)) {
		WRITE_ONCE(ring->irq_pending, true);
		schedule_delayed_work(&ring->intmodt_work, msecs_to_jiffies(ring->intmodt));
	}

	return 0;

err_unlock:
	mutex_unlock(&mhi_cntrl->event_lock);

	return ret;
}
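
/*
 * Worked example of the moderation rules above (illustrative values, not
 * taken from this driver): assume the host programmed ring->intmodt = 10 ms.
 *
 *   bei = false:           any pending intmodt work is cancelled and
 *                          raise_irq() fires immediately.
 *   bei = true, 1st event: irq_pending is false, so the delayed work is
 *                          scheduled to raise the IRQ ~10 ms later.
 *   bei = true, 2nd event: irq_pending is still true, nothing is scheduled;
 *                          the already pending IRQ covers this element too.
 */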

static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
					struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
	event->dword[0] = MHI_TRE_EV_DWORD0(code, len);
	event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->dword[0] = MHI_SC_EV_DWORD0(state);
	event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->dword[0] = MHI_EE_EV_DWORD0(exec_env);
	event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
{
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
	event->dword[0] = MHI_CC_EV_DWORD0(code);
	event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	struct mhi_ep_ring *ch_ring;
	u32 tmp, ch_id;
	int ret;

	ch_id = MHI_TRE_GET_CMD_CHID(el);

	/* Check if the channel is supported by the controller */
	if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
		dev_dbg(dev, "Channel (%u) not supported!\n", ch_id);
		return -ENODEV;
	}

	mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;

	switch (MHI_TRE_GET_CMD_TYPE(el)) {
	case MHI_PKT_TYPE_START_CHAN_CMD:
		dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);

		mutex_lock(&mhi_chan->lock);
		/* Initialize and configure the corresponding channel ring */
		if (!ch_ring->started) {
			ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
				(union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
			if (ret) {
				dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
				ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
								 MHI_EV_CC_UNDEFINED_ERR);
				if (ret)
					dev_err(dev, "Error sending completion event: %d\n", ret);

				goto err_unlock;
			}

			mhi_chan->rd_offset = ch_ring->rd_offset;
		}

		/* Set channel state to RUNNING */
		mhi_chan->state = MHI_CH_STATE_RUNNING;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);

		/*
		 * Create MHI device only during UL channel start. Since the MHI
		 * channels operate in a pair, we'll associate both UL and DL
		 * channels to the same device.
		 *
		 * We also need to check for mhi_dev != NULL because, the host
		 * will issue START_CHAN command during resume and we don't
		 * destroy the device during suspend.
		 */
		if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
			ret = mhi_ep_create_device(mhi_cntrl, ch_id);
			if (ret) {
				dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
				mhi_ep_handle_syserr(mhi_cntrl);
				return ret;
			}
		}

		/* Finally, enable DB for the channel */
		mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);
		break;
	case MHI_PKT_TYPE_STOP_CHAN_CMD:
		dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
		if (!ch_ring->started) {
			dev_err(dev, "Channel (%u) not opened\n", ch_id);
			return -ENODEV;
		}

		mutex_lock(&mhi_chan->lock);
		/* Disable DB for the channel */
		mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);

		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		/* Set channel state to STOP */
		mhi_chan->state = MHI_CH_STATE_STOP;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);
		break;
	case MHI_PKT_TYPE_RESET_CHAN_CMD:
		dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
		if (!ch_ring->started) {
			dev_err(dev, "Channel (%u) not opened\n", ch_id);
			return -ENODEV;
		}

		mutex_lock(&mhi_chan->lock);
		/* Stop and reset the transfer ring */
		mhi_ep_ring_reset(mhi_cntrl, ch_ring);

		/* Send channel disconnect status to client driver */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		/* Set channel state to DISABLED */
		mhi_chan->state = MHI_CH_STATE_DISABLED;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);
		break;
	default:
		dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
			MHI_TRE_GET_CMD_TYPE(el), ch_id);
		return -EINVAL;
	}

	return 0;

err_unlock:
	mutex_unlock(&mhi_chan->lock);

	return ret;
}

bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
{
	struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
								mhi_dev->ul_chan;
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;

	return !!(mhi_chan->rd_offset == ring->wr_offset);
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
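
/*
 * Illustrative use from a client driver (a sketch, not code in this file):
 * check for a free TRE on the DL ring before queueing data towards the host.
 *
 *	if (!mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE))
 *		ret = mhi_ep_queue_skb(mhi_dev, skb);	// at least one TRE free
 */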

static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info)
{
	struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan;
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
	struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
	struct mhi_result result = {};
	int ret;

	if (mhi_chan->xfer_cb) {
		result.buf_addr = buf_info->cb_buf;
		result.dir = mhi_chan->dir;
		result.bytes_xferd = buf_info->size;

		mhi_chan->xfer_cb(mhi_dev, &result);
	}

	/*
	 * The host will split the data packet into multiple TREs if it can't fit
	 * the packet in a single TRE. In that case, CHAIN flag will be set by the
	 * host for all TREs except the last one.
	 */
	if (buf_info->code != MHI_EV_CC_OVERFLOW) {
		if (MHI_TRE_DATA_GET_CHAIN(el)) {
			/*
			 * IEOB (Interrupt on End of Block) flag will be set by the host if
			 * it expects the completion event for all TREs of a TD.
			 */
			if (MHI_TRE_DATA_GET_IEOB(el)) {
				ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
								   MHI_TRE_DATA_GET_LEN(el),
								   MHI_EV_CC_EOB);
				if (ret < 0) {
					dev_err(&mhi_chan->mhi_dev->dev,
						"Error sending transfer compl. event\n");
					goto err_free_tre_buf;
				}
			}
		} else {
			/*
			 * IEOT (Interrupt on End of Transfer) flag will be set by the host
			 * for the last TRE of the TD and expects the completion event for
			 * the same.
			 */
			if (MHI_TRE_DATA_GET_IEOT(el)) {
				ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
								   MHI_TRE_DATA_GET_LEN(el),
								   MHI_EV_CC_EOT);
				if (ret < 0) {
					dev_err(&mhi_chan->mhi_dev->dev,
						"Error sending transfer compl. event\n");
					goto err_free_tre_buf;
				}
			}
		}
	}

	mhi_ep_ring_inc_index(ring);

err_free_tre_buf:
	kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf);
}
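
/*
 * Illustrative TD layout for the flag handling above (hypothetical values):
 * a packet split across three TREs of one TD would typically carry
 *
 *	TRE0: CHAIN=1 -> EOB completion sent only if IEOB is also set
 *	TRE1: CHAIN=1 -> same handling as TRE0
 *	TRE2: CHAIN=0, IEOT=1 -> EOT completion marks the packet boundary
 *
 * When buf_info->code is MHI_EV_CC_OVERFLOW the TRE was only partially
 * consumed, so no EOB/EOT completion is sent for it here.
 */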

static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
			       struct mhi_ep_ring *ring)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	size_t tr_len, read_offset, write_offset;
	struct mhi_ep_buf_info buf_info = {};
	u32 len = MHI_EP_DEFAULT_MTU;
	struct mhi_ring_element *el;
	bool tr_done = false;
	void *buf_addr;
	u32 buf_left;
	int ret;

	buf_left = len;

	do {
		/* Don't process the transfer ring if the channel is not in RUNNING state */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			dev_err(dev, "Channel not available\n");
			return -ENODEV;
		}

		el = &ring->ring_cache[mhi_chan->rd_offset];

		/* Check if there is data pending to be read from previous read operation */
		if (mhi_chan->tre_bytes_left) {
			dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
			tr_len = min(buf_left, mhi_chan->tre_bytes_left);
		} else {
			mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
			mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
			mhi_chan->tre_bytes_left = mhi_chan->tre_size;

			tr_len = min(buf_left, mhi_chan->tre_size);
		}

		read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
		write_offset = len - buf_left;

		buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
		if (!buf_addr)
			return -ENOMEM;

		buf_info.host_addr = mhi_chan->tre_loc + read_offset;
		buf_info.dev_addr = buf_addr + write_offset;
		buf_info.size = tr_len;
		buf_info.cb = mhi_ep_read_completion;
		buf_info.cb_buf = buf_addr;
		buf_info.mhi_dev = mhi_chan->mhi_dev;

		if (mhi_chan->tre_bytes_left - tr_len)
			buf_info.code = MHI_EV_CC_OVERFLOW;

		dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
		ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info);
		if (ret < 0) {
			dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
			goto err_free_buf_addr;
		}

		buf_left -= tr_len;
		mhi_chan->tre_bytes_left -= tr_len;

		if (!mhi_chan->tre_bytes_left) {
			if (MHI_TRE_DATA_GET_IEOT(el))
				tr_done = true;

			mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
		}
	} while (buf_left && !tr_done);

	return 0;

err_free_buf_addr:
	kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr);

	return ret;
}

static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int ret;

	mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];

	/*
	 * Bail out if transfer callback is not registered for the channel.
	 * This is most likely due to the client driver not loaded at this point.
	 */
	if (!mhi_chan->xfer_cb) {
		dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
		return -ENODEV;
	}

	if (ring->ch_id % 2) {
		/* DL channel */
		result.dir = mhi_chan->dir;
		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
	} else {
		/* UL channel */
		do {
			ret = mhi_ep_read_channel(mhi_cntrl, ring);
			if (ret < 0) {
				dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
				return ret;
			}

			/* Read until the ring becomes empty */
		} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
	}

	return 0;
}

static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info)
{
	struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
	struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
	struct device *dev = &mhi_dev->dev;
	struct mhi_result result = {};
	int ret;

	if (mhi_chan->xfer_cb) {
		result.buf_addr = buf_info->cb_buf;
		result.dir = mhi_chan->dir;
		result.bytes_xferd = buf_info->size;

		mhi_chan->xfer_cb(mhi_dev, &result);
	}

	ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size,
					   buf_info->code);
	if (ret) {
		dev_err(dev, "Error sending transfer completion event\n");
		return;
	}

	mhi_ep_ring_inc_index(ring);
}

/* TODO: Handle partially formed TDs */
int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
{
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
	struct device *dev = &mhi_chan->mhi_dev->dev;
	struct mhi_ep_buf_info buf_info = {};
	struct mhi_ring_element *el;
	u32 buf_left, read_offset;
	struct mhi_ep_ring *ring;
	size_t tr_len, tre_len;
	int ret;

	buf_left = skb->len;
	ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;

	mutex_lock(&mhi_chan->lock);

	do {
		/* Don't process the transfer ring if the channel is not in RUNNING state */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			dev_err(dev, "Channel not available\n");
			ret = -ENODEV;
			goto err_exit;
		}

		if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
			dev_err(dev, "TRE not available!\n");
			ret = -ENOSPC;
			goto err_exit;
		}

		el = &ring->ring_cache[mhi_chan->rd_offset];
		tre_len = MHI_TRE_DATA_GET_LEN(el);

		tr_len = min(buf_left, tre_len);
		read_offset = skb->len - buf_left;

		buf_info.dev_addr = skb->data + read_offset;
		buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
		buf_info.size = tr_len;
		buf_info.cb = mhi_ep_skb_completion;
		buf_info.cb_buf = skb;
		buf_info.mhi_dev = mhi_dev;

		/*
		 * For all TREs queued by the host for DL channel, only the EOT flag will be set.
		 * If the packet doesn't fit into a single TRE, send the OVERFLOW event to
		 * the host so that the host can adjust the packet boundary to next TREs. Else send
		 * the EOT event to the host indicating the packet boundary.
		 */
		if (buf_left - tr_len)
			buf_info.code = MHI_EV_CC_OVERFLOW;
		else
			buf_info.code = MHI_EV_CC_EOT;

		dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
		ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info);
		if (ret < 0) {
			dev_err(dev, "Error writing to the channel\n");
			goto err_exit;
		}

		buf_left -= tr_len;

		/*
		 * Update the read offset cached in mhi_chan. Actual read offset
		 * will be updated by the completion handler.
		 */
		mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
	} while (buf_left);

	mutex_unlock(&mhi_chan->lock);

	return 0;

err_exit:
	mutex_unlock(&mhi_chan->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);
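
/*
 * Illustrative caller (a sketch of how an endpoint client driver such as a
 * network function might transmit towards the host; all names hypothetical):
 *
 *	static netdev_tx_t foo_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
 *	{
 *		struct foo_dev *fdev = netdev_priv(ndev);
 *
 *		if (mhi_ep_queue_skb(fdev->mdev, skb)) {
 *			dev_kfree_skb_any(skb);
 *			ndev->stats.tx_dropped++;
 *		}
 *
 *		return NETDEV_TX_OK;
 *	}
 */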

static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Update the number of event rings (NER) programmed by the host */
	mhi_ep_mmio_update_ner(mhi_cntrl);

	dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
		mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);

	ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
	ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
	cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;

	/* Get the channel context base pointer from host */
	mhi_ep_mmio_get_chc_base(mhi_cntrl);

	/* Allocate and map memory for caching host channel context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
				   &mhi_cntrl->ch_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->ch_ctx_cache,
				   ch_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
		return ret;
	}

	/* Get the event context base pointer from host */
	mhi_ep_mmio_get_erc_base(mhi_cntrl);

	/* Allocate and map memory for caching host event context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
				   &mhi_cntrl->ev_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->ev_ctx_cache,
				   ev_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
		goto err_ch_ctx;
	}

	/* Get the command context base pointer from host */
	mhi_ep_mmio_get_crc_base(mhi_cntrl);

	/* Allocate and map memory for caching host command context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
				   &mhi_cntrl->cmd_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
				   cmd_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
		goto err_ev_ctx;
	}

	/* Initialize command ring */
	ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
				(union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
	if (ret) {
		dev_err(dev, "Failed to start the command ring\n");
		goto err_cmd_ctx;
	}

	return 0;

err_cmd_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);

err_ev_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);

err_ch_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);

	return ret;
}

static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;

	ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
	ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
	cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
}

static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
{
	/*
	 * Doorbell interrupts are enabled when the corresponding channel gets started.
	 * Enabling all interrupts here triggers spurious irqs as some of the interrupts
	 * associated with hw channels always get triggered.
	 */
	mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
	mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
}

static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	bool mhi_reset;
	u32 count = 0;
	int ret;

	/* Wait for Host to set the M0 state */
	do {
		msleep(M0_WAIT_DELAY_MS);
		mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
		if (mhi_reset) {
			/* Clear the MHI reset if host is in reset state */
			mhi_ep_mmio_clear_reset(mhi_cntrl);
			dev_info(dev, "Detected Host reset while waiting for M0\n");
		}
		count++;
	} while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);

	if (state != MHI_STATE_M0) {
		dev_err(dev, "Host failed to enter M0\n");
		return -ETIMEDOUT;
	}

	ret = mhi_ep_cache_host_cfg(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Failed to cache host config\n");
		return ret;
	}

	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* Enable all interrupts now */
	mhi_ep_enable_int(mhi_cntrl);

	return 0;
}

static void mhi_ep_cmd_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ring_element *el;
	int ret;

	/* Update the write offset for the ring */
	ret = mhi_ep_update_wr_offset(ring);
	if (ret) {
		dev_err(dev, "Error updating write offset for ring\n");
		return;
	}

	/* Sanity check to make sure there are elements in the ring */
	if (ring->rd_offset == ring->wr_offset)
		return;

	/*
	 * Process command ring element till write offset. In case of an error, just try to
	 * process next element.
	 */
	while (ring->rd_offset != ring->wr_offset) {
		el = &ring->ring_cache[ring->rd_offset];

		ret = mhi_ep_process_cmd_ring(ring, el);
		if (ret && ret != -ENODEV)
			dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);

		mhi_ep_ring_inc_index(ring);
	}
}

static void mhi_ep_ch_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_ring_item *itr, *tmp;
	struct mhi_ep_ring *ring;
	struct mhi_ep_chan *chan;
	unsigned long flags;
	LIST_HEAD(head);
	int ret;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	/* Process each queued channel ring. In case of an error, just process next element. */
	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		ring = itr->ring;

		chan = &mhi_cntrl->mhi_chan[ring->ch_id];
		mutex_lock(&chan->lock);

		/*
		 * The ring could've stopped while we waited to grab the (chan->lock), so do
		 * a sanity check before going further.
		 */
		if (!ring->started) {
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		/* Update the write offset for the ring */
		ret = mhi_ep_update_wr_offset(ring);
		if (ret) {
			dev_err(dev, "Error updating write offset for ring\n");
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		/* Sanity check to make sure there are elements in the ring */
		if (chan->rd_offset == ring->wr_offset) {
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
		ret = mhi_ep_process_ch_ring(ring);
		if (ret) {
			dev_err(dev, "Error processing ring for channel (%u): %d\n",
				ring->ch_id, ret);
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		mutex_unlock(&chan->lock);
		kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
	}
}

static void mhi_ep_state_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_state_transition *itr, *tmp;
	unsigned long flags;
	LIST_HEAD(head);
	int ret;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling MHI state transition to %s\n",
			mhi_state_str(itr->state));

		switch (itr->state) {
		case MHI_STATE_M0:
			ret = mhi_ep_set_m0_state(mhi_cntrl);
			if (ret)
				dev_err(dev, "Failed to transition to M0 state\n");
			break;
		case MHI_STATE_M3:
			ret = mhi_ep_set_m3_state(mhi_cntrl);
			if (ret)
				dev_err(dev, "Failed to transition to M3 state\n");
			break;
		default:
			dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
			break;
		}

		kfree(itr);
	}
}

static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
				    u32 ch_idx)
{
	struct mhi_ep_ring_item *item;
	struct mhi_ep_ring *ring;
	bool work = !!ch_int;
	LIST_HEAD(head);
	u32 i;

	/* First add the ring items to a local list */
	for_each_set_bit(i, &ch_int, 32) {
		/* Channel index varies for each register: 0, 32, 64, 96 */
		u32 ch_id = ch_idx + i;

		ring = &mhi_cntrl->mhi_chan[ch_id].ring;
		item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC);
		if (!item)
			return;

		item->ring = ring;
		list_add_tail(&item->node, &head);
	}

	/* Now, splice the local list into ch_db_list and queue the work item */
	if (work) {
		spin_lock(&mhi_cntrl->list_lock);
		list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
		spin_unlock(&mhi_cntrl->list_lock);

		queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
	}
}

/*
 * Channel interrupt statuses are contained in 4 registers each of 32bit length.
 * For checking all interrupts, we need to loop through each registers and then
 * check for bits set.
 */
static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
{
	u32 ch_int, ch_idx, i;

	/* Bail out if there is no channel doorbell interrupt */
	if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
		return;

	for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
		ch_idx = i * MHI_MASK_CH_LEN;

		/* Only process channel interrupt if the mask is enabled */
		ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
		if (ch_int) {
			mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
			mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
					  mhi_cntrl->chdb[i].status);
		}
	}
}
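
/*
 * Example of the mapping used above: a doorbell rung for channel 70 shows
 * up in CHDB status register i = 2 (so ch_idx = 2 * MHI_MASK_CH_LEN = 64)
 * with bit 6 set, and mhi_ep_queue_channel_db() derives ch_id = 64 + 6 = 70.
 */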

static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
					  enum mhi_state state)
{
	struct mhi_ep_state_transition *item;

	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return;

	item->state = state;
	spin_lock(&mhi_cntrl->list_lock);
	list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
	spin_unlock(&mhi_cntrl->list_lock);

	queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
}

/*
 * Interrupt handler that services interrupts raised by the host writing to
 * MHICTRL and Command ring doorbell (CRDB) registers for state change and
 * channel interrupts.
 */
static irqreturn_t mhi_ep_irq(int irq, void *data)
{
	struct mhi_ep_cntrl *mhi_cntrl = data;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	u32 int_value;
	bool mhi_reset;

	/* Acknowledge the ctrl interrupt */
	int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
	mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);

	/* Check for ctrl interrupt */
	if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
		dev_dbg(dev, "Processing ctrl interrupt\n");
		mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
		if (mhi_reset) {
			dev_info(dev, "Host triggered MHI reset!\n");
			disable_irq_nosync(mhi_cntrl->irq);
			schedule_work(&mhi_cntrl->reset_work);
			return IRQ_HANDLED;
		}

		mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
	}

	/* Check for command doorbell interrupt */
	if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
		dev_dbg(dev, "Processing command doorbell interrupt\n");
		queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
	}

	/* Check for channel interrupts */
	mhi_ep_check_channel_interrupt(mhi_cntrl);

	return IRQ_HANDLED;
}

static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_ring *ch_ring, *ev_ring;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int i;

	/* Stop all the channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mutex_unlock(&mhi_chan->lock);
	}

	flush_workqueue(mhi_cntrl->wq);

	/* Destroy devices associated with all channels */
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);

	/* Stop and reset the transfer rings */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		ch_ring = &mhi_cntrl->mhi_chan[i].ring;
		mutex_lock(&mhi_chan->lock);
		mhi_ep_ring_reset(mhi_cntrl, ch_ring);
		mutex_unlock(&mhi_chan->lock);
	}

	/* Stop and reset the event rings */
	for (i = 0; i < mhi_cntrl->event_rings; i++) {
		ev_ring = &mhi_cntrl->mhi_event[i].ring;
		if (!ev_ring->started)
			continue;

		mutex_lock(&mhi_cntrl->event_lock);
		mhi_ep_ring_reset(mhi_cntrl, ev_ring);
		mutex_unlock(&mhi_cntrl->event_lock);
	}

	/* Stop and reset the command ring */
	mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);

	mhi_ep_free_host_cfg(mhi_cntrl);
	mhi_ep_mmio_mask_interrupts(mhi_cntrl);

	mhi_cntrl->enabled = false;
}

static void mhi_ep_reset_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
	enum mhi_state cur_state;

	mhi_ep_power_down(mhi_cntrl);

	mutex_lock(&mhi_cntrl->state_lock);

	/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
	mhi_ep_mmio_reset(mhi_cntrl);
	cur_state = mhi_cntrl->mhi_state;

	/*
	 * Only proceed further if the reset is due to SYS_ERR. The host will
	 * issue reset during shutdown also and we don't need to do re-init in
	 * that case.
	 */
	if (cur_state == MHI_STATE_SYS_ERR)
		mhi_ep_power_up(mhi_cntrl);

	mutex_unlock(&mhi_cntrl->state_lock);
}

/*
 * We don't need to do anything special other than setting the MHI SYS_ERR
 * state. The host will reset all contexts and issue MHI RESET so that we
 * could also recover from error state.
 */
void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
	if (ret)
		return;

	/* Signal host that the device went to SYS_ERR state */
	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
	if (ret)
		dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
}

int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	/*
	 * Mask all interrupts until the state machine is ready. Interrupts will
	 * be enabled later with mhi_ep_enable().
	 */
	mhi_ep_mmio_mask_interrupts(mhi_cntrl);
	mhi_ep_mmio_init(mhi_cntrl);

	mhi_cntrl->mhi_event = kcalloc(mhi_cntrl->event_rings,
				       sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Initialize command, channel and event rings */
	mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
	for (i = 0; i < mhi_cntrl->max_chan; i++)
		mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
	for (i = 0; i < mhi_cntrl->event_rings; i++)
		mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);

	mhi_cntrl->mhi_state = MHI_STATE_RESET;

	/* Set AMSS EE before signaling ready state */
	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* All set, notify the host that we are ready */
	ret = mhi_ep_set_ready_state(mhi_cntrl);
	if (ret)
		goto err_free_event;

	dev_dbg(dev, "READY state notification sent to the host\n");

	ret = mhi_ep_enable(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Failed to enable MHI endpoint\n");
		goto err_free_event;
	}

	enable_irq(mhi_cntrl->irq);
	mhi_cntrl->enabled = true;

	return 0;

err_free_event:
	kfree(mhi_cntrl->mhi_event);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_power_up);

void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
{
	if (mhi_cntrl->enabled) {
		mhi_ep_abort_transfer(mhi_cntrl);
		kfree(mhi_cntrl->mhi_event);
		disable_irq(mhi_cntrl->irq);
	}
}
EXPORT_SYMBOL_GPL(mhi_ep_power_down);
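
/*
 * Typical controller driver usage (a sketch under the assumption of a PCI
 * EPF controller driver; 'epf_mhi' is a hypothetical private structure):
 *
 *	// Host link came up (e.g. BME set by the host):
 *	ret = mhi_ep_power_up(&epf_mhi->mhi_cntrl);
 *
 *	// Link went down or the function is being unbound:
 *	mhi_ep_power_down(&epf_mhi->mhi_cntrl);
 */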

void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 tmp;
	int i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Skip if the channel is not currently running */
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
		/* Set channel state to SUSPENDED */
		mhi_chan->state = MHI_CH_STATE_SUSPENDED;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
		mutex_unlock(&mhi_chan->lock);
	}
}

void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 tmp;
	int i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Skip if the channel is not currently suspended */
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
		/* Set channel state to RUNNING */
		mhi_chan->state = MHI_CH_STATE_RUNNING;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
		mutex_unlock(&mhi_chan->lock);
	}
}

static void mhi_ep_release_device(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		mhi_dev->mhi_cntrl->mhi_dev = NULL;

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created in mhi_ep_create_device()
	 * if the mhi_dev associated with it is NULL.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
						 enum mhi_device_type dev_type)
{
	struct mhi_ep_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_ep_bus_type;
	dev->release = mhi_ep_release_device;

	/* Controller device is always allocated first */
	if (dev_type == MHI_DEVICE_CONTROLLER)
		/* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
		dev->parent = mhi_cntrl->cntrl_dev;
	else
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_type = dev_type;

	return mhi_dev;
}

/*
 * MHI channels are always defined in pairs with UL as the even numbered
 * channel and DL as odd numbered one. This function gets UL channel (primary)
 * as the ch_id and always looks after the next entry in channel list for
 * the corresponding DL channel (secondary).
 */
static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	struct device *dev = mhi_cntrl->cntrl_dev;
	struct mhi_ep_device *mhi_dev;
	int ret;

	/* Check if the channel name is same for both UL and DL */
	if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
		dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n",
			mhi_chan->name, mhi_chan[1].name);
		return -EINVAL;
	}

	mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
	if (IS_ERR(mhi_dev))
		return PTR_ERR(mhi_dev);

	/* Configure primary channel */
	mhi_dev->ul_chan = mhi_chan;
	get_device(&mhi_dev->dev);
	mhi_chan->mhi_dev = mhi_dev;

	/* Configure secondary channel as well */
	mhi_chan++;
	mhi_dev->dl_chan = mhi_chan;
	get_device(&mhi_dev->dev);
	mhi_chan->mhi_dev = mhi_dev;

	/* Channel name is same for both UL and DL */
	mhi_dev->name = mhi_chan->name;
	ret = dev_set_name(&mhi_dev->dev, "%s_%s",
			   dev_name(&mhi_cntrl->mhi_dev->dev),
			   mhi_dev->name);
	if (ret) {
		put_device(&mhi_dev->dev);
		return ret;
	}

	ret = device_add(&mhi_dev->dev);
	if (ret)
		put_device(&mhi_dev->dev);

	return ret;
}
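
/*
 * Concrete example of the pairing described above (channel numbers and the
 * name are illustrative): with ch_id 2 ("SAHARA" UL) and ch_id 3 ("SAHARA"
 * DL) defined back to back, a START command on channel 2 creates a single
 * "mhi_ep0_SAHARA" device whose ul_chan is channel 2 and dl_chan channel 3.
 */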

static int mhi_ep_destroy_device(struct device *dev, void *data)
{
	struct mhi_ep_device *mhi_dev;
	struct mhi_ep_cntrl *mhi_cntrl;
	struct mhi_ep_chan *ul_chan, *dl_chan;

	if (dev->bus != &mhi_ep_bus_type)
		return 0;

	mhi_dev = to_mhi_ep_device(dev);
	mhi_cntrl = mhi_dev->mhi_cntrl;

	/* Only destroy devices created for channels */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	ul_chan = mhi_dev->ul_chan;
	dl_chan = mhi_dev->dl_chan;

	if (ul_chan)
		put_device(&ul_chan->mhi_dev->dev);

	if (dl_chan)
		put_device(&dl_chan->mhi_dev->dev);

	dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
		mhi_dev->name);

	/* Notify the client and remove the device from MHI bus */
	device_del(dev);
	put_device(dev);

	return 0;
}

static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
			    const struct mhi_ep_cntrl_config *config)
{
	const struct mhi_ep_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	u32 chan, i;
	int ret = -EINVAL;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * Allocate max_channels supported by the MHI endpoint and populate
	 * only the defined channels
	 */
	mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
				      GFP_KERNEL);
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	for (i = 0; i < config->num_channels; i++) {
		struct mhi_ep_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
				chan, mhi_cntrl->max_chan);
			goto error_chan_cfg;
		}

		/* Bi-directional and direction less channels are not supported */
		if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
			dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
				ch_cfg->dir, chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;
		mhi_chan->dir = ch_cfg->dir;
		mutex_init(&mhi_chan->lock);
	}

	return 0;

error_chan_cfg:
	kfree(mhi_cntrl->mhi_chan);

	return ret;
}

/*
 * Allocate channel and command rings here. Event rings will be allocated
 * in mhi_ep_power_up() as the config comes from the host.
 */
int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
			       const struct mhi_ep_cntrl_config *config)
{
	struct mhi_ep_device *mhi_dev;
	int ret;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
		return -EINVAL;

	if (!mhi_cntrl->read_sync || !mhi_cntrl->write_sync ||
	    !mhi_cntrl->read_async || !mhi_cntrl->write_async)
		return -EINVAL;

	ret = mhi_ep_chan_init(mhi_cntrl, config);
	if (ret)
		return ret;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_ch;
	}

	mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
							sizeof(struct mhi_ring_element), 0,
							SLAB_CACHE_DMA, NULL);
	if (!mhi_cntrl->ev_ring_el_cache) {
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
						     SLAB_CACHE_DMA, NULL);
	if (!mhi_cntrl->tre_buf_cache) {
		ret = -ENOMEM;
		goto err_destroy_ev_ring_el_cache;
	}

	mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
						       sizeof(struct mhi_ep_ring_item), 0,
						       0, NULL);
	if (!mhi_cntrl->ring_item_cache) {
		ret = -ENOMEM;
		goto err_destroy_tre_buf_cache;
	}

	INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
	INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
	INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
	INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);

	mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
	if (!mhi_cntrl->wq) {
		ret = -ENOMEM;
		goto err_destroy_ring_item_cache;
	}

	INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
	INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
	spin_lock_init(&mhi_cntrl->list_lock);
	mutex_init(&mhi_cntrl->state_lock);
	mutex_init(&mhi_cntrl->event_lock);

	/* Set MHI version and AMSS EE before enumeration */
	mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* Set controller index */
	ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
	if (ret < 0)
		goto err_destroy_wq;

	mhi_cntrl->index = ret;

	irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
	ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
			  "doorbell_irq", mhi_cntrl);
	if (ret) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
		goto err_ida_free;
	}

	/* Allocate the controller device */
	mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
		ret = PTR_ERR(mhi_dev);
		goto err_free_irq;
	}

	ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
	if (ret)
		goto err_put_dev;

	mhi_dev->name = dev_name(&mhi_dev->dev);
	mhi_cntrl->mhi_dev = mhi_dev;

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_put_dev;

	dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");

	return 0;

err_put_dev:
	put_device(&mhi_dev->dev);
err_free_irq:
	free_irq(mhi_cntrl->irq, mhi_cntrl);
err_ida_free:
	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->wq);
err_destroy_ring_item_cache:
	kmem_cache_destroy(mhi_cntrl->ring_item_cache);
err_destroy_ev_ring_el_cache:
	kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
err_destroy_tre_buf_cache:
	kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_ch:
	kfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_register_controller);
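
/*
 * Registration sketch for a controller driver (illustrative only; the config
 * table, callbacks and variable names below are assumptions, not definitions
 * from this file):
 *
 *	static const struct mhi_ep_cntrl_config mhi_v1_config = {
 *		.max_channels = 128,
 *		.num_channels = ARRAY_SIZE(mhi_v1_channels),
 *		.ch_cfg = mhi_v1_channels,
 *		.mhi_version = 0x1000000,
 *	};
 *
 *	mhi_cntrl->cntrl_dev = dev;
 *	mhi_cntrl->mmio = mmio_base;
 *	mhi_cntrl->irq = irq;
 *	mhi_cntrl->raise_irq = foo_raise_irq;
 *	// ...plus the read/write callbacks validated above, and the
 *	// alloc_map/unmap_free callbacks used during power up.
 *	ret = mhi_ep_register_controller(mhi_cntrl, &mhi_v1_config);
 */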

/*
 * It is expected that the controller drivers will power down the MHI EP stack
 * using "mhi_ep_power_down()" before calling this function to unregister themselves.
 */
void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;

	destroy_workqueue(mhi_cntrl->wq);

	free_irq(mhi_cntrl->irq, mhi_cntrl);

	kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
	kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
	kmem_cache_destroy(mhi_cntrl->ring_item_cache);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);

static int mhi_ep_driver_probe(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
	struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;

	ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;

	return mhi_drv->probe(mhi_dev, mhi_dev->id);
}

static int mhi_ep_driver_remove(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Disconnect the channels associated with the driver */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to the client driver */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mhi_chan->xfer_cb = NULL;
		mutex_unlock(&mhi_chan->lock);
	}

	/* Remove the client driver now */
	mhi_drv->remove(mhi_dev);

	return 0;
}

int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	/* Client drivers should have callbacks defined for both channels */
	if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
		return -EINVAL;

	driver->bus = &mhi_ep_bus_type;
	driver->owner = owner;
	driver->probe = mhi_ep_driver_probe;
	driver->remove = mhi_ep_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);
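
/*
 * Client drivers normally use the module_mhi_ep_driver() helper from
 * <linux/mhi_ep.h> rather than calling this directly. Sketch (channel name
 * and callbacks hypothetical):
 *
 *	static const struct mhi_device_id foo_id_table[] = {
 *		{ .chan = "SAHARA" },
 *		{},
 *	};
 *
 *	static struct mhi_ep_driver foo_driver = {
 *		.id_table = foo_id_table,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *		.ul_xfer_cb = foo_ul_cb,	// both callbacks are mandatory
 *		.dl_xfer_cb = foo_dl_cb,	// per the check above
 *		.driver = { .name = "foo" },
 *	};
 *	module_mhi_ep_driver(foo_driver);
 */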

void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);

static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
			      mhi_dev->name);
}

static int mhi_ep_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_ep_bus_type = {
	.name = "mhi_ep",
	.dev_name = "mhi_ep",
	.match = mhi_ep_match,
	.uevent = mhi_ep_uevent,
};

static int __init mhi_ep_init(void)
{
	return bus_register(&mhi_ep_bus_type);
}

static void __exit mhi_ep_exit(void)
{
	bus_unregister(&mhi_ep_bus_type);
}

postcore_initcall(mhi_ep_init);
module_exit(mhi_ep_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Bus Endpoint stack");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");