// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "internal.h"

int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base, u32 offset, u32 *out)
{
	return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
}

int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset,
				    u32 mask, u32 shift, u32 *out)
{
	u32 tmp;
	int ret;

	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
	if (ret)
		return ret;

	*out = (tmp & mask) >> shift;

	return 0;
}

void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
		   u32 offset, u32 val)
{
	mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
}

void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
			 u32 offset, u32 mask, u32 shift, u32 val)
{
	int ret;
	u32 tmp;

	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
	if (ret)
		return;

	tmp &= ~mask;
	tmp |= (val << shift);
	mhi_write_reg(mhi_cntrl, base, offset, tmp);
}

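/*
 * Illustrative use of the read-modify-write helper above. The register and
 * field macros are the ones defined alongside this driver (internal.h); the
 * call site itself is hypothetical:
 *
 *	mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
 *			    MHICTRL_MHISTATE_MASK, MHICTRL_MHISTATE_SHIFT,
 *			    MHI_STATE_M0);
 *
 * This clears the MHISTATE field via the mask and ORs in MHI_STATE_M0
 * shifted into place, leaving the other MHICTRL bits untouched.
 */
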
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val)
{
	mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
	mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
}

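/*
 * The 64-bit doorbell value is programmed as two 32-bit MMIO writes via the
 * controller's write_reg callback: the upper dword at offset 4 first, then
 * the lower dword at offset 0.
 */
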
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
		     struct db_cfg *db_cfg,
		     void __iomem *db_addr,
		     dma_addr_t db_val)
{
	if (db_cfg->db_mode) {
		db_cfg->db_val = db_val;
		mhi_write_db(mhi_cntrl, db_addr, db_val);
		db_cfg->db_mode = 0;
	}
}

void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_cfg,
			     void __iomem *db_addr,
			     dma_addr_t db_val)
{
	db_cfg->db_val = db_val;
	mhi_write_db(mhi_cntrl, db_addr, db_val);
}

void mhi_ring_er_db(struct mhi_event *mhi_event)
{
	struct mhi_ring *ring = &mhi_event->ring;

	mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
				     ring->db_addr, *ring->ctxt_wp);
}

void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
{
	dma_addr_t db;
	struct mhi_ring *ring = &mhi_cmd->ring;

	db = ring->iommu_base + (ring->wp - ring->base);
	*ring->ctxt_wp = db;
	mhi_write_db(mhi_cntrl, ring->db_addr, db);
}

void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan)
{
	struct mhi_ring *ring = &mhi_chan->tre_ring;
	dma_addr_t db;

	db = ring->iommu_base + (ring->wp - ring->base);
	*ring->ctxt_wp = db;
	mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
				    ring->db_addr, db);
}

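/*
 * For both the command and channel doorbells above, the value written is the
 * device-visible (IOMMU) address of the ring write pointer: iommu_base plus
 * the byte offset of wp from the host virtual base. The shared context write
 * pointer is updated before the doorbell is rung so the device always sees a
 * consistent WP.
 */
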
enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
{
	u32 exec;
	int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);

	return (ret) ? MHI_EE_MAX : exec;
}

enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
{
	u32 state;
	int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				     MHISTATUS_MHISTATE_MASK,
				     MHISTATUS_MHISTATE_SHIFT, &state);

	return ret ? MHI_STATE_MAX : state;
}

int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
			 struct mhi_buf_info *buf_info)
{
	buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
					  buf_info->v_addr, buf_info->len,
					  buf_info->dir);
	if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
		return -ENOMEM;

	return 0;
}

int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info)
{
	void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
				       &buf_info->p_addr, GFP_ATOMIC);

	if (!buf)
		return -ENOMEM;

	if (buf_info->dir == DMA_TO_DEVICE)
		memcpy(buf, buf_info->v_addr, buf_info->len);

	buf_info->bb_addr = buf;

	return 0;
}

void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
			    struct mhi_buf_info *buf_info)
{
	dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
			 buf_info->dir);
}

void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf_info)
{
	if (buf_info->dir == DMA_FROM_DEVICE)
		memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);

	mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
			  buf_info->p_addr);
}

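/*
 * The two mapping strategies above are selected once per controller:
 * the "no_bb" variants stream the client buffer directly with
 * dma_map_single()/dma_unmap_single(), while the "use_bb" variants stage
 * data through a DMA-coherent bounce buffer, copying on the TX side when
 * mapping and on the RX side when unmapping.
 */
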
static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
				      struct mhi_ring *ring)
{
	int nr_el;

	if (ring->wp < ring->rp) {
		nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
	} else {
		nr_el = (ring->rp - ring->base) / ring->el_size;
		nr_el += ((ring->base + ring->len - ring->wp) /
			  ring->el_size) - 1;
	}

	return nr_el;
}

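/*
 * Worked example (illustrative): for a ring of 8 elements, one slot is always
 * left unused so that rp == wp means "empty" and wp + 1 == rp means "full".
 * With rp at index 2 and wp at index 5 (no wrap) the free count is
 * (2 - 0) + (8 - 5) - 1 = 4; with rp at index 5 and wp at index 2 (wrapped)
 * it is (5 - 2) - 1 = 2.
 */
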
static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
	return (addr - ring->iommu_base) + ring->base;
}

static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
{
	ring->wp += ring->el_size;
	if (ring->wp >= (ring->base + ring->len))
		ring->wp = ring->base;
}

static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
{
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;
}

int mhi_destroy_device(struct device *dev, void *data)
{
	struct mhi_device *mhi_dev;
	struct mhi_controller *mhi_cntrl;

	if (dev->bus != &mhi_bus_type)
		return 0;

	mhi_dev = to_mhi_device(dev);
	mhi_cntrl = mhi_dev->mhi_cntrl;

	/* Only destroy virtual devices that are attached to the bus */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/*
	 * For the suspend and resume case, this function will get called
	 * without mhi_unregister_controller(). Hence, we need to drop the
	 * references to mhi_dev created for ul and dl channels. We can
	 * be sure that there will be no instances of mhi_dev left after
	 * this.
	 */
	if (mhi_dev->ul_chan)
		put_device(&mhi_dev->ul_chan->mhi_dev->dev);

	if (mhi_dev->dl_chan)
		put_device(&mhi_dev->dl_chan->mhi_dev->dev);

	dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
		mhi_dev->chan_name);

	/* Notify the client and remove the device from MHI bus */
	device_del(dev);
	put_device(dev);

	return 0;
}

static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
	struct mhi_driver *mhi_drv;

	if (!mhi_dev->dev.driver)
		return;

	mhi_drv = to_mhi_driver(mhi_dev->dev.driver);

	if (mhi_drv->status_cb)
		mhi_drv->status_cb(mhi_dev, cb_reason);
}

/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *mhi_chan;
	struct mhi_device *mhi_dev;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->configured || mhi_chan->mhi_dev ||
		    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
			continue;
		mhi_dev = mhi_alloc_device(mhi_cntrl);
		if (IS_ERR(mhi_dev))
			return;

		mhi_dev->dev_type = MHI_DEVICE_XFER;
		switch (mhi_chan->dir) {
		case DMA_TO_DEVICE:
			mhi_dev->ul_chan = mhi_chan;
			mhi_dev->ul_chan_id = mhi_chan->chan;
			break;
		case DMA_FROM_DEVICE:
			/* We use dl_chan as offload channels */
			mhi_dev->dl_chan = mhi_chan;
			mhi_dev->dl_chan_id = mhi_chan->chan;
			break;
		default:
			dev_err(dev, "Direction not supported\n");
			put_device(&mhi_dev->dev);
			return;
		}

		get_device(&mhi_dev->dev);
		mhi_chan->mhi_dev = mhi_dev;

		/* Check next channel if it matches */
		if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
			if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
				i++;
				mhi_chan++;
				if (mhi_chan->dir == DMA_TO_DEVICE) {
					mhi_dev->ul_chan = mhi_chan;
					mhi_dev->ul_chan_id = mhi_chan->chan;
				} else {
					mhi_dev->dl_chan = mhi_chan;
					mhi_dev->dl_chan_id = mhi_chan->chan;
				}
				get_device(&mhi_dev->dev);
				mhi_chan->mhi_dev = mhi_dev;
			}
		}

		/* Channel name is same for both UL and DL */
		mhi_dev->chan_name = mhi_chan->name;
		dev_set_name(&mhi_dev->dev, "%s_%s",
			     dev_name(mhi_cntrl->cntrl_dev),
			     mhi_dev->chan_name);

		/* Init wakeup source if available */
		if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
			device_init_wakeup(&mhi_dev->dev, true);

		ret = device_add(&mhi_dev->dev);
		if (ret)
			put_device(&mhi_dev->dev);
	}
}

irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
	struct mhi_event *mhi_event = dev;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_ring *ev_ring = &mhi_event->ring;
	void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);

	/* Only proceed if event ring has pending events */
	if (ev_ring->rp == dev_rp)
		return IRQ_HANDLED;

	/* For client managed event ring, notify pending data */
	if (mhi_event->cl_manage) {
		struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
		struct mhi_device *mhi_dev = mhi_chan->mhi_dev;

		if (mhi_dev)
			mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
	} else {
		tasklet_schedule(&mhi_event->task);
	}

	return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
{
	struct mhi_controller *mhi_cntrl = dev;
	enum mhi_state state = MHI_STATE_MAX;
	enum mhi_pm_state pm_state = 0;
	enum mhi_ee_type ee = 0;

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		state = mhi_get_mhi_state(mhi_cntrl);
		ee = mhi_cntrl->ee;
		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
	}

	if (state == MHI_STATE_SYS_ERR) {
		dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n");
		pm_state = mhi_tryset_pm_state(mhi_cntrl,
					       MHI_PM_SYS_ERR_DETECT);
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* If the device is in RDDM, don't bother processing the SYS error */
	if (mhi_cntrl->ee == MHI_EE_RDDM) {
		if (mhi_cntrl->ee != ee) {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
			wake_up_all(&mhi_cntrl->state_event);
		}
		goto exit_intvec;
	}

	if (pm_state == MHI_PM_SYS_ERR_DETECT) {
		wake_up_all(&mhi_cntrl->state_event);

		/* For fatal errors, we let controller decide next step */
		if (MHI_IN_PBL(ee))
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
		else
			schedule_work(&mhi_cntrl->syserr_worker);
	}

exit_intvec:

	return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
{
	struct mhi_controller *mhi_cntrl = dev;

	/* Wake up events waiting for state change */
	wake_up_all(&mhi_cntrl->state_event);

	return IRQ_WAKE_THREAD;
}

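/*
 * IRQ handling is split in two: mhi_intvec_handler() runs in hard IRQ
 * context, wakes any waiters on state_event and returns IRQ_WAKE_THREAD, so
 * the heavier state/EE inspection above runs from the threaded handler.
 * Per-event-ring interrupts go through mhi_irq_handler(), which either
 * notifies a client-managed ring or schedules the event tasklet.
 */
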
static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
					struct mhi_ring *ring)
{
	dma_addr_t ctxt_wp;

	/* Update the WP */
	ring->wp += ring->el_size;
	ctxt_wp = *ring->ctxt_wp + ring->el_size;

	if (ring->wp >= (ring->base + ring->len)) {
		ring->wp = ring->base;
		ctxt_wp = ring->iommu_base;
	}

	*ring->ctxt_wp = ctxt_wp;

	/* Update the RP */
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;

	/* Update to all cores */
	smp_wmb();
}

static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
			    struct mhi_tre *event,
			    struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_result result;
	unsigned long flags = 0;
	u32 ev_code;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
		-EOVERFLOW : 0;

	/*
	 * If it's a DB event then we need to grab the lock with preemption
	 * disabled and as a write, because we have to update the DB register
	 * and there are chances that another thread could be doing the same.
	 */
	if (ev_code >= MHI_EV_CC_OOB)
		write_lock_irqsave(&mhi_chan->lock, flags);
	else
		read_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
		goto end_process_tx_event;

	switch (ev_code) {
	case MHI_EV_CC_OVERFLOW:
	case MHI_EV_CC_EOB:
	case MHI_EV_CC_EOT:
	{
		dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
		struct mhi_tre *local_rp, *ev_tre;
		void *dev_rp;
		struct mhi_buf_info *buf_info;
		u16 xfer_len;

		/* Get the TRB this event points to */
		ev_tre = mhi_to_virtual(tre_ring, ptr);

		dev_rp = ev_tre + 1;
		if (dev_rp >= (tre_ring->base + tre_ring->len))
			dev_rp = tre_ring->base;

		result.dir = mhi_chan->dir;

		local_rp = tre_ring->rp;
		while (local_rp != dev_rp) {
			buf_info = buf_ring->rp;
			/* If it's the last TRE, get length from the event */
			if (local_rp == ev_tre)
				xfer_len = MHI_TRE_GET_EV_LEN(event);
			else
				xfer_len = buf_info->len;

			/* Unmap if it's not pre-mapped by client */
			if (likely(!buf_info->pre_mapped))
				mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

			result.buf_addr = buf_info->cb_buf;
			result.bytes_xferd = xfer_len;
			mhi_del_ring_element(mhi_cntrl, buf_ring);
			mhi_del_ring_element(mhi_cntrl, tre_ring);
			local_rp = tre_ring->rp;

			/* notify client */
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

			if (mhi_chan->dir == DMA_TO_DEVICE)
				atomic_dec(&mhi_cntrl->pending_pkts);

			/*
			 * Recycle the buffer if buffer is pre-allocated,
			 * if there is an error, not much we can do apart
			 * from dropping the packet
			 */
			if (mhi_chan->pre_alloc) {
				if (mhi_queue_buf(mhi_chan->mhi_dev,
						  mhi_chan->dir,
						  buf_info->cb_buf,
						  buf_info->len, MHI_EOT)) {
					dev_err(dev,
						"Error recycling buffer for chan:%d\n",
						mhi_chan->chan);
					kfree(buf_info->cb_buf);
				}
			}
		}
		break;
	} /* CC_EOT */
	case MHI_EV_CC_OOB:
	case MHI_EV_CC_DB_MODE:
	{
		unsigned long flags;

		mhi_chan->db_cfg.db_mode = 1;
		read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
		if (tre_ring->wp != tre_ring->rp &&
		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		}
		read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
		break;
	}
	case MHI_EV_CC_BAD_TRE:
	default:
		dev_err(dev, "Unknown event 0x%x\n", ev_code);
		break;
	} /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */

end_process_tx_event:
	if (ev_code >= MHI_EV_CC_OOB)
		write_unlock_irqrestore(&mhi_chan->lock, flags);
	else
		read_unlock_bh(&mhi_chan->lock);

	return 0;
}

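/*
 * Completion walk (illustrative summary): a single EOT event carries the
 * pointer and length of the last TRE it completes, so parse_xfer_event()
 * retires every TRE from the ring read pointer up to and including that TRE,
 * invoking xfer_cb() once per buffer, with the length taken from buf_info
 * for all but the last TRE and from the event for the last one.
 */
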
static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
			   struct mhi_tre *event,
			   struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_buf_info *buf_info;
	struct mhi_result result;
	int ev_code;
	u32 cookie; /* offset to local descriptor */
	u16 xfer_len;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	cookie = MHI_TRE_GET_EV_COOKIE(event);
	xfer_len = MHI_TRE_GET_EV_LEN(event);

	/* Received out of bound cookie */
	WARN_ON(cookie >= buf_ring->len);

	buf_info = buf_ring->base + cookie;

	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
		-EOVERFLOW : 0;
	result.bytes_xferd = xfer_len;
	result.buf_addr = buf_info->cb_buf;
	result.dir = mhi_chan->dir;

	read_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
		goto end_process_rsc_event;

	WARN_ON(!buf_info->used);

	/* notify the client */
	mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

	/*
	 * Note: we arbitrarily increment RP even though the completion packet
	 * we just processed might not correspond to it. We can do this because
	 * the device is guaranteed to cache descriptors in the order it
	 * receives them, so even though the completion event is for a
	 * different descriptor we can reuse all descriptors in between.
	 * For example:
	 * Transfer Ring has descriptors: A, B, C, D
	 * Last descriptor host queued is D (WP) and first descriptor
	 * host queued is A (RP).
	 * The completion event we just serviced is descriptor C.
	 * Then we can safely queue descriptors to replace A, B, and C
	 * even though host did not receive any completions for them.
	 */
	mhi_del_ring_element(mhi_cntrl, tre_ring);
	buf_info->used = false;

end_process_rsc_event:
	read_unlock_bh(&mhi_chan->lock);

	return 0;
}

static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
				       struct mhi_tre *tre)
{
	dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
	struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *mhi_ring = &cmd_ring->ring;
	struct mhi_tre *cmd_pkt;
	struct mhi_chan *mhi_chan;
	u32 chan;

	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);

	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
	mhi_chan = &mhi_cntrl->mhi_chan[chan];
	write_lock_bh(&mhi_chan->lock);
	mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
	complete(&mhi_chan->completion);
	write_unlock_bh(&mhi_chan->lock);

	mhi_del_ring_element(mhi_cntrl, mhi_ring);
}

int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 chan;
	int count = 0;

	/*
	 * This is a quick check to avoid unnecessary event processing
	 * in case MHI is already in error state, but it's still possible
	 * to transition to error state while processing events
	 */
	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		switch (type) {
		case MHI_PKT_TYPE_BW_REQ_EVENT:
		{
			struct mhi_link_info *link_info;

			link_info = &mhi_cntrl->mhi_link_info;
			write_lock_irq(&mhi_cntrl->pm_lock);
			link_info->target_link_speed =
				MHI_TRE_GET_EV_LINKSPEED(local_rp);
			link_info->target_link_width =
				MHI_TRE_GET_EV_LINKWIDTH(local_rp);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_dbg(dev, "Received BW_REQ event\n");
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
			break;
		}
		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
		{
			enum mhi_state new_state;

			new_state = MHI_TRE_GET_EV_STATE(local_rp);

			dev_dbg(dev, "State change event to state: %s\n",
				TO_MHI_STATE_STR(new_state));

			switch (new_state) {
			case MHI_STATE_M0:
				mhi_pm_m0_transition(mhi_cntrl);
				break;
			case MHI_STATE_M1:
				mhi_pm_m1_transition(mhi_cntrl);
				break;
			case MHI_STATE_M3:
				mhi_pm_m3_transition(mhi_cntrl);
				break;
			case MHI_STATE_SYS_ERR:
			{
				enum mhi_pm_state new_state;

				dev_dbg(dev, "System error detected\n");
				write_lock_irq(&mhi_cntrl->pm_lock);
				new_state = mhi_tryset_pm_state(mhi_cntrl,
							MHI_PM_SYS_ERR_DETECT);
				write_unlock_irq(&mhi_cntrl->pm_lock);
				if (new_state == MHI_PM_SYS_ERR_DETECT)
					schedule_work(&mhi_cntrl->syserr_worker);
				break;
			}
			default:
				dev_err(dev, "Invalid state: %s\n",
					TO_MHI_STATE_STR(new_state));
				break;
			}

			break;
		}
		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
			mhi_process_cmd_completion(mhi_cntrl, local_rp);
			break;
		case MHI_PKT_TYPE_EE_EVENT:
		{
			enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
			enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);

			dev_dbg(dev, "Received EE event: %s\n",
				TO_MHI_EXEC_STR(event));
			switch (event) {
			case MHI_EE_SBL:
				st = DEV_ST_TRANSITION_SBL;
				break;
			case MHI_EE_WFW:
			case MHI_EE_AMSS:
				st = DEV_ST_TRANSITION_MISSION_MODE;
				break;
			case MHI_EE_RDDM:
				mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
				write_lock_irq(&mhi_cntrl->pm_lock);
				mhi_cntrl->ee = event;
				write_unlock_irq(&mhi_cntrl->pm_lock);
				wake_up_all(&mhi_cntrl->state_event);
				break;
			default:
				dev_err(dev,
					"Unhandled EE event: 0x%x\n", type);
				break;
			}
			if (st != DEV_ST_TRANSITION_MAX)
				mhi_queue_state_transition(mhi_cntrl, st);

			break;
		}
		case MHI_PKT_TYPE_TX_EVENT:
			chan = MHI_TRE_GET_EV_CHID(local_rp);
			mhi_chan = &mhi_cntrl->mhi_chan[chan];
			parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
			break;
		default:
			dev_err(dev, "Unhandled event type: %d\n", type);
			break;
		}

		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;
		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
		count++;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}

int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event,
				u32 event_quota)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	int count = 0;
	u32 chan;
	struct mhi_chan *mhi_chan;

	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp && event_quota > 0) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		chan = MHI_TRE_GET_EV_CHID(local_rp);
		mhi_chan = &mhi_cntrl->mhi_chan[chan];

		if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
			parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
			event_quota--;
		} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
			parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
			event_quota--;
		}

		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;
		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
		count++;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}

void mhi_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;

	/* process all pending events */
	spin_lock_bh(&mhi_event->lock);
	mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
	spin_unlock_bh(&mhi_event->lock);
}

void mhi_ctrl_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	enum mhi_pm_state pm_state = 0;
	int ret;

	/*
	 * We can check PM state w/o a lock here because there is no way
	 * PM state can change from reg access valid to no access while this
	 * thread is being executed.
	 */
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		/*
		 * We may have a pending event but are not allowed to
		 * process it since we are probably in a suspended state,
		 * so trigger a resume.
		 */
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);

		return;
	}

	/* Process ctrl events */
	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);

	/*
	 * We received an IRQ but no events to process, maybe device went to
	 * SYS_ERR state? Check the state to confirm.
	 */
	if (!ret) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		state = mhi_get_mhi_state(mhi_cntrl);
		if (state == MHI_STATE_SYS_ERR) {
			dev_dbg(dev, "System error detected\n");
			pm_state = mhi_tryset_pm_state(mhi_cntrl,
						       MHI_PM_SYS_ERR_DETECT);
		}
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (pm_state == MHI_PM_SYS_ERR_DETECT)
			schedule_work(&mhi_cntrl->syserr_worker);
	}
}

static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
			     struct mhi_ring *ring)
{
	void *tmp = ring->wp + ring->el_size;

	if (tmp >= (ring->base + ring->len))
		tmp = ring->base;

	return (tmp == ring->rp);
}

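/* A ring is full when advancing wp by one element would make it equal rp. */
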
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
	struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
	struct mhi_buf_info *buf_info;
	struct mhi_tre *mhi_tre;
	int ret;

	/* If MHI host pre-allocates buffers then client drivers cannot queue */
	if (mhi_chan->pre_alloc)
		return -EINVAL;

	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	/* we're in M3 or transitioning to M3 */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}

	/* Toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	/* Generate the TRE */
	buf_info = buf_ring->wp;

	buf_info->v_addr = skb->data;
	buf_info->cb_buf = skb;
	buf_info->wp = tre_ring->wp;
	buf_info->dir = mhi_chan->dir;
	buf_info->len = len;

	ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
	if (ret)
		goto map_error;

	mhi_tre = tre_ring->wp;

	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);

	/* increment WP */
	mhi_add_ring_element(mhi_cntrl, tre_ring);
	mhi_add_ring_element(mhi_cntrl, buf_ring);

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		read_lock_bh(&mhi_chan->lock);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_bh(&mhi_chan->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

map_error:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);

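/*
 * Minimal usage sketch (hypothetical client, not taken from this file): a
 * client driver would typically pre-post receive skbs on its DL channel and
 * hand completions back from its xfer_cb, e.g.:
 *
 *	struct sk_buff *skb = alloc_skb(rx_len, GFP_ATOMIC);
 *
 *	if (skb && mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb, rx_len,
 *				 MHI_EOT))
 *		dev_kfree_skb(skb);
 *
 * where rx_len is the client's chosen buffer size.
 */
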
int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
	struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
	struct mhi_buf_info *buf_info;
	struct mhi_tre *mhi_tre;

	/* If MHI host pre-allocates buffers then client drivers cannot queue */
	if (mhi_chan->pre_alloc)
		return -EINVAL;

	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
		dev_err(dev, "MHI is not in activated state, PM state: %s\n",
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		read_unlock_bh(&mhi_cntrl->pm_lock);

		return -EIO;
	}

	/* we're in M3 or transitioning to M3 */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}

	/* Toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	/* Generate the TRE */
	buf_info = buf_ring->wp;
	WARN_ON(buf_info->used);
	buf_info->p_addr = mhi_buf->dma_addr;
	buf_info->pre_mapped = true;
	buf_info->cb_buf = mhi_buf;
	buf_info->wp = tre_ring->wp;
	buf_info->dir = mhi_chan->dir;
	buf_info->len = len;

	mhi_tre = tre_ring->wp;

	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);

	/* increment WP */
	mhi_add_ring_element(mhi_cntrl, tre_ring);
	mhi_add_ring_element(mhi_cntrl, buf_ring);

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		read_lock_bh(&mhi_chan->lock);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_bh(&mhi_chan->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_queue_dma);

int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		void *buf, void *cb, size_t buf_len, enum mhi_flags flags)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_tre *mhi_tre;
	struct mhi_buf_info *buf_info;
	int eot, eob, chain, bei;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	buf_info = buf_ring->wp;
	buf_info->v_addr = buf;
	buf_info->cb_buf = cb;
	buf_info->wp = tre_ring->wp;
	buf_info->dir = mhi_chan->dir;
	buf_info->len = buf_len;

	ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
	if (ret)
		return ret;

	eob = !!(flags & MHI_EOB);
	eot = !!(flags & MHI_EOT);
	chain = !!(flags & MHI_CHAIN);
	bei = !!(mhi_chan->intmod);

	mhi_tre = tre_ring->wp;
	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);

	/* increment WP */
	mhi_add_ring_element(mhi_cntrl, tre_ring);
	mhi_add_ring_element(mhi_cntrl, buf_ring);

	return 0;
}

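/*
 * The transfer TRE built above encodes: the DMA address of the buffer,
 * dword[0] carrying the length, and dword[1] carrying the BEI (block event
 * interrupt, derived from intmod), EOT, EOB and chain flags. mhi_queue_skb()
 * and mhi_queue_dma() build the same layout directly with
 * MHI_TRE_DATA_DWORD1(1, 1, 0, 0), i.e. BEI and EOT set.
 */
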
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  void *buf, size_t len, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_ring *tre_ring;
	unsigned long flags;
	int ret;

	/*
	 * This check is here only as a guard; it's always possible for MHI to
	 * enter an error state while the rest of this function executes, which
	 * is not fatal, so we do not need to hold pm_lock.
	 */
	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
		return -EIO;

	tre_ring = &mhi_chan->tre_ring;
	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;

	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags);
	if (unlikely(ret))
		return ret;

	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);

	/* we're in M3 or transitioning to M3 */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}

	/* Toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		unsigned long flags;

		read_lock_irqsave(&mhi_chan->lock, flags);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irqrestore(&mhi_chan->lock, flags);
	}

	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);

int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
		 struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd)
{
	struct mhi_tre *cmd_tre = NULL;
	struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *ring = &mhi_cmd->ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int chan;

	chan = mhi_chan->chan;

	spin_lock_bh(&mhi_cmd->lock);
	if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
		spin_unlock_bh(&mhi_cmd->lock);
		return -ENOMEM;
	}

	/* prepare the cmd tre */
	cmd_tre = ring->wp;
	switch (cmd) {
	case MHI_CMD_RESET_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
		break;
	case MHI_CMD_START_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
		break;
	default:
		dev_err(dev, "Command not supported\n");
		break;
	}

	/* queue to hardware */
	mhi_add_ring_element(mhi_cntrl, ring);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	spin_unlock_bh(&mhi_cmd->lock);

	return 0;
}

static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
				    struct mhi_chan *mhi_chan)
{
	int ret;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);

	/* no more processing events for this channel */
	mutex_lock(&mhi_chan->mutex);
	write_lock_irq(&mhi_chan->lock);
	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
		write_unlock_irq(&mhi_chan->lock);
		mutex_unlock(&mhi_chan->mutex);
		return;
	}

	mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
	write_unlock_irq(&mhi_chan->lock);

	reinit_completion(&mhi_chan->completion);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		goto error_invalid_state;
	}

	mhi_cntrl->wake_toggle(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);
	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
	if (ret)
		goto error_invalid_state;

	/* even if it fails we will still reset */
	ret = wait_for_completion_timeout(&mhi_chan->completion,
					  msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
		dev_err(dev,
			"Failed to receive cmd completion, still resetting\n");

error_invalid_state:
	if (!mhi_chan->offload_ch) {
		mhi_reset_chan(mhi_cntrl, mhi_chan);
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
	}
	dev_dbg(dev, "chan:%d successfully reset\n", mhi_chan->chan);
	mutex_unlock(&mhi_chan->mutex);
}

int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
			struct mhi_chan *mhi_chan)
{
	int ret = 0;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);

	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
		dev_err(dev,
			"Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
			mhi_chan->name);
		return -ENOTCONN;
	}

	mutex_lock(&mhi_chan->mutex);

	/* If channel is not in disabled state, do not allow it to start */
	if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
		ret = -EIO;
		dev_dbg(dev, "channel: %d is not in disabled state\n",
			mhi_chan->chan);
		goto error_init_chan;
	}

	/* The client manages the channel context for offload channels */
	if (!mhi_chan->offload_ch) {
		ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
		if (ret)
			goto error_init_chan;
	}

	reinit_completion(&mhi_chan->completion);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		ret = -EIO;
		goto error_pm_state;
	}

	mhi_cntrl->wake_toggle(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);

	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
	if (ret)
		goto error_pm_state;

	ret = wait_for_completion_timeout(&mhi_chan->completion,
					  msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
		ret = -EIO;
		goto error_pm_state;
	}

	write_lock_irq(&mhi_chan->lock);
	mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
	write_unlock_irq(&mhi_chan->lock);

	/* Pre-allocate buffer for xfer ring */
	if (mhi_chan->pre_alloc) {
		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
						       &mhi_chan->tre_ring);
		size_t len = mhi_cntrl->buffer_len;

		while (nr_el--) {
			void *buf;

			buf = kmalloc(len, GFP_KERNEL);
			if (!buf) {
				ret = -ENOMEM;
				goto error_pre_alloc;
			}

			/* Prepare transfer descriptors */
			ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf,
					  len, MHI_EOT);
			if (ret) {
				kfree(buf);
				goto error_pre_alloc;
			}
		}

		read_lock_bh(&mhi_cntrl->pm_lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			read_lock_irq(&mhi_chan->lock);
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
			read_unlock_irq(&mhi_chan->lock);
		}
		read_unlock_bh(&mhi_cntrl->pm_lock);
	}

	mutex_unlock(&mhi_chan->mutex);

	dev_dbg(dev, "Chan: %d successfully moved to start state\n",
		mhi_chan->chan);

	return 0;

error_pm_state:
	if (!mhi_chan->offload_ch)
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

error_init_chan:
	mutex_unlock(&mhi_chan->mutex);

	return ret;

error_pre_alloc:
	mutex_unlock(&mhi_chan->mutex);
	__mhi_unprepare_channel(mhi_cntrl, mhi_chan);

	return ret;
}

static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
				  struct mhi_event *mhi_event,
				  struct mhi_event_ctxt *er_ctxt,
				  int chan)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long flags;

	dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);

	ev_ring = &mhi_event->ring;

	/* mark all stale events related to channel as STALE event */
	spin_lock_irqsave(&mhi_event->lock, flags);
	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);

	local_rp = ev_ring->rp;
	while (dev_rp != local_rp) {
		if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
		    chan == MHI_TRE_GET_EV_CHID(local_rp))
			local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
					MHI_PKT_TYPE_STALE_EVENT);
		local_rp++;
		if (local_rp == (ev_ring->base + ev_ring->len))
			local_rp = ev_ring->base;
	}

	dev_dbg(dev, "Finished marking events as stale events\n");
	spin_unlock_irqrestore(&mhi_event->lock, flags);
}

static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
				struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_result result;

	/* Reset any pending buffers */
	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	result.transaction_status = -ENOTCONN;
	result.bytes_xferd = 0;
	while (tre_ring->rp != tre_ring->wp) {
		struct mhi_buf_info *buf_info = buf_ring->rp;

		if (mhi_chan->dir == DMA_TO_DEVICE)
			atomic_dec(&mhi_cntrl->pending_pkts);

		if (!buf_info->pre_mapped)
			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

		mhi_del_ring_element(mhi_cntrl, buf_ring);
		mhi_del_ring_element(mhi_cntrl, tre_ring);

		if (mhi_chan->pre_alloc) {
			kfree(buf_info->cb_buf);
		} else {
			result.buf_addr = buf_info->cb_buf;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}
	}
}

void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
{
	struct mhi_event *mhi_event;
	struct mhi_event_ctxt *er_ctxt;
	int chan = mhi_chan->chan;

	/* Nothing to reset, client doesn't queue buffers */
	if (mhi_chan->offload_ch)
		return;

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];

	mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);

	mhi_reset_data_chan(mhi_cntrl, mhi_chan);

	read_unlock_bh(&mhi_cntrl->pm_lock);
}

/* Move channel to start state */
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
{
	int ret, dir;
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
		if (!mhi_chan)
			continue;

		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
		if (ret)
			goto error_open_chan;
	}

	return 0;

error_open_chan:
	for (--dir; dir >= 0; dir--) {
		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
		if (!mhi_chan)
			continue;

		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);

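/*
 * Typical client lifecycle (illustrative; driver-side variable names are
 * hypothetical):
 *
 *	ret = mhi_prepare_for_transfer(mhi_dev);	// start UL/DL channels
 *	if (ret)
 *		return ret;
 *	ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
 *	...					// completions arrive in xfer_cb
 *	mhi_unprepare_from_transfer(mhi_dev);	// reset channels on teardown
 */
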
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	int dir;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
		if (!mhi_chan)
			continue;

		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
	}
}
EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);

int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
	struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
	int ret;

	spin_lock_bh(&mhi_event->lock);
	ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
	spin_unlock_bh(&mhi_event->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_poll);

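/*
 * mhi_poll() is intended for client-managed DL event rings (cl_manage): the
 * IRQ handler only signals MHI_CB_PENDING_DATA, and the client then drains up
 * to 'budget' events itself, e.g. from a NAPI poll callback (illustrative
 * usage, not mandated by this file).
 */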