// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "internal.h"

int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base, u32 offset, u32 *out)
{
	u32 tmp = readl(base + offset);

	/* If there is any unexpected value, query the link status */
	if (PCI_INVALID_READ(tmp) &&
	    mhi_cntrl->link_status(mhi_cntrl))
		return -EIO;

	*out = tmp;

	return 0;
}
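
/*
 * A readl() value of all 1s usually indicates a dead PCIe link rather
 * than real register contents; PCI_INVALID_READ() screens for such
 * values, and the controller's link_status() callback then confirms
 * whether the link is actually down before the read fails with -EIO.
 */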

int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset,
				    u32 mask, u32 shift, u32 *out)
{
	u32 tmp;
	int ret;

	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
	if (ret)
		return ret;

	*out = (tmp & mask) >> shift;

	return 0;
}

void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
		   u32 offset, u32 val)
{
	writel(val, base + offset);
}

void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
			 u32 offset, u32 mask, u32 shift, u32 val)
{
	int ret;
	u32 tmp;

	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
	if (ret)
		return;

	tmp &= ~mask;
	tmp |= (val << shift);
	mhi_write_reg(mhi_cntrl, base, offset, tmp);
}

void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val)
{
	mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
	mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
}
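
/*
 * Doorbell registers are 64 bits wide but written as two 32-bit halves.
 * The high word is written first so that the low-word write at offset 0,
 * which typically triggers the device, completes the update.
 */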

void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
		     struct db_cfg *db_cfg,
		     void __iomem *db_addr,
		     dma_addr_t db_val)
{
	if (db_cfg->db_mode) {
		db_cfg->db_val = db_val;
		mhi_write_db(mhi_cntrl, db_addr, db_val);
		db_cfg->db_mode = 0;
	}
}

void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_cfg,
			     void __iomem *db_addr,
			     dma_addr_t db_val)
{
	db_cfg->db_val = db_val;
	mhi_write_db(mhi_cntrl, db_addr, db_val);
}

void mhi_ring_er_db(struct mhi_event *mhi_event)
{
	struct mhi_ring *ring = &mhi_event->ring;

	mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
				     ring->db_addr, *ring->ctxt_wp);
}

void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
{
	dma_addr_t db;
	struct mhi_ring *ring = &mhi_cmd->ring;

	db = ring->iommu_base + (ring->wp - ring->base);
	*ring->ctxt_wp = db;
	mhi_write_db(mhi_cntrl, ring->db_addr, db);
}

void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan)
{
	struct mhi_ring *ring = &mhi_chan->tre_ring;
	dma_addr_t db;

	db = ring->iommu_base + (ring->wp - ring->base);
	*ring->ctxt_wp = db;
	mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
				    ring->db_addr, db);
}

enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
{
	u32 exec;
	int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);

	return (ret) ? MHI_EE_MAX : exec;
}

enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
{
	u32 state;
	int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				     MHISTATUS_MHISTATE_MASK,
				     MHISTATUS_MHISTATE_SHIFT, &state);

	return ret ? MHI_STATE_MAX : state;
}

int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
			 struct mhi_buf_info *buf_info)
{
	buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
					  buf_info->v_addr, buf_info->len,
					  buf_info->dir);
	if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
		return -ENOMEM;

	return 0;
}

int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info)
{
	void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
				       &buf_info->p_addr, GFP_ATOMIC);

	if (!buf)
		return -ENOMEM;

	if (buf_info->dir == DMA_TO_DEVICE)
		memcpy(buf, buf_info->v_addr, buf_info->len);

	buf_info->bb_addr = buf;

	return 0;
}

void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
			    struct mhi_buf_info *buf_info)
{
	dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
			 buf_info->dir);
}

void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf_info)
{
	if (buf_info->dir == DMA_FROM_DEVICE)
		memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);

	mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
			  buf_info->p_addr);
}

static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
				      struct mhi_ring *ring)
{
	int nr_el;

	if (ring->wp < ring->rp) {
		nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
	} else {
		nr_el = (ring->rp - ring->base) / ring->el_size;
		nr_el += ((ring->base + ring->len - ring->wp) /
			  ring->el_size) - 1;
	}

	return nr_el;
}
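
/*
 * Worked example: for a four-element ring (len = 64, el_size = 16) with
 * rp == base and wp == base + 32, two elements are in flight, and the
 * function returns 0/16 + (64 - 32)/16 - 1 = 1. One slot is always left
 * unused so that a full ring (wp one element behind rp) can be told apart
 * from an empty one (wp == rp).
 */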

static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
	return (addr - ring->iommu_base) + ring->base;
}

static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
{
	ring->wp += ring->el_size;
	if (ring->wp >= (ring->base + ring->len))
		ring->wp = ring->base;
	/* smp update */
	smp_wmb();
}

static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
{
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;
	/* smp update */
	smp_wmb();
}
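
/*
 * Ring convention used by the helpers above: the producer advances wp and
 * the consumer advances rp, each by el_size with wraparound at base + len;
 * the smp_wmb() makes the pointer update visible to other cores before any
 * later doorbell or event processing relies on it.
 */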

int mhi_destroy_device(struct device *dev, void *data)
{
	struct mhi_device *mhi_dev;
	struct mhi_controller *mhi_cntrl;

	if (dev->bus != &mhi_bus_type)
		return 0;

	mhi_dev = to_mhi_device(dev);
	mhi_cntrl = mhi_dev->mhi_cntrl;

	/* Only destroy virtual devices that are attached to the bus */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/*
	 * For the suspend and resume case, this function will get called
	 * without mhi_unregister_controller(). Hence, we need to drop the
	 * references to mhi_dev created for ul and dl channels. We can
	 * be sure that there will be no instances of mhi_dev left after
	 * this.
	 */
	if (mhi_dev->ul_chan)
		put_device(&mhi_dev->ul_chan->mhi_dev->dev);

	if (mhi_dev->dl_chan)
		put_device(&mhi_dev->dl_chan->mhi_dev->dev);

	dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
		mhi_dev->chan_name);

	/* Notify the client and remove the device from MHI bus */
	device_del(dev);
	put_device(dev);

	return 0;
}

static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
	struct mhi_driver *mhi_drv;

	if (!mhi_dev->dev.driver)
		return;

	mhi_drv = to_mhi_driver(mhi_dev->dev.driver);

	if (mhi_drv->status_cb)
		mhi_drv->status_cb(mhi_dev, cb_reason);
}

/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *mhi_chan;
	struct mhi_device *mhi_dev;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->configured || mhi_chan->mhi_dev ||
		    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
			continue;
		mhi_dev = mhi_alloc_device(mhi_cntrl);
		if (IS_ERR(mhi_dev))
			return;

		mhi_dev->dev_type = MHI_DEVICE_XFER;
		switch (mhi_chan->dir) {
		case DMA_TO_DEVICE:
			mhi_dev->ul_chan = mhi_chan;
			mhi_dev->ul_chan_id = mhi_chan->chan;
			break;
		case DMA_FROM_DEVICE:
			/* We use dl_chan as offload channels */
			mhi_dev->dl_chan = mhi_chan;
			mhi_dev->dl_chan_id = mhi_chan->chan;
			break;
		default:
			dev_err(dev, "Direction not supported\n");
			put_device(&mhi_dev->dev);
			return;
		}

		get_device(&mhi_dev->dev);
		mhi_chan->mhi_dev = mhi_dev;

		/* Check next channel if it matches */
		if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
			if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
				i++;
				mhi_chan++;
				if (mhi_chan->dir == DMA_TO_DEVICE) {
					mhi_dev->ul_chan = mhi_chan;
					mhi_dev->ul_chan_id = mhi_chan->chan;
				} else {
					mhi_dev->dl_chan = mhi_chan;
					mhi_dev->dl_chan_id = mhi_chan->chan;
				}
				get_device(&mhi_dev->dev);
				mhi_chan->mhi_dev = mhi_dev;
			}
		}

		/* Channel name is same for both UL and DL */
		mhi_dev->chan_name = mhi_chan->name;
		dev_set_name(&mhi_dev->dev, "%04x_%s", mhi_chan->chan,
			     mhi_dev->chan_name);

		/* Init wakeup source if available */
		if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
			device_init_wakeup(&mhi_dev->dev, true);

		ret = device_add(&mhi_dev->dev);
		if (ret)
			put_device(&mhi_dev->dev);
	}
}
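
/*
 * The "%04x_%s" format combines the channel number with the channel name,
 * so, for example, a channel 14 named "QMI" would show up on the MHI bus
 * as "000e_QMI".
 */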

irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
	struct mhi_event *mhi_event = dev;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_ring *ev_ring = &mhi_event->ring;
	void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);

	/* Only proceed if event ring has pending events */
	if (ev_ring->rp == dev_rp)
		return IRQ_HANDLED;

	/* For client managed event ring, notify pending data */
	if (mhi_event->cl_manage) {
		struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
		struct mhi_device *mhi_dev = mhi_chan->mhi_dev;

		if (mhi_dev)
			mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
	} else {
		tasklet_schedule(&mhi_event->task);
	}

	return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
{
	struct mhi_controller *mhi_cntrl = dev;
	enum mhi_state state = MHI_STATE_MAX;
	enum mhi_pm_state pm_state = 0;
	enum mhi_ee_type ee = 0;

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		state = mhi_get_mhi_state(mhi_cntrl);
		ee = mhi_cntrl->ee;
		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
	}

	if (state == MHI_STATE_SYS_ERR) {
		dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n");
		pm_state = mhi_tryset_pm_state(mhi_cntrl,
					       MHI_PM_SYS_ERR_DETECT);
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* If device is in RDDM, don't bother processing SYS error */
	if (mhi_cntrl->ee == MHI_EE_RDDM) {
		if (mhi_cntrl->ee != ee) {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
			wake_up_all(&mhi_cntrl->state_event);
		}
		goto exit_intvec;
	}

	if (pm_state == MHI_PM_SYS_ERR_DETECT) {
		wake_up_all(&mhi_cntrl->state_event);

		/* For fatal errors, we let controller decide next step */
		if (MHI_IN_PBL(ee))
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
		else
			schedule_work(&mhi_cntrl->syserr_worker);
	}

exit_intvec:
	return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
{
	struct mhi_controller *mhi_cntrl = dev;

	/* Wake up threads waiting for a state change event */
	wake_up_all(&mhi_cntrl->state_event);

	return IRQ_WAKE_THREAD;
}

static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
					struct mhi_ring *ring)
{
	dma_addr_t ctxt_wp;

	/* Update the WP */
	ring->wp += ring->el_size;
	ctxt_wp = *ring->ctxt_wp + ring->el_size;

	if (ring->wp >= (ring->base + ring->len)) {
		ring->wp = ring->base;
		ctxt_wp = ring->iommu_base;
	}

	*ring->ctxt_wp = ctxt_wp;

	/* Update the RP */
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;

	/* Update to all cores */
	smp_wmb();
}

static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
			    struct mhi_tre *event,
			    struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_result result;
	unsigned long flags = 0;
	u32 ev_code;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
		-EOVERFLOW : 0;

	/*
	 * If this is a DB event, grab the channel lock for writing with
	 * preemption disabled, because we have to update the DB register
	 * and another thread could be doing the same.
	 */
	if (ev_code >= MHI_EV_CC_OOB)
		write_lock_irqsave(&mhi_chan->lock, flags);
	else
		read_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
		goto end_process_tx_event;

	switch (ev_code) {
	case MHI_EV_CC_OVERFLOW:
	case MHI_EV_CC_EOB:
	case MHI_EV_CC_EOT:
	{
		dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
		struct mhi_tre *local_rp, *ev_tre;
		void *dev_rp;
		struct mhi_buf_info *buf_info;
		u16 xfer_len;

		/* Get the TRB this event points to */
		ev_tre = mhi_to_virtual(tre_ring, ptr);

		dev_rp = ev_tre + 1;
		if (dev_rp >= (tre_ring->base + tre_ring->len))
			dev_rp = tre_ring->base;

		result.dir = mhi_chan->dir;

		local_rp = tre_ring->rp;
		while (local_rp != dev_rp) {
			buf_info = buf_ring->rp;
			/* If it's the last TRE, get length from the event */
			if (local_rp == ev_tre)
				xfer_len = MHI_TRE_GET_EV_LEN(event);
			else
				xfer_len = buf_info->len;

			/* Unmap if it's not pre-mapped by client */
			if (likely(!buf_info->pre_mapped))
				mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

			result.buf_addr = buf_info->cb_buf;
			result.bytes_xferd = xfer_len;
			mhi_del_ring_element(mhi_cntrl, buf_ring);
			mhi_del_ring_element(mhi_cntrl, tre_ring);
			local_rp = tre_ring->rp;

			/* notify client */
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

			if (mhi_chan->dir == DMA_TO_DEVICE)
				atomic_dec(&mhi_cntrl->pending_pkts);

			/*
			 * Recycle the buffer if buffer is pre-allocated,
			 * if there is an error, not much we can do apart
			 * from dropping the packet
			 */
			if (mhi_chan->pre_alloc) {
				if (mhi_queue_buf(mhi_chan->mhi_dev,
						  mhi_chan->dir,
						  buf_info->cb_buf,
						  buf_info->len, MHI_EOT)) {
					dev_err(dev,
						"Error recycling buffer for chan:%d\n",
						mhi_chan->chan);
					kfree(buf_info->cb_buf);
				}
			}
		}
		break;
	} /* CC_EOT */
	case MHI_EV_CC_OOB:
	case MHI_EV_CC_DB_MODE:
	{
		unsigned long flags;

		mhi_chan->db_cfg.db_mode = 1;
		read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
		if (tre_ring->wp != tre_ring->rp &&
		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		}
		read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
		break;
	}
	case MHI_EV_CC_BAD_TRE:
	default:
		dev_err(dev, "Unknown event 0x%x\n", ev_code);
		break;
	} /* switch (ev_code) */

end_process_tx_event:
	if (ev_code >= MHI_EV_CC_OOB)
		write_unlock_irqrestore(&mhi_chan->lock, flags);
	else
		read_unlock_bh(&mhi_chan->lock);

	return 0;
}

static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
			   struct mhi_tre *event,
			   struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_buf_info *buf_info;
	struct mhi_result result;
	int ev_code;
	u32 cookie; /* offset to local descriptor */
	u16 xfer_len;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	cookie = MHI_TRE_GET_EV_COOKIE(event);
	xfer_len = MHI_TRE_GET_EV_LEN(event);

	/* Received out of bound cookie */
	WARN_ON(cookie >= buf_ring->len);

	buf_info = buf_ring->base + cookie;

	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
		-EOVERFLOW : 0;
	result.bytes_xferd = xfer_len;
	result.buf_addr = buf_info->cb_buf;
	result.dir = mhi_chan->dir;

	read_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
		goto end_process_rsc_event;

	WARN_ON(!buf_info->used);

	/* notify the client */
	mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

	/*
	 * Note: we're arbitrarily incrementing RP even though the completion
	 * packet we just processed may not correspond to it. We can do this
	 * because the device is guaranteed to cache descriptors in the order
	 * it receives them, so even if the completion event is for a
	 * different descriptor, every descriptor in between can be reused.
	 * e.g.
	 * Transfer Ring has descriptors: A, B, C, D
	 * Last descriptor the host queued is D (WP) and first descriptor
	 * the host queued is A (RP).
	 * The completion event we just serviced is descriptor C.
	 * Then we can safely queue descriptors to replace A, B, and C
	 * even though host did not receive any completions.
	 */
	mhi_del_ring_element(mhi_cntrl, tre_ring);
	buf_info->used = false;

end_process_rsc_event:
	read_unlock_bh(&mhi_chan->lock);

	return 0;
}

static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
				       struct mhi_tre *tre)
{
	dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
	struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *mhi_ring = &cmd_ring->ring;
	struct mhi_tre *cmd_pkt;
	struct mhi_chan *mhi_chan;
	u32 chan;

	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);

	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
	mhi_chan = &mhi_cntrl->mhi_chan[chan];
	write_lock_bh(&mhi_chan->lock);
	mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
	complete(&mhi_chan->completion);
	write_unlock_bh(&mhi_chan->lock);

	mhi_del_ring_element(mhi_cntrl, mhi_ring);
}

int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 chan;
	int count = 0;

	/*
	 * This is a quick check to avoid unnecessary event processing
	 * in case MHI is already in error state, but it's still possible
	 * to transition to error state while processing events
	 */
	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		switch (type) {
		case MHI_PKT_TYPE_BW_REQ_EVENT:
		{
			struct mhi_link_info *link_info;

			link_info = &mhi_cntrl->mhi_link_info;
			write_lock_irq(&mhi_cntrl->pm_lock);
			link_info->target_link_speed =
				MHI_TRE_GET_EV_LINKSPEED(local_rp);
			link_info->target_link_width =
				MHI_TRE_GET_EV_LINKWIDTH(local_rp);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_dbg(dev, "Received BW_REQ event\n");
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
			break;
		}
		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
		{
			enum mhi_state new_state;

			new_state = MHI_TRE_GET_EV_STATE(local_rp);

			dev_dbg(dev, "State change event to state: %s\n",
				TO_MHI_STATE_STR(new_state));

			switch (new_state) {
			case MHI_STATE_M0:
				mhi_pm_m0_transition(mhi_cntrl);
				break;
			case MHI_STATE_M1:
				mhi_pm_m1_transition(mhi_cntrl);
				break;
			case MHI_STATE_M3:
				mhi_pm_m3_transition(mhi_cntrl);
				break;
			case MHI_STATE_SYS_ERR:
			{
				enum mhi_pm_state new_state;

				dev_dbg(dev, "System error detected\n");
				write_lock_irq(&mhi_cntrl->pm_lock);
				new_state = mhi_tryset_pm_state(mhi_cntrl,
							MHI_PM_SYS_ERR_DETECT);
				write_unlock_irq(&mhi_cntrl->pm_lock);
				if (new_state == MHI_PM_SYS_ERR_DETECT)
					schedule_work(&mhi_cntrl->syserr_worker);
				break;
			}
			default:
				dev_err(dev, "Invalid state: %s\n",
					TO_MHI_STATE_STR(new_state));
			}

			break;
		}
		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
			mhi_process_cmd_completion(mhi_cntrl, local_rp);
			break;
		case MHI_PKT_TYPE_EE_EVENT:
		{
			enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
			enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);

			dev_dbg(dev, "Received EE event: %s\n",
				TO_MHI_EXEC_STR(event));
			switch (event) {
			case MHI_EE_SBL:
				st = DEV_ST_TRANSITION_SBL;
				break;
			case MHI_EE_WFW:
			case MHI_EE_AMSS:
				st = DEV_ST_TRANSITION_MISSION_MODE;
				break;
			case MHI_EE_RDDM:
				mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
				write_lock_irq(&mhi_cntrl->pm_lock);
				mhi_cntrl->ee = event;
				write_unlock_irq(&mhi_cntrl->pm_lock);
				wake_up_all(&mhi_cntrl->state_event);
				break;
			default:
				dev_err(dev,
					"Unhandled EE event: 0x%x\n", event);
			}
			if (st != DEV_ST_TRANSITION_MAX)
				mhi_queue_state_transition(mhi_cntrl, st);

			break;
		}
		case MHI_PKT_TYPE_TX_EVENT:
			chan = MHI_TRE_GET_EV_CHID(local_rp);
			mhi_chan = &mhi_cntrl->mhi_chan[chan];
			parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
			break;
		default:
			dev_err(dev, "Unhandled event type: %d\n", type);
			break;
		}

		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;
		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
		count++;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}

int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event,
				u32 event_quota)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	int count = 0;
	u32 chan;
	struct mhi_chan *mhi_chan;

	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp && event_quota > 0) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		chan = MHI_TRE_GET_EV_CHID(local_rp);
		mhi_chan = &mhi_cntrl->mhi_chan[chan];

		if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
			parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
			event_quota--;
		} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
			parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
			event_quota--;
		}

		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;
		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
		count++;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}
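
/*
 * The event_quota argument bounds how many transfer completions a single
 * call may parse, which allows budgeted processing by callers such as
 * mhi_poll(); the tasklet paths pass U32_MAX, i.e. effectively unbounded.
 */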

void mhi_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;

	/* process all pending events */
	spin_lock_bh(&mhi_event->lock);
	mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
	spin_unlock_bh(&mhi_event->lock);
}

void mhi_ctrl_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	enum mhi_pm_state pm_state = 0;
	int ret;

	/*
	 * We can check PM state w/o a lock here because there is no way
	 * the PM state can change from reg access valid to no access
	 * while this thread is executing.
	 */
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		/*
		 * We may have a pending event but are not allowed to
		 * process it since we are probably in a suspended state,
		 * so trigger a resume.
		 */
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);

		return;
	}

	/* Process ctrl events */
	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);

	/*
	 * We received an IRQ but no events to process, maybe the device
	 * went to SYS_ERR state? Check the state to confirm.
	 */
	if (!ret) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		state = mhi_get_mhi_state(mhi_cntrl);
		if (state == MHI_STATE_SYS_ERR) {
			dev_dbg(dev, "System error detected\n");
			pm_state = mhi_tryset_pm_state(mhi_cntrl,
						       MHI_PM_SYS_ERR_DETECT);
		}
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (pm_state == MHI_PM_SYS_ERR_DETECT)
			schedule_work(&mhi_cntrl->syserr_worker);
	}
}

static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
			     struct mhi_ring *ring)
{
	void *tmp = ring->wp + ring->el_size;

	if (tmp >= (ring->base + ring->len))
		tmp = ring->base;

	return (tmp == ring->rp);
}
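
/*
 * The ring is treated as full when advancing wp by one element would land
 * on rp: the one-slot-open scheme that keeps a full ring distinguishable
 * from an empty one (wp == rp).
 */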

int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
	struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
	struct mhi_buf_info *buf_info;
	struct mhi_tre *mhi_tre;
	int ret;

	/* If MHI host pre-allocates buffers then client drivers cannot queue */
	if (mhi_chan->pre_alloc)
		return -EINVAL;

	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	/* we're in M3 or transitioning to M3 */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}

	/* Toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	/* Generate the TRE */
	buf_info = buf_ring->wp;

	buf_info->v_addr = skb->data;
	buf_info->cb_buf = skb;
	buf_info->wp = tre_ring->wp;
	buf_info->dir = mhi_chan->dir;
	buf_info->len = len;

	ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
	if (ret)
		goto map_error;

	mhi_tre = tre_ring->wp;

	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);

	/* increment WP */
	mhi_add_ring_element(mhi_cntrl, tre_ring);
	mhi_add_ring_element(mhi_cntrl, buf_ring);

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		read_lock_bh(&mhi_chan->lock);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_bh(&mhi_chan->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

map_error:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);

int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
	struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
	struct mhi_buf_info *buf_info;
	struct mhi_tre *mhi_tre;

	/* If MHI host pre-allocates buffers then client drivers cannot queue */
	if (mhi_chan->pre_alloc)
		return -EINVAL;

	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
		dev_err(dev, "MHI is not in activated state, PM state: %s\n",
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		read_unlock_bh(&mhi_cntrl->pm_lock);

		return -EIO;
	}

	/* we're in M3 or transitioning to M3 */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}

	/* Toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	/* Generate the TRE */
	buf_info = buf_ring->wp;
	WARN_ON(buf_info->used);
	buf_info->p_addr = mhi_buf->dma_addr;
	buf_info->pre_mapped = true;
	buf_info->cb_buf = mhi_buf;
	buf_info->wp = tre_ring->wp;
	buf_info->dir = mhi_chan->dir;
	buf_info->len = len;

	mhi_tre = tre_ring->wp;

	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);

	/* increment WP */
	mhi_add_ring_element(mhi_cntrl, tre_ring);
	mhi_add_ring_element(mhi_cntrl, buf_ring);

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		read_lock_bh(&mhi_chan->lock);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_bh(&mhi_chan->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_queue_dma);

int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		void *buf, void *cb, size_t buf_len, enum mhi_flags flags)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_tre *mhi_tre;
	struct mhi_buf_info *buf_info;
	int eot, eob, chain, bei;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	buf_info = buf_ring->wp;
	buf_info->v_addr = buf;
	buf_info->cb_buf = cb;
	buf_info->wp = tre_ring->wp;
	buf_info->dir = mhi_chan->dir;
	buf_info->len = buf_len;

	ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
	if (ret)
		return ret;

	eob = !!(flags & MHI_EOB);
	eot = !!(flags & MHI_EOT);
	chain = !!(flags & MHI_CHAIN);
	bei = !!(mhi_chan->intmod);

	mhi_tre = tre_ring->wp;
	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);

	/* increment WP */
	mhi_add_ring_element(mhi_cntrl, tre_ring);
	mhi_add_ring_element(mhi_cntrl, buf_ring);

	return 0;
}
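
/*
 * A minimal sketch of how a client queues a receive buffer through this
 * path (the buffer size and flags here are illustrative, not mandated by
 * this file):
 *
 *	void *buf = kmalloc(mhi_dev->mhi_cntrl->buffer_len, GFP_ATOMIC);
 *
 *	if (buf && mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf,
 *				 mhi_dev->mhi_cntrl->buffer_len, MHI_EOT))
 *		kfree(buf);
 *
 * The kfree() covers the failure case (ring full or controller in an error
 * state). mhi_queue_buf() below wraps mhi_gen_tre() and then rings the
 * channel doorbell if register access is allowed in the current PM state.
 */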

int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  void *buf, size_t len, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_ring *tre_ring;
	unsigned long flags;
	int ret;

	/*
	 * This check is only a guard: MHI can still enter an error state
	 * while the rest of the function executes, which is not fatal, so
	 * we do not need to hold pm_lock here.
	 */
	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
		return -EIO;

	tre_ring = &mhi_chan->tre_ring;
	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;

	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags);
	if (unlikely(ret))
		return ret;

	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);

	/* we're in M3 or transitioning to M3 */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}

	/* Toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		unsigned long flags;

		read_lock_irqsave(&mhi_chan->lock, flags);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irqrestore(&mhi_chan->lock, flags);
	}

	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);

int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
		 struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd)
{
	struct mhi_tre *cmd_tre = NULL;
	struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *ring = &mhi_cmd->ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int chan = 0;

	if (mhi_chan)
		chan = mhi_chan->chan;

	spin_lock_bh(&mhi_cmd->lock);
	if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
		spin_unlock_bh(&mhi_cmd->lock);
		return -ENOMEM;
	}

	/* prepare the cmd tre */
	cmd_tre = ring->wp;
	switch (cmd) {
	case MHI_CMD_RESET_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
		break;
	case MHI_CMD_START_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
		break;
	default:
		dev_err(dev, "Command not supported\n");
		break;
	}

	/* queue to hardware */
	mhi_add_ring_element(mhi_cntrl, ring);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	spin_unlock_bh(&mhi_cmd->lock);

	return 0;
}

static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
				    struct mhi_chan *mhi_chan)
{
	int ret;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);

	/* no more processing events for this channel */
	mutex_lock(&mhi_chan->mutex);
	write_lock_irq(&mhi_chan->lock);
	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
		write_unlock_irq(&mhi_chan->lock);
		mutex_unlock(&mhi_chan->mutex);
		return;
	}

	mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
	write_unlock_irq(&mhi_chan->lock);

	reinit_completion(&mhi_chan->completion);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		goto error_invalid_state;
	}

	mhi_cntrl->wake_toggle(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);
	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
	if (ret)
		goto error_invalid_state;

	/* even if it fails we will still reset */
	ret = wait_for_completion_timeout(&mhi_chan->completion,
					  msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
		dev_err(dev,
			"Failed to receive cmd completion, still resetting\n");

error_invalid_state:
	if (!mhi_chan->offload_ch) {
		mhi_reset_chan(mhi_cntrl, mhi_chan);
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
	}
	dev_dbg(dev, "chan:%d successfully reset\n", mhi_chan->chan);
	mutex_unlock(&mhi_chan->mutex);
}

int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
			struct mhi_chan *mhi_chan)
{
	int ret = 0;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);

	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
		dev_err(dev,
			"Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
			mhi_chan->name);
		return -ENOTCONN;
	}

	mutex_lock(&mhi_chan->mutex);

	/* If channel is not in disabled state, do not allow it to start */
	if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
		ret = -EIO;
		dev_dbg(dev, "channel: %d is not in disabled state\n",
			mhi_chan->chan);
		goto error_init_chan;
	}

	/* Check if the client manages the channel context (offload channels) */
	if (!mhi_chan->offload_ch) {
		ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
		if (ret)
			goto error_init_chan;
	}

	reinit_completion(&mhi_chan->completion);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		ret = -EIO;
		goto error_pm_state;
	}

	mhi_cntrl->wake_toggle(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);

	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
	if (ret)
		goto error_pm_state;

	ret = wait_for_completion_timeout(&mhi_chan->completion,
					  msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
		ret = -EIO;
		goto error_pm_state;
	}

	write_lock_irq(&mhi_chan->lock);
	mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
	write_unlock_irq(&mhi_chan->lock);

	/* Pre-allocate buffer for xfer ring */
	if (mhi_chan->pre_alloc) {
		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
						       &mhi_chan->tre_ring);
		size_t len = mhi_cntrl->buffer_len;

		while (nr_el--) {
			void *buf;

			buf = kmalloc(len, GFP_KERNEL);
			if (!buf) {
				ret = -ENOMEM;
				goto error_pre_alloc;
			}

			/* Prepare transfer descriptors */
			ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf,
					  len, MHI_EOT);
			if (ret) {
				kfree(buf);
				goto error_pre_alloc;
			}
		}

		read_lock_bh(&mhi_cntrl->pm_lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			read_lock_irq(&mhi_chan->lock);
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
			read_unlock_irq(&mhi_chan->lock);
		}
		read_unlock_bh(&mhi_cntrl->pm_lock);
	}

	mutex_unlock(&mhi_chan->mutex);

	dev_dbg(dev, "Chan: %d successfully moved to start state\n",
		mhi_chan->chan);

	return 0;

error_pm_state:
	if (!mhi_chan->offload_ch)
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

error_init_chan:
	mutex_unlock(&mhi_chan->mutex);

	return ret;

error_pre_alloc:
	mutex_unlock(&mhi_chan->mutex);
	__mhi_unprepare_channel(mhi_cntrl, mhi_chan);

	return ret;
}

static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
				  struct mhi_event *mhi_event,
				  struct mhi_event_ctxt *er_ctxt,
				  int chan)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long flags;

	dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);

	ev_ring = &mhi_event->ring;

	/* mark all stale events related to channel as STALE event */
	spin_lock_irqsave(&mhi_event->lock, flags);
	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);

	local_rp = ev_ring->rp;
	while (dev_rp != local_rp) {
		if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
		    chan == MHI_TRE_GET_EV_CHID(local_rp))
			local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
					MHI_PKT_TYPE_STALE_EVENT);
		local_rp++;
		if (local_rp == (ev_ring->base + ev_ring->len))
			local_rp = ev_ring->base;
	}

	dev_dbg(dev, "Finished marking events as stale events\n");
	spin_unlock_irqrestore(&mhi_event->lock, flags);
}

static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
				struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_result result;

	/* Reset any pending buffers */
	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	result.transaction_status = -ENOTCONN;
	result.bytes_xferd = 0;
	while (tre_ring->rp != tre_ring->wp) {
		struct mhi_buf_info *buf_info = buf_ring->rp;

		if (mhi_chan->dir == DMA_TO_DEVICE)
			atomic_dec(&mhi_cntrl->pending_pkts);

		if (!buf_info->pre_mapped)
			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

		mhi_del_ring_element(mhi_cntrl, buf_ring);
		mhi_del_ring_element(mhi_cntrl, tre_ring);

		if (mhi_chan->pre_alloc) {
			kfree(buf_info->cb_buf);
		} else {
			result.buf_addr = buf_info->cb_buf;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}
	}
}

void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
{
	struct mhi_event *mhi_event;
	struct mhi_event_ctxt *er_ctxt;
	int chan = mhi_chan->chan;

	/* Nothing to reset, client doesn't queue buffers */
	if (mhi_chan->offload_ch)
		return;

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];

	mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);

	mhi_reset_data_chan(mhi_cntrl, mhi_chan);

	read_unlock_bh(&mhi_cntrl->pm_lock);
}

/* Move channel to start state */
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
{
	int ret, dir;
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
		if (!mhi_chan)
			continue;

		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
		if (ret)
			goto error_open_chan;
	}

	return 0;

error_open_chan:
	for (--dir; dir >= 0; dir--) {
		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
		if (!mhi_chan)
			continue;

		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);

void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	int dir;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
		if (!mhi_chan)
			continue;

		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
	}
}
EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);

int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
	struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
	int ret;

	spin_lock_bh(&mhi_event->lock);
	ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
	spin_unlock_bh(&mhi_event->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_poll);
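
/*
 * mhi_poll() is intended for client drivers that consume downlink
 * completions on their own schedule, for instance from a network driver's
 * NAPI poll loop: the caller passes its remaining budget and gets back the
 * number of events processed, which it can compare against that budget to
 * decide whether to re-arm interrupts.
 */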