// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"
/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
 * transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down. For instance, from any state in L0, the
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE. From DISABLE state we can only transition to
 * POR state. Also, while in L2 state, user cannot jump back to previous
 * L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 --> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
 * L2: SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
 */
static struct mhi_pm_transitions const dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
	},
};
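
/*
 * Each MHI_PM_* state is a distinct bit, so find_last_bit() on the current
 * pm_state yields the index of that state's entry in dev_state_transitions[]
 * above; the from_state check below guards against any mismatch. A requested
 * transition is honoured only if its bit is set in that entry's to_states mask.
 */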
enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}

void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	if (state == MHI_STATE_RESET) {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
	} else {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_MHISTATE_MASK,
				    MHICTRL_MHISTATE_SHIFT, state);
	}
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	void __iomem *base = mhi_cntrl->regs;
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 reset = 1, ready = 0;
	int ret, i;

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
					      MHICTRL_RESET_MASK,
					      MHICTRL_RESET_SHIFT, &reset) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
					      MHISTATUS_READY_MASK,
					      MHISTATUS_READY_SHIFT, &ready) ||
			   (!reset && ready),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Timeout if device did not transition to ready state */
	if (reset || !ready) {
		dev_err(dev, "Device Ready timeout\n");
		return -ETIMEDOUT;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}
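
/* Handle device M0 state transition and ring any pending doorbells */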
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		write_lock_irq(&mhi_chan->lock);
		if (mhi_chan->db_cfg.reset_req)
			mhi_chan->db_cfg.db_mode = true;

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		write_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * for keeping it in low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
		return -EIO;

	wake_up_all(&mhi_cntrl->state_event);

	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update to all cores */
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle SYS_ERR and Shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
				      enum mhi_pm_state transition_state)
{
	enum mhi_pm_state cur_state, prev_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(transition_state));

	/* We must notify MHI control driver so it can clean up first */
	if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
		/*
		 * If controller supports RDDM, we do not process
		 * SYS error state, instead we will jump directly
		 * to RDDM state
		 */
		if (mhi_cntrl->rddm_image) {
			dev_dbg(dev,
				"Controller supports RDDM, so skip SYS_ERR\n");
			return;
		}
		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state == transition_state) {
		mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
		mhi_cntrl->dev_state = MHI_STATE_RESET;
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	if (cur_state != transition_state) {
		dev_err(dev, "Failed to transition to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(cur_state));
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return;
	}

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					!in_reset, timeout);
		if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			mutex_unlock(&mhi_cntrl->pm_mutex);
			return;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);
	flush_work(&mhi_cntrl->st_worker);
	flush_work(&mhi_cntrl->fw_worker);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
		mhi_ready_state_transition(mhi_cntrl);
	} else {
		/* Move to disable state */
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (unlikely(cur_state != MHI_PM_DISABLE))
			dev_err(dev, "Error moving from PM state: %s to: %s\n",
				to_mhi_pm_state_str(cur_state),
				to_mhi_pm_state_str(MHI_PM_DISABLE));
	}

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	schedule_work(&mhi_cntrl->st_worker);

	return 0;
}
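
/*
 * Transitions queued above are consumed by mhi_pm_st_worker() below. For
 * example, mhi_async_power_up() queues DEV_ST_TRANSITION_PBL or
 * DEV_ST_TRANSITION_READY once the device reports a valid execution
 * environment.
 */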

/* SYS_ERR worker */
void mhi_pm_sys_err_worker(struct work_struct *work)
{
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							syserr_worker);

	mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling state transition: %s\n",
			TO_DEV_STATE_TRANS_STR(itr->state));

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			if (MHI_IN_PBL(mhi_cntrl->ee))
				wake_up_all(&mhi_cntrl->state_event);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}
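
/* Assert the device wake vote and block until the device enters M0 or an error state */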
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
		pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 val;
	int ret;

	dev_info(dev, "Requested to power ON\n");

	if (mhi_cntrl->nr_irqs < mhi_cntrl->total_ev_rings)
		return -EINVAL;

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	if (!mhi_cntrl->pre_init) {
		/* Setup device context */
		ret = mhi_init_dev_ctxt(mhi_cntrl);
		if (ret)
			goto error_dev_ctxt;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto error_setup_irq;

	/* Setup BHI offset & INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
	if (ret) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto error_bhi_offset;
	}

	mhi_cntrl->bhi = mhi_cntrl->regs + val;

	/* Setup BHIE offset */
	if (mhi_cntrl->fbc_download) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
		if (ret) {
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_err(dev, "Error reading BHIE offset\n");
			goto error_bhi_offset;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + val;
	}

	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
		dev_err(dev, "Not a valid EE for power on\n");
		ret = -EIO;
		goto error_bhi_offset;
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	if (next_state == DEV_ST_TRANSITION_PBL)
		schedule_work(&mhi_cntrl->fw_worker);

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_bhi_offset:
	mhi_deinit_free_irq(mhi_cntrl);

error_setup_irq:
	if (!mhi_cntrl->pre_init)
		mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);
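
/* Shut the device down, forcing the link-down error state first if not graceful */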
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	if (!graceful) {
		mutex_lock(&mhi_cntrl->pm_mutex);
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl,
						MHI_PM_LD_ERR_FATAL_DETECT);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		mutex_unlock(&mhi_cntrl->pm_mutex);
		if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT)
			dev_dbg(dev, "Failed to move to state: %s from: %s\n",
				to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
				to_mhi_pm_state_str(mhi_cntrl->pm_state));
	}
	mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
	mhi_deinit_free_irq(mhi_cntrl);

	if (!mhi_cntrl->pre_init) {
		/* Free all allocated resources */
		if (mhi_cntrl->fbc_image) {
			mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
			mhi_cntrl->fbc_image = NULL;
		}
		mhi_deinit_dev_ctxt(mhi_cntrl);
	}
}
EXPORT_SYMBOL_GPL(mhi_power_down);

int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);

	if (ret)
		return ret;

	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -EIO;
}
EXPORT_SYMBOL(mhi_sync_power_up);
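
/*
 * Illustrative usage only (not part of this driver): a controller driver that
 * has registered its struct mhi_controller would typically bring the device up
 * and tear it down along these lines:
 *
 *	ret = mhi_sync_power_up(mhi_cntrl);
 *	if (ret)
 *		return ret;
 *	...
 *	mhi_power_down(mhi_cntrl, true);
 */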

int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);
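
/*
 * Illustrative usage only (not part of this driver): client drivers pair the
 * wake votes above around device access, for example:
 *
 *	ret = mhi_device_get_sync(mhi_dev);
 *	if (ret)
 *		return ret;
 *	... perform transfers ...
 *	mhi_device_put(mhi_dev);
 */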