// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen asynchronously at any time. This function
 * will transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down the list. For instance, from any state in
 * L0, a transition can be made to states in L1, L2 and L3. A notable exception
 * to this rule is the DISABLE state: from DISABLE we can only transition to
 * the POR state. Also, while in the L2 state, the host cannot jump back to
 * the L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 -> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT -> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS -> POR
 * L2: SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
 */
static struct mhi_pm_transitions const dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
	},
};

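/*
 * Attempt to move the controller to a new PM state. Must be called with the
 * controller's pm_lock held as a writer. Returns the resulting PM state,
 * which equals the requested state only if the transition is allowed by the
 * table above; otherwise the current state is returned unchanged.
 */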
enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}

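/*
 * Set the MHI state of the device. MHI_STATE_RESET is requested through the
 * dedicated RESET field of MHICTRL; all other states are written to the
 * MHISTATE field.
 */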
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	if (state == MHI_STATE_RESET) {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
	} else {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_MHISTATE_MASK,
				    MHICTRL_MHISTATE_SHIFT, state);
	}
}

/* NOP for backward compatibility, the host is allowed to ring the DB in the M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	void __iomem *base = mhi_cntrl->regs;
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 reset = 1, ready = 0;
	int ret, i;

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
					      MHICTRL_RESET_MASK,
					      MHICTRL_RESET_SHIFT, &reset) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
					      MHISTATUS_READY_MASK,
					      MHISTATUS_READY_SHIFT, &ready) ||
			   (!reset && ready),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Timeout if device did not transition to ready state */
	if (reset || !ready) {
		dev_err(dev, "Device Ready timeout\n");
		return -ETIMEDOUT;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Make sure the context write is visible to all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

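/* Handle device M0 state transition */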
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		write_lock_irq(&mhi_chan->lock);
		if (mhi_chan->db_cfg.reset_req)
			mhi_chan->db_cfg.db_mode = true;

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		write_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1, the host can move the device to the M2 state in order
 * to keep it in a low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
		return -EIO;

	wake_up_all(&mhi_cntrl->state_event);

	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Make sure the context write is visible to all cores */
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle SYS_ERR and Shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
				      enum mhi_pm_state transition_state)
{
	enum mhi_pm_state cur_state, prev_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(transition_state));

	/* We must notify the MHI controller driver so it can clean up first */
	if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
		/*
		 * If the controller supports RDDM, we do not process the
		 * SYS error state; instead we jump directly to the RDDM
		 * state
		 */
		if (mhi_cntrl->rddm_image) {
			dev_dbg(dev,
				"Controller supports RDDM, so skip SYS_ERR\n");
			return;
		}
		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state == transition_state) {
		mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
		mhi_cntrl->dev_state = MHI_STATE_RESET;
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	if (cur_state != transition_state) {
		dev_err(dev, "Failed to transition to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(cur_state));
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return;
	}

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					 !in_reset, timeout);
		if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			mutex_unlock(&mhi_cntrl->pm_mutex);
			return;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);
	flush_work(&mhi_cntrl->st_worker);
	flush_work(&mhi_cntrl->fw_worker);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
		mhi_ready_state_transition(mhi_cntrl);
	} else {
		/* Move to disable state */
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (unlikely(cur_state != MHI_PM_DISABLE))
			dev_err(dev, "Error moving from PM state: %s to: %s\n",
				to_mhi_pm_state_str(cur_state),
				to_mhi_pm_state_str(MHI_PM_DISABLE));
	}

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	schedule_work(&mhi_cntrl->st_worker);

	return 0;
}

/* SYS_ERR worker */
void mhi_pm_sys_err_worker(struct work_struct *work)
{
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							syserr_worker);

	mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling state transition: %s\n",
			TO_DEV_STATE_TRANS_STR(itr->state));

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			if (MHI_IN_PBL(mhi_cntrl->ee))
				wake_up_all(&mhi_cntrl->state_event);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

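/* Assert device wake and wait for the device to enter M0 or an error state */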
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
		pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If the force flag is set, then increment the wake count value and
	 * ring the wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

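/*
 * Start the power up sequence. This function only kicks off the initial
 * state transition and returns; it does not wait for the device to reach
 * mission mode (see mhi_sync_power_up() for that).
 */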
int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 val;
	int ret;

	dev_info(dev, "Requested to power ON\n");

	if (mhi_cntrl->nr_irqs < mhi_cntrl->total_ev_rings)
		return -EINVAL;

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	if (!mhi_cntrl->pre_init) {
		/* Setup device context */
		ret = mhi_init_dev_ctxt(mhi_cntrl);
		if (ret)
			goto error_dev_ctxt;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto error_setup_irq;

	/* Setup BHI offset & INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
	if (ret) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto error_bhi_offset;
	}

	mhi_cntrl->bhi = mhi_cntrl->regs + val;

	/* Setup BHIE offset */
	if (mhi_cntrl->fbc_download) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
		if (ret) {
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_err(dev, "Error reading BHIE offset\n");
			goto error_bhi_offset;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + val;
	}

	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in a valid exec env */
	if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
		dev_err(dev, "Not a valid EE for power on\n");
		ret = -EIO;
		goto error_bhi_offset;
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	if (next_state == DEV_ST_TRANSITION_PBL)
		schedule_work(&mhi_cntrl->fw_worker);

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_bhi_offset:
	mhi_deinit_free_irq(mhi_cntrl);

error_setup_irq:
	if (!mhi_cntrl->pre_init)
		mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

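/*
 * Power down the device. If the shutdown is not graceful, the controller is
 * first forced into the link down (LD_ERR_FATAL_DETECT) state so that device
 * registers are not accessed during the shutdown process.
 */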
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	if (!graceful) {
		mutex_lock(&mhi_cntrl->pm_mutex);
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl,
						MHI_PM_LD_ERR_FATAL_DETECT);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		mutex_unlock(&mhi_cntrl->pm_mutex);
		if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT)
			dev_dbg(dev, "Failed to move to state: %s from: %s\n",
				to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
				to_mhi_pm_state_str(mhi_cntrl->pm_state));
	}
	mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
	mhi_deinit_free_irq(mhi_cntrl);

	if (!mhi_cntrl->pre_init) {
		/* Free all allocated resources */
		if (mhi_cntrl->fbc_image) {
			mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
			mhi_cntrl->fbc_image = NULL;
		}
		mhi_deinit_dev_ctxt(mhi_cntrl);
	}
}
EXPORT_SYMBOL_GPL(mhi_power_down);

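/* Power up the device and wait until it enters mission mode or times out */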
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);

	if (ret)
		return ret;

	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -EIO;
}
EXPORT_SYMBOL(mhi_sync_power_up);

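/* Force the device into RDDM mode by triggering a SYS_ERR */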
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

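/* Increment the client device's wake count and assert device wake */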
void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

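/*
 * Like mhi_device_get(), but block until the device reaches M0 state or an
 * error/timeout occurs; the wake count is only incremented on success.
 */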
int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

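/* Drop the client device's wake count and release the device wake */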
void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);