drivers/bus/mhi/core/main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4 *
5 */
6
7 #include <linux/device.h>
8 #include <linux/dma-direction.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/interrupt.h>
11 #include <linux/list.h>
12 #include <linux/mhi.h>
13 #include <linux/module.h>
14 #include <linux/skbuff.h>
15 #include <linux/slab.h>
16 #include "internal.h"
17
18 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
19 void __iomem *base, u32 offset, u32 *out)
20 {
21 return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
22 }
23
24 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
25 void __iomem *base, u32 offset,
26 u32 mask, u32 shift, u32 *out)
27 {
28 u32 tmp;
29 int ret;
30
31 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
32 if (ret)
33 return ret;
34
35 *out = (tmp & mask) >> shift;
36
37 return 0;
38 }
39
40 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
41 u32 offset, u32 val)
42 {
43 mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
44 }
45
46 void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
47 u32 offset, u32 mask, u32 shift, u32 val)
48 {
49 int ret;
50 u32 tmp;
51
52 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
53 if (ret)
54 return;
55
56 tmp &= ~mask;
57 tmp |= (val << shift);
58 mhi_write_reg(mhi_cntrl, base, offset, tmp);
59 }
60
61 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
62 dma_addr_t db_val)
63 {
64 mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
65 mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
66 }
67
68 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
69 struct db_cfg *db_cfg,
70 void __iomem *db_addr,
71 dma_addr_t db_val)
72 {
73 if (db_cfg->db_mode) {
74 db_cfg->db_val = db_val;
75 mhi_write_db(mhi_cntrl, db_addr, db_val);
76 db_cfg->db_mode = 0;
77 }
78 }
79
80 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
81 struct db_cfg *db_cfg,
82 void __iomem *db_addr,
83 dma_addr_t db_val)
84 {
85 db_cfg->db_val = db_val;
86 mhi_write_db(mhi_cntrl, db_addr, db_val);
87 }
88
89 void mhi_ring_er_db(struct mhi_event *mhi_event)
90 {
91 struct mhi_ring *ring = &mhi_event->ring;
92
93 mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
94 ring->db_addr, *ring->ctxt_wp);
95 }
96
97 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
98 {
99 dma_addr_t db;
100 struct mhi_ring *ring = &mhi_cmd->ring;
101
102 db = ring->iommu_base + (ring->wp - ring->base);
103 *ring->ctxt_wp = db;
104 mhi_write_db(mhi_cntrl, ring->db_addr, db);
105 }
106
107 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
108 struct mhi_chan *mhi_chan)
109 {
110 struct mhi_ring *ring = &mhi_chan->tre_ring;
111 dma_addr_t db;
112
113 db = ring->iommu_base + (ring->wp - ring->base);
114 *ring->ctxt_wp = db;
115 mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
116 ring->db_addr, db);
117 }
118
119 enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
120 {
121 u32 exec;
122 int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
123
124 return (ret) ? MHI_EE_MAX : exec;
125 }
126
127 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
128 {
129 u32 state;
130 int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
131 MHISTATUS_MHISTATE_MASK,
132 MHISTATUS_MHISTATE_SHIFT, &state);
133 return ret ? MHI_STATE_MAX : state;
134 }
135
136 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
137 struct mhi_buf_info *buf_info)
138 {
139 buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
140 buf_info->v_addr, buf_info->len,
141 buf_info->dir);
142 if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
143 return -ENOMEM;
144
145 return 0;
146 }
147
148 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
149 struct mhi_buf_info *buf_info)
150 {
151 void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
152 &buf_info->p_addr, GFP_ATOMIC);
153
154 if (!buf)
155 return -ENOMEM;
156
157 if (buf_info->dir == DMA_TO_DEVICE)
158 memcpy(buf, buf_info->v_addr, buf_info->len);
159
160 buf_info->bb_addr = buf;
161
162 return 0;
163 }
164
165 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
166 struct mhi_buf_info *buf_info)
167 {
168 dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
169 buf_info->dir);
170 }
171
172 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
173 struct mhi_buf_info *buf_info)
174 {
175 if (buf_info->dir == DMA_FROM_DEVICE)
176 memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
177
178 mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
179 buf_info->p_addr);
180 }
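/*
 * The two mapping paths above differ in buffer ownership: the "no_bb"
 * variants DMA-map the client's buffer directly, while the "use_bb"
 * variants stage data through a coherent bounce buffer and copy to or
 * from the client buffer according to the transfer direction.
 */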
181
182 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
183 struct mhi_ring *ring)
184 {
185 int nr_el;
186
187 if (ring->wp < ring->rp) {
188 nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
189 } else {
190 nr_el = (ring->rp - ring->base) / ring->el_size;
191 nr_el += ((ring->base + ring->len - ring->wp) /
192 ring->el_size) - 1;
193 }
194
195 return nr_el;
196 }
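/*
 * Worked example (assuming el_size = 16 and len = 64, i.e. a 4-element
 * ring): with rp at element 0 and wp at element 2, the else branch
 * yields nr_el = 0 + (2 - 1) = 1; with rp at element 3 and wp at
 * element 1, the first branch yields nr_el = 2 - 1 = 1. One slot is
 * always kept unused so that a full ring (wp + 1 == rp) can be told
 * apart from an empty one (wp == rp).
 */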
197
198 static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
199 {
200 return (addr - ring->iommu_base) + ring->base;
201 }
202
203 static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
204 struct mhi_ring *ring)
205 {
206 ring->wp += ring->el_size;
207 if (ring->wp >= (ring->base + ring->len))
208 ring->wp = ring->base;
209 /* Make the ring update visible to other cores */
210 smp_wmb();
211 }
212
213 static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
214 struct mhi_ring *ring)
215 {
216 ring->rp += ring->el_size;
217 if (ring->rp >= (ring->base + ring->len))
218 ring->rp = ring->base;
219 /* Make the ring update visible to other cores */
220 smp_wmb();
221 }
222
223 int mhi_destroy_device(struct device *dev, void *data)
224 {
225 struct mhi_device *mhi_dev;
226 struct mhi_controller *mhi_cntrl;
227
228 if (dev->bus != &mhi_bus_type)
229 return 0;
230
231 mhi_dev = to_mhi_device(dev);
232 mhi_cntrl = mhi_dev->mhi_cntrl;
233
234 /* Only destroy virtual devices that are attached to the bus */
235 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
236 return 0;
237
238 /*
239 * For the suspend and resume case, this function will get called
240 * without mhi_unregister_controller(). Hence, we need to drop the
241 * references to mhi_dev created for ul and dl channels. We can
242 * be sure that there will be no instances of mhi_dev left after
243 * this.
244 */
245 if (mhi_dev->ul_chan)
246 put_device(&mhi_dev->ul_chan->mhi_dev->dev);
247
248 if (mhi_dev->dl_chan)
249 put_device(&mhi_dev->dl_chan->mhi_dev->dev);
250
251 dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
252 mhi_dev->chan_name);
253
254 /* Notify the client and remove the device from MHI bus */
255 device_del(dev);
256 put_device(dev);
257
258 return 0;
259 }
260
261 static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
262 {
263 struct mhi_driver *mhi_drv;
264
265 if (!mhi_dev->dev.driver)
266 return;
267
268 mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
269
270 if (mhi_drv->status_cb)
271 mhi_drv->status_cb(mhi_dev, cb_reason);
272 }
273
274 /* Bind MHI channels to MHI devices */
275 void mhi_create_devices(struct mhi_controller *mhi_cntrl)
276 {
277 struct mhi_chan *mhi_chan;
278 struct mhi_device *mhi_dev;
279 struct device *dev = &mhi_cntrl->mhi_dev->dev;
280 int i, ret;
281
282 mhi_chan = mhi_cntrl->mhi_chan;
283 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
284 if (!mhi_chan->configured || mhi_chan->mhi_dev ||
285 !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
286 continue;
287 mhi_dev = mhi_alloc_device(mhi_cntrl);
288 if (IS_ERR(mhi_dev))
289 return;
290
291 mhi_dev->dev_type = MHI_DEVICE_XFER;
292 switch (mhi_chan->dir) {
293 case DMA_TO_DEVICE:
294 mhi_dev->ul_chan = mhi_chan;
295 mhi_dev->ul_chan_id = mhi_chan->chan;
296 break;
297 case DMA_FROM_DEVICE:
298 /* We use dl_chan as offload channels */
299 mhi_dev->dl_chan = mhi_chan;
300 mhi_dev->dl_chan_id = mhi_chan->chan;
301 break;
302 default:
303 dev_err(dev, "Direction not supported\n");
304 put_device(&mhi_dev->dev);
305 return;
306 }
307
308 get_device(&mhi_dev->dev);
309 mhi_chan->mhi_dev = mhi_dev;
310
311 /* Check if the next channel shares the same name */
312 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
313 if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
314 i++;
315 mhi_chan++;
316 if (mhi_chan->dir == DMA_TO_DEVICE) {
317 mhi_dev->ul_chan = mhi_chan;
318 mhi_dev->ul_chan_id = mhi_chan->chan;
319 } else {
320 mhi_dev->dl_chan = mhi_chan;
321 mhi_dev->dl_chan_id = mhi_chan->chan;
322 }
323 get_device(&mhi_dev->dev);
324 mhi_chan->mhi_dev = mhi_dev;
325 }
326 }
327
328 /* The channel name is the same for both UL and DL */
329 mhi_dev->chan_name = mhi_chan->name;
330 dev_set_name(&mhi_dev->dev, "%s_%s",
331 dev_name(mhi_cntrl->cntrl_dev),
332 mhi_dev->chan_name);
333
334 /* Init wakeup source if available */
335 if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
336 device_init_wakeup(&mhi_dev->dev, true);
337
338 ret = device_add(&mhi_dev->dev);
339 if (ret)
340 put_device(&mhi_dev->dev);
341 }
342 }
343
344 irqreturn_t mhi_irq_handler(int irq_number, void *dev)
345 {
346 struct mhi_event *mhi_event = dev;
347 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
348 struct mhi_event_ctxt *er_ctxt =
349 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
350 struct mhi_ring *ev_ring = &mhi_event->ring;
351 void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
352
353 /* Only proceed if event ring has pending events */
354 if (ev_ring->rp == dev_rp)
355 return IRQ_HANDLED;
356
357 /* For client managed event ring, notify pending data */
358 if (mhi_event->cl_manage) {
359 struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
360 struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
361
362 if (mhi_dev)
363 mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
364 } else {
365 tasklet_schedule(&mhi_event->task);
366 }
367
368 return IRQ_HANDLED;
369 }
370
371 irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
372 {
373 struct mhi_controller *mhi_cntrl = dev;
374 enum mhi_state state = MHI_STATE_MAX;
375 enum mhi_pm_state pm_state = 0;
376 enum mhi_ee_type ee = 0;
377
378 write_lock_irq(&mhi_cntrl->pm_lock);
379 if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
380 state = mhi_get_mhi_state(mhi_cntrl);
381 ee = mhi_cntrl->ee;
382 mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
383 }
384
385 if (state == MHI_STATE_SYS_ERR) {
386 dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n");
387 pm_state = mhi_tryset_pm_state(mhi_cntrl,
388 MHI_PM_SYS_ERR_DETECT);
389 }
390 write_unlock_irq(&mhi_cntrl->pm_lock);
391
392 /* If the device is in RDDM, don't bother processing the SYS error */
393 if (mhi_cntrl->ee == MHI_EE_RDDM) {
394 if (mhi_cntrl->ee != ee) {
395 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
396 wake_up_all(&mhi_cntrl->state_event);
397 }
398 goto exit_intvec;
399 }
400
401 if (pm_state == MHI_PM_SYS_ERR_DETECT) {
402 wake_up_all(&mhi_cntrl->state_event);
403
404 /* For fatal errors, we let controller decide next step */
405 if (MHI_IN_PBL(ee))
406 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
407 else
408 schedule_work(&mhi_cntrl->syserr_worker);
409 }
410
411 exit_intvec:
412
413 return IRQ_HANDLED;
414 }
415
416 irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
417 {
418 struct mhi_controller *mhi_cntrl = dev;
419
420 /* Wake up events waiting for state change */
421 wake_up_all(&mhi_cntrl->state_event);
422
423 return IRQ_WAKE_THREAD;
424 }
425
426 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
427 struct mhi_ring *ring)
428 {
429 dma_addr_t ctxt_wp;
430
431 /* Update the WP */
432 ring->wp += ring->el_size;
433 ctxt_wp = *ring->ctxt_wp + ring->el_size;
434
435 if (ring->wp >= (ring->base + ring->len)) {
436 ring->wp = ring->base;
437 ctxt_wp = ring->iommu_base;
438 }
439
440 *ring->ctxt_wp = ctxt_wp;
441
442 /* Update the RP */
443 ring->rp += ring->el_size;
444 if (ring->rp >= (ring->base + ring->len))
445 ring->rp = ring->base;
446
447 /* Make the ring updates visible to all cores */
448 smp_wmb();
449 }
450
451 static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
452 struct mhi_tre *event,
453 struct mhi_chan *mhi_chan)
454 {
455 struct mhi_ring *buf_ring, *tre_ring;
456 struct device *dev = &mhi_cntrl->mhi_dev->dev;
457 struct mhi_result result;
458 unsigned long flags = 0;
459 u32 ev_code;
460
461 ev_code = MHI_TRE_GET_EV_CODE(event);
462 buf_ring = &mhi_chan->buf_ring;
463 tre_ring = &mhi_chan->tre_ring;
464
465 result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
466 -EOVERFLOW : 0;
467
468 /*
469 * If it's a DB event, we need to grab the lock as a
470 * writer with interrupts disabled, because we may have
471 * to update the doorbell register and another thread
472 * could be doing the same.
473 */
474 if (ev_code >= MHI_EV_CC_OOB)
475 write_lock_irqsave(&mhi_chan->lock, flags);
476 else
477 read_lock_bh(&mhi_chan->lock);
478
479 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
480 goto end_process_tx_event;
481
482 switch (ev_code) {
483 case MHI_EV_CC_OVERFLOW:
484 case MHI_EV_CC_EOB:
485 case MHI_EV_CC_EOT:
486 {
487 dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
488 struct mhi_tre *local_rp, *ev_tre;
489 void *dev_rp;
490 struct mhi_buf_info *buf_info;
491 u16 xfer_len;
492
493 /* Get the TRE this event points to */
494 ev_tre = mhi_to_virtual(tre_ring, ptr);
495
496 dev_rp = ev_tre + 1;
497 if (dev_rp >= (tre_ring->base + tre_ring->len))
498 dev_rp = tre_ring->base;
499
500 result.dir = mhi_chan->dir;
501
502 local_rp = tre_ring->rp;
503 while (local_rp != dev_rp) {
504 buf_info = buf_ring->rp;
505 /* If it's the last TRE, get length from the event */
506 if (local_rp == ev_tre)
507 xfer_len = MHI_TRE_GET_EV_LEN(event);
508 else
509 xfer_len = buf_info->len;
510
511 /* Unmap if it's not pre-mapped by client */
512 if (likely(!buf_info->pre_mapped))
513 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
514
515 result.buf_addr = buf_info->cb_buf;
516 result.bytes_xferd = xfer_len;
517 mhi_del_ring_element(mhi_cntrl, buf_ring);
518 mhi_del_ring_element(mhi_cntrl, tre_ring);
519 local_rp = tre_ring->rp;
520
521 /* notify client */
522 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
523
524 if (mhi_chan->dir == DMA_TO_DEVICE)
525 atomic_dec(&mhi_cntrl->pending_pkts);
526
527 /*
528 * Recycle the buffer if it is pre-allocated. If
529 * there is an error, there is not much we can do
530 * apart from dropping the packet.
531 */
532 if (mhi_chan->pre_alloc) {
533 if (mhi_queue_buf(mhi_chan->mhi_dev,
534 mhi_chan->dir,
535 buf_info->cb_buf,
536 buf_info->len, MHI_EOT)) {
537 dev_err(dev,
538 "Error recycling buffer for chan:%d\n",
539 mhi_chan->chan);
540 kfree(buf_info->cb_buf);
541 }
542 }
543 }
544 break;
545 } /* CC_EOT */
546 case MHI_EV_CC_OOB:
547 case MHI_EV_CC_DB_MODE:
548 {
549 unsigned long flags;
550
551 mhi_chan->db_cfg.db_mode = 1;
552 read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
553 if (tre_ring->wp != tre_ring->rp &&
554 MHI_DB_ACCESS_VALID(mhi_cntrl)) {
555 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
556 }
557 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
558 break;
559 }
560 case MHI_EV_CC_BAD_TRE:
561 default:
562 dev_err(dev, "Unknown event 0x%x\n", ev_code);
563 break;
564 } /* switch (ev_code) */
565
566 end_process_tx_event:
567 if (ev_code >= MHI_EV_CC_OOB)
568 write_unlock_irqrestore(&mhi_chan->lock, flags);
569 else
570 read_unlock_bh(&mhi_chan->lock);
571
572 return 0;
573 }
574
575 static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
576 struct mhi_tre *event,
577 struct mhi_chan *mhi_chan)
578 {
579 struct mhi_ring *buf_ring, *tre_ring;
580 struct mhi_buf_info *buf_info;
581 struct mhi_result result;
582 int ev_code;
583 u32 cookie; /* offset to local descriptor */
584 u16 xfer_len;
585
586 buf_ring = &mhi_chan->buf_ring;
587 tre_ring = &mhi_chan->tre_ring;
588
589 ev_code = MHI_TRE_GET_EV_CODE(event);
590 cookie = MHI_TRE_GET_EV_COOKIE(event);
591 xfer_len = MHI_TRE_GET_EV_LEN(event);
592
593 /* Received an out-of-bounds cookie */
594 WARN_ON(cookie >= buf_ring->len);
595
596 buf_info = buf_ring->base + cookie;
597
598 result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
599 -EOVERFLOW : 0;
600 result.bytes_xferd = xfer_len;
601 result.buf_addr = buf_info->cb_buf;
602 result.dir = mhi_chan->dir;
603
604 read_lock_bh(&mhi_chan->lock);
605
606 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
607 goto end_process_rsc_event;
608
609 WARN_ON(!buf_info->used);
610
611 /* notify the client */
612 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
613
614 /*
615 * Note: We're arbitrarily incrementing RP even though the completion
616 * packet we just processed might not be the one it points to. We can
617 * do this because the device is guaranteed to consume descriptors in
618 * the order it receives them, so even though the completion event is
619 * for a different descriptor we can reuse all descriptors in between.
620 * Example:
621 * The transfer ring has descriptors: A, B, C, D
622 * The last descriptor the host queued is D (WP) and the first
623 * queued descriptor is A (RP).
624 * The completion event we just serviced is for descriptor C.
625 * We can then safely queue descriptors to replace A, B, and C
626 * even though the host did not receive completions for A and B.
627 */
628 mhi_del_ring_element(mhi_cntrl, tre_ring);
629 buf_info->used = false;
630
631 end_process_rsc_event:
632 read_unlock_bh(&mhi_chan->lock);
633
634 return 0;
635 }
636
637 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
638 struct mhi_tre *tre)
639 {
640 dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
641 struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
642 struct mhi_ring *mhi_ring = &cmd_ring->ring;
643 struct mhi_tre *cmd_pkt;
644 struct mhi_chan *mhi_chan;
645 u32 chan;
646
647 cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
648
649 chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
650 mhi_chan = &mhi_cntrl->mhi_chan[chan];
651 write_lock_bh(&mhi_chan->lock);
652 mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
653 complete(&mhi_chan->completion);
654 write_unlock_bh(&mhi_chan->lock);
655
656 mhi_del_ring_element(mhi_cntrl, mhi_ring);
657 }
658
659 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
660 struct mhi_event *mhi_event,
661 u32 event_quota)
662 {
663 struct mhi_tre *dev_rp, *local_rp;
664 struct mhi_ring *ev_ring = &mhi_event->ring;
665 struct mhi_event_ctxt *er_ctxt =
666 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
667 struct mhi_chan *mhi_chan;
668 struct device *dev = &mhi_cntrl->mhi_dev->dev;
669 u32 chan;
670 int count = 0;
671
672 /*
673 * This is a quick check to avoid unnecessary event processing
674 * in case MHI is already in an error state. It is still possible
675 * to transition to an error state while processing events.
676 */
677 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
678 return -EIO;
679
680 dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
681 local_rp = ev_ring->rp;
682
683 while (dev_rp != local_rp) {
684 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
685
686 switch (type) {
687 case MHI_PKT_TYPE_BW_REQ_EVENT:
688 {
689 struct mhi_link_info *link_info;
690
691 link_info = &mhi_cntrl->mhi_link_info;
692 write_lock_irq(&mhi_cntrl->pm_lock);
693 link_info->target_link_speed =
694 MHI_TRE_GET_EV_LINKSPEED(local_rp);
695 link_info->target_link_width =
696 MHI_TRE_GET_EV_LINKWIDTH(local_rp);
697 write_unlock_irq(&mhi_cntrl->pm_lock);
698 dev_dbg(dev, "Received BW_REQ event\n");
699 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
700 break;
701 }
702 case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
703 {
704 enum mhi_state new_state;
705
706 new_state = MHI_TRE_GET_EV_STATE(local_rp);
707
708 dev_dbg(dev, "State change event to state: %s\n",
709 TO_MHI_STATE_STR(new_state));
710
711 switch (new_state) {
712 case MHI_STATE_M0:
713 mhi_pm_m0_transition(mhi_cntrl);
714 break;
715 case MHI_STATE_M1:
716 mhi_pm_m1_transition(mhi_cntrl);
717 break;
718 case MHI_STATE_M3:
719 mhi_pm_m3_transition(mhi_cntrl);
720 break;
721 case MHI_STATE_SYS_ERR:
722 {
723 enum mhi_pm_state new_state;
724
725 dev_dbg(dev, "System error detected\n");
726 write_lock_irq(&mhi_cntrl->pm_lock);
727 new_state = mhi_tryset_pm_state(mhi_cntrl,
728 MHI_PM_SYS_ERR_DETECT);
729 write_unlock_irq(&mhi_cntrl->pm_lock);
730 if (new_state == MHI_PM_SYS_ERR_DETECT)
731 schedule_work(&mhi_cntrl->syserr_worker);
732 break;
733 }
734 default:
735 dev_err(dev, "Invalid state: %s\n",
736 TO_MHI_STATE_STR(new_state));
737 }
738
739 break;
740 }
741 case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
742 mhi_process_cmd_completion(mhi_cntrl, local_rp);
743 break;
744 case MHI_PKT_TYPE_EE_EVENT:
745 {
746 enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
747 enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
748
749 dev_dbg(dev, "Received EE event: %s\n",
750 TO_MHI_EXEC_STR(event));
751 switch (event) {
752 case MHI_EE_SBL:
753 st = DEV_ST_TRANSITION_SBL;
754 break;
755 case MHI_EE_WFW:
756 case MHI_EE_AMSS:
757 st = DEV_ST_TRANSITION_MISSION_MODE;
758 break;
759 case MHI_EE_RDDM:
760 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
761 write_lock_irq(&mhi_cntrl->pm_lock);
762 mhi_cntrl->ee = event;
763 write_unlock_irq(&mhi_cntrl->pm_lock);
764 wake_up_all(&mhi_cntrl->state_event);
765 break;
766 default:
767 dev_err(dev,
768 "Unhandled EE event: 0x%x\n", type);
769 }
770 if (st != DEV_ST_TRANSITION_MAX)
771 mhi_queue_state_transition(mhi_cntrl, st);
772
773 break;
774 }
775 case MHI_PKT_TYPE_TX_EVENT:
776 chan = MHI_TRE_GET_EV_CHID(local_rp);
777 mhi_chan = &mhi_cntrl->mhi_chan[chan];
778 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
779 event_quota--;
780 break;
781 default:
782 dev_err(dev, "Unhandled event type: %d\n", type);
783 break;
784 }
785
786 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
787 local_rp = ev_ring->rp;
788 dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
789 count++;
790 }
791
792 read_lock_bh(&mhi_cntrl->pm_lock);
793 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
794 mhi_ring_er_db(mhi_event);
795 read_unlock_bh(&mhi_cntrl->pm_lock);
796
797 return count;
798 }
799
800 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
801 struct mhi_event *mhi_event,
802 u32 event_quota)
803 {
804 struct mhi_tre *dev_rp, *local_rp;
805 struct mhi_ring *ev_ring = &mhi_event->ring;
806 struct mhi_event_ctxt *er_ctxt =
807 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
808 int count = 0;
809 u32 chan;
810 struct mhi_chan *mhi_chan;
811
812 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
813 return -EIO;
814
815 dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
816 local_rp = ev_ring->rp;
817
818 while (dev_rp != local_rp && event_quota > 0) {
819 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
820
821 chan = MHI_TRE_GET_EV_CHID(local_rp);
822 mhi_chan = &mhi_cntrl->mhi_chan[chan];
823
824 if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
825 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
826 event_quota--;
827 } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
828 parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
829 event_quota--;
830 }
831
832 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
833 local_rp = ev_ring->rp;
834 dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
835 count++;
836 }
837 read_lock_bh(&mhi_cntrl->pm_lock);
838 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
839 mhi_ring_er_db(mhi_event);
840 read_unlock_bh(&mhi_cntrl->pm_lock);
841
842 return count;
843 }
844
845 void mhi_ev_task(unsigned long data)
846 {
847 struct mhi_event *mhi_event = (struct mhi_event *)data;
848 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
849
850 /* process all pending events */
851 spin_lock_bh(&mhi_event->lock);
852 mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
853 spin_unlock_bh(&mhi_event->lock);
854 }
855
856 void mhi_ctrl_ev_task(unsigned long data)
857 {
858 struct mhi_event *mhi_event = (struct mhi_event *)data;
859 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
860 struct device *dev = &mhi_cntrl->mhi_dev->dev;
861 enum mhi_state state;
862 enum mhi_pm_state pm_state = 0;
863 int ret;
864
865 /*
866 * We can check the PM state without a lock here because there is no
867 * way the PM state can change from reg access valid to no access
868 * while this thread is executing.
869 */
870 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
871 /*
872 * We may have a pending event but are not allowed to
873 * process it since we are probably in a suspended state,
874 * so trigger a resume.
875 */
876 mhi_cntrl->runtime_get(mhi_cntrl);
877 mhi_cntrl->runtime_put(mhi_cntrl);
878
879 return;
880 }
881
882 /* Process ctrl events */
883 ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
884
885 /*
886 * We received an IRQ but there are no events to process. Maybe the
887 * device went to the SYS_ERR state? Check the state to confirm.
888 */
889 if (!ret) {
890 write_lock_irq(&mhi_cntrl->pm_lock);
891 state = mhi_get_mhi_state(mhi_cntrl);
892 if (state == MHI_STATE_SYS_ERR) {
893 dev_dbg(dev, "System error detected\n");
894 pm_state = mhi_tryset_pm_state(mhi_cntrl,
895 MHI_PM_SYS_ERR_DETECT);
896 }
897 write_unlock_irq(&mhi_cntrl->pm_lock);
898 if (pm_state == MHI_PM_SYS_ERR_DETECT)
899 schedule_work(&mhi_cntrl->syserr_worker);
900 }
901 }
902
903 static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
904 struct mhi_ring *ring)
905 {
906 void *tmp = ring->wp + ring->el_size;
907
908 if (tmp >= (ring->base + ring->len))
909 tmp = ring->base;
910
911 return (tmp == ring->rp);
912 }
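/*
 * The ring is reported full once advancing wp by a single element
 * would make it collide with rp; this matches the slot reserved in
 * get_nr_avail_ring_elements() and keeps "full" (wp + 1 == rp)
 * distinguishable from "empty" (wp == rp).
 */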
913
914 int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
915 struct sk_buff *skb, size_t len, enum mhi_flags mflags)
916 {
917 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
918 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
919 mhi_dev->dl_chan;
920 struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
921 struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
922 struct mhi_buf_info *buf_info;
923 struct mhi_tre *mhi_tre;
924 int ret;
925
926 /* If MHI host pre-allocates buffers then client drivers cannot queue */
927 if (mhi_chan->pre_alloc)
928 return -EINVAL;
929
930 if (mhi_is_ring_full(mhi_cntrl, tre_ring))
931 return -ENOMEM;
932
933 read_lock_bh(&mhi_cntrl->pm_lock);
934 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
935 read_unlock_bh(&mhi_cntrl->pm_lock);
936 return -EIO;
937 }
938
939 /* we're in M3 or transitioning to M3 */
940 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
941 mhi_cntrl->runtime_get(mhi_cntrl);
942 mhi_cntrl->runtime_put(mhi_cntrl);
943 }
944
945 /* Toggle wake to exit out of M2 */
946 mhi_cntrl->wake_toggle(mhi_cntrl);
947
948 /* Generate the TRE */
949 buf_info = buf_ring->wp;
950
951 buf_info->v_addr = skb->data;
952 buf_info->cb_buf = skb;
953 buf_info->wp = tre_ring->wp;
954 buf_info->dir = mhi_chan->dir;
955 buf_info->len = len;
956 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
957 if (ret)
958 goto map_error;
959
960 mhi_tre = tre_ring->wp;
961
962 mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
963 mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
964 mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
965
966 /* increment WP */
967 mhi_add_ring_element(mhi_cntrl, tre_ring);
968 mhi_add_ring_element(mhi_cntrl, buf_ring);
969
970 if (mhi_chan->dir == DMA_TO_DEVICE)
971 atomic_inc(&mhi_cntrl->pending_pkts);
972
973 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
974 read_lock_bh(&mhi_chan->lock);
975 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
976 read_unlock_bh(&mhi_chan->lock);
977 }
978
979 read_unlock_bh(&mhi_cntrl->pm_lock);
980
981 return 0;
982
983 map_error:
984 read_unlock_bh(&mhi_cntrl->pm_lock);
985
986 return ret;
987 }
988 EXPORT_SYMBOL_GPL(mhi_queue_skb);
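/*
 * Usage sketch (hypothetical client, error handling trimmed): a
 * network-style client could hand an skb to its UL channel and back
 * off when the transfer ring is full. "mhi_dev", "skb" and "ndev"
 * belong to the client driver, not to this core.
 *
 *	if (mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len,
 *			  MHI_EOT) == -ENOMEM) {
 *		netif_stop_queue(ndev);
 *		return NETDEV_TX_BUSY;
 *	}
 */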
989
990 int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
991 struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
992 {
993 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
994 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
995 mhi_dev->dl_chan;
996 struct device *dev = &mhi_cntrl->mhi_dev->dev;
997 struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
998 struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
999 struct mhi_buf_info *buf_info;
1000 struct mhi_tre *mhi_tre;
1001
1002 /* If MHI host pre-allocates buffers then client drivers cannot queue */
1003 if (mhi_chan->pre_alloc)
1004 return -EINVAL;
1005
1006 if (mhi_is_ring_full(mhi_cntrl, tre_ring))
1007 return -ENOMEM;
1008
1009 read_lock_bh(&mhi_cntrl->pm_lock);
1010 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
1011 dev_err(dev, "MHI is not in an active state, PM state: %s\n",
1012 to_mhi_pm_state_str(mhi_cntrl->pm_state));
1013 read_unlock_bh(&mhi_cntrl->pm_lock);
1014
1015 return -EIO;
1016 }
1017
1018 /* we're in M3 or transitioning to M3 */
1019 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
1020 mhi_cntrl->runtime_get(mhi_cntrl);
1021 mhi_cntrl->runtime_put(mhi_cntrl);
1022 }
1023
1024 /* Toggle wake to exit out of M2 */
1025 mhi_cntrl->wake_toggle(mhi_cntrl);
1026
1027 /* Generate the TRE */
1028 buf_info = buf_ring->wp;
1029 WARN_ON(buf_info->used);
1030 buf_info->p_addr = mhi_buf->dma_addr;
1031 buf_info->pre_mapped = true;
1032 buf_info->cb_buf = mhi_buf;
1033 buf_info->wp = tre_ring->wp;
1034 buf_info->dir = mhi_chan->dir;
1035 buf_info->len = len;
1036
1037 mhi_tre = tre_ring->wp;
1038
1039 mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
1040 mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
1041 mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
1042
1043 /* increment WP */
1044 mhi_add_ring_element(mhi_cntrl, tre_ring);
1045 mhi_add_ring_element(mhi_cntrl, buf_ring);
1046
1047 if (mhi_chan->dir == DMA_TO_DEVICE)
1048 atomic_inc(&mhi_cntrl->pending_pkts);
1049
1050 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
1051 read_lock_bh(&mhi_chan->lock);
1052 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1053 read_unlock_bh(&mhi_chan->lock);
1054 }
1055
1056 read_unlock_bh(&mhi_cntrl->pm_lock);
1057
1058 return 0;
1059 }
1060 EXPORT_SYMBOL_GPL(mhi_queue_dma);
1061
1062 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
1063 void *buf, void *cb, size_t buf_len, enum mhi_flags flags)
1064 {
1065 struct mhi_ring *buf_ring, *tre_ring;
1066 struct mhi_tre *mhi_tre;
1067 struct mhi_buf_info *buf_info;
1068 int eot, eob, chain, bei;
1069 int ret;
1070
1071 buf_ring = &mhi_chan->buf_ring;
1072 tre_ring = &mhi_chan->tre_ring;
1073
1074 buf_info = buf_ring->wp;
1075 buf_info->v_addr = buf;
1076 buf_info->cb_buf = cb;
1077 buf_info->wp = tre_ring->wp;
1078 buf_info->dir = mhi_chan->dir;
1079 buf_info->len = buf_len;
1080
1081 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
1082 if (ret)
1083 return ret;
1084
1085 eob = !!(flags & MHI_EOB);
1086 eot = !!(flags & MHI_EOT);
1087 chain = !!(flags & MHI_CHAIN);
1088 bei = !!(mhi_chan->intmod);
1089
1090 mhi_tre = tre_ring->wp;
1091 mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
1092 mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
1093 mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
1094
1095 /* increment WP */
1096 mhi_add_ring_element(mhi_cntrl, tre_ring);
1097 mhi_add_ring_element(mhi_cntrl, buf_ring);
1098
1099 return 0;
1100 }
1101
1102 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1103 void *buf, size_t len, enum mhi_flags mflags)
1104 {
1105 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1106 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1107 mhi_dev->dl_chan;
1108 struct mhi_ring *tre_ring;
1109 unsigned long flags;
1110 int ret;
1111
1112 /*
1113 * This check is here only as a guard; it is always possible
1114 * for MHI to enter an error state while executing the rest of
1115 * the function, which is not fatal, so we do not need to hold pm_lock.
1116 */
1117 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
1118 return -EIO;
1119
1120 tre_ring = &mhi_chan->tre_ring;
1121 if (mhi_is_ring_full(mhi_cntrl, tre_ring))
1122 return -ENOMEM;
1123
1124 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags);
1125 if (unlikely(ret))
1126 return ret;
1127
1128 read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
1129
1130 /* we're in M3 or transitioning to M3 */
1131 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
1132 mhi_cntrl->runtime_get(mhi_cntrl);
1133 mhi_cntrl->runtime_put(mhi_cntrl);
1134 }
1135
1136 /* Toggle wake to exit out of M2 */
1137 mhi_cntrl->wake_toggle(mhi_cntrl);
1138
1139 if (mhi_chan->dir == DMA_TO_DEVICE)
1140 atomic_inc(&mhi_cntrl->pending_pkts);
1141
1142 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
1143 unsigned long flags;
1144
1145 read_lock_irqsave(&mhi_chan->lock, flags);
1146 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1147 read_unlock_irqrestore(&mhi_chan->lock, flags);
1148 }
1149
1150 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
1151
1152 return 0;
1153 }
1154 EXPORT_SYMBOL_GPL(mhi_queue_buf);
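/*
 * Usage sketch (hypothetical client): queue a receive buffer on the
 * DL channel; the channel's xfer_cb returns it in struct mhi_result
 * once the device completes the transfer. "buf" and "len" are the
 * client's own allocation.
 *
 *	ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf, len, MHI_EOT);
 *	if (ret)
 *		kfree(buf);
 */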
1155
1156 int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
1157 struct mhi_chan *mhi_chan,
1158 enum mhi_cmd_type cmd)
1159 {
1160 struct mhi_tre *cmd_tre = NULL;
1161 struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
1162 struct mhi_ring *ring = &mhi_cmd->ring;
1163 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1164 int chan = 0;
1165
1166 if (mhi_chan)
1167 chan = mhi_chan->chan;
1168
1169 spin_lock_bh(&mhi_cmd->lock);
1170 if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
1171 spin_unlock_bh(&mhi_cmd->lock);
1172 return -ENOMEM;
1173 }
1174
1175 /* prepare the cmd tre */
1176 cmd_tre = ring->wp;
1177 switch (cmd) {
1178 case MHI_CMD_RESET_CHAN:
1179 cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
1180 cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
1181 cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
1182 break;
1183 case MHI_CMD_START_CHAN:
1184 cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
1185 cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
1186 cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
1187 break;
1188 default:
1189 dev_err(dev, "Command not supported\n");
1190 break;
1191 }
1192
1193 /* queue to hardware */
1194 mhi_add_ring_element(mhi_cntrl, ring);
1195 read_lock_bh(&mhi_cntrl->pm_lock);
1196 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1197 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
1198 read_unlock_bh(&mhi_cntrl->pm_lock);
1199 spin_unlock_bh(&mhi_cmd->lock);
1200
1201 return 0;
1202 }
1203
1204 static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
1205 struct mhi_chan *mhi_chan)
1206 {
1207 int ret;
1208 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1209
1210 dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);
1211
1212 /* No more processing of events for this channel */
1213 mutex_lock(&mhi_chan->mutex);
1214 write_lock_irq(&mhi_chan->lock);
1215 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
1216 write_unlock_irq(&mhi_chan->lock);
1217 mutex_unlock(&mhi_chan->mutex);
1218 return;
1219 }
1220
1221 mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1222 write_unlock_irq(&mhi_chan->lock);
1223
1224 reinit_completion(&mhi_chan->completion);
1225 read_lock_bh(&mhi_cntrl->pm_lock);
1226 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1227 read_unlock_bh(&mhi_cntrl->pm_lock);
1228 goto error_invalid_state;
1229 }
1230
1231 mhi_cntrl->wake_toggle(mhi_cntrl);
1232 read_unlock_bh(&mhi_cntrl->pm_lock);
1233
1234 mhi_cntrl->runtime_get(mhi_cntrl);
1235 mhi_cntrl->runtime_put(mhi_cntrl);
1236 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
1237 if (ret)
1238 goto error_invalid_state;
1239
1240 /* even if it fails we will still reset */
1241 ret = wait_for_completion_timeout(&mhi_chan->completion,
1242 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1243 if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
1244 dev_err(dev,
1245 "Failed to receive cmd completion, still resetting\n");
1246
1247 error_invalid_state:
1248 if (!mhi_chan->offload_ch) {
1249 mhi_reset_chan(mhi_cntrl, mhi_chan);
1250 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1251 }
1252 dev_dbg(dev, "chan:%d successfully reset\n", mhi_chan->chan);
1253 mutex_unlock(&mhi_chan->mutex);
1254 }
1255
1256 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
1257 struct mhi_chan *mhi_chan)
1258 {
1259 int ret = 0;
1260 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1261
1262 dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);
1263
1264 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1265 dev_err(dev,
1266 "Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
1267 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
1268 mhi_chan->name);
1269 return -ENOTCONN;
1270 }
1271
1272 mutex_lock(&mhi_chan->mutex);
1273
1274 /* If the channel is not in the disabled state, do not allow it to start */
1275 if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
1276 ret = -EIO;
1277 dev_dbg(dev, "channel: %d is not in disabled state\n",
1278 mhi_chan->chan);
1279 goto error_init_chan;
1280 }
1281
1282 /* Check if the client manages the channel context for offload channels */
1283 if (!mhi_chan->offload_ch) {
1284 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1285 if (ret)
1286 goto error_init_chan;
1287 }
1288
1289 reinit_completion(&mhi_chan->completion);
1290 read_lock_bh(&mhi_cntrl->pm_lock);
1291 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1292 read_unlock_bh(&mhi_cntrl->pm_lock);
1293 ret = -EIO;
1294 goto error_pm_state;
1295 }
1296
1297 mhi_cntrl->wake_toggle(mhi_cntrl);
1298 read_unlock_bh(&mhi_cntrl->pm_lock);
1299 mhi_cntrl->runtime_get(mhi_cntrl);
1300 mhi_cntrl->runtime_put(mhi_cntrl);
1301
1302 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
1303 if (ret)
1304 goto error_pm_state;
1305
1306 ret = wait_for_completion_timeout(&mhi_chan->completion,
1307 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1308 if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
1309 ret = -EIO;
1310 goto error_pm_state;
1311 }
1312
1313 write_lock_irq(&mhi_chan->lock);
1314 mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
1315 write_unlock_irq(&mhi_chan->lock);
1316
1317 /* Pre-allocate buffer for xfer ring */
1318 if (mhi_chan->pre_alloc) {
1319 int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
1320 &mhi_chan->tre_ring);
1321 size_t len = mhi_cntrl->buffer_len;
1322
1323 while (nr_el--) {
1324 void *buf;
1325
1326 buf = kmalloc(len, GFP_KERNEL);
1327 if (!buf) {
1328 ret = -ENOMEM;
1329 goto error_pre_alloc;
1330 }
1331
1332 /* Prepare transfer descriptors */
1333 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf,
1334 len, MHI_EOT);
1335 if (ret) {
1336 kfree(buf);
1337 goto error_pre_alloc;
1338 }
1339 }
1340
1341 read_lock_bh(&mhi_cntrl->pm_lock);
1342 if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
1343 read_lock_irq(&mhi_chan->lock);
1344 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1345 read_unlock_irq(&mhi_chan->lock);
1346 }
1347 read_unlock_bh(&mhi_cntrl->pm_lock);
1348 }
1349
1350 mutex_unlock(&mhi_chan->mutex);
1351
1352 dev_dbg(dev, "Chan: %d successfully moved to start state\n",
1353 mhi_chan->chan);
1354
1355 return 0;
1356
1357 error_pm_state:
1358 if (!mhi_chan->offload_ch)
1359 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1360
1361 error_init_chan:
1362 mutex_unlock(&mhi_chan->mutex);
1363
1364 return ret;
1365
1366 error_pre_alloc:
1367 mutex_unlock(&mhi_chan->mutex);
1368 __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1369
1370 return ret;
1371 }
1372
1373 static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
1374 struct mhi_event *mhi_event,
1375 struct mhi_event_ctxt *er_ctxt,
1376 int chan)
1377
1378 {
1379 struct mhi_tre *dev_rp, *local_rp;
1380 struct mhi_ring *ev_ring;
1381 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1382 unsigned long flags;
1383
1384 dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
1385
1386 ev_ring = &mhi_event->ring;
1387
1388 /* Mark all pending events related to the channel as STALE */
1389 spin_lock_irqsave(&mhi_event->lock, flags);
1390 dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
1391
1392 local_rp = ev_ring->rp;
1393 while (dev_rp != local_rp) {
1394 if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
1395 chan == MHI_TRE_GET_EV_CHID(local_rp))
1396 local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
1397 MHI_PKT_TYPE_STALE_EVENT);
1398 local_rp++;
1399 if (local_rp == (ev_ring->base + ev_ring->len))
1400 local_rp = ev_ring->base;
1401 }
1402
1403 dev_dbg(dev, "Finished marking events as stale\n");
1404 spin_unlock_irqrestore(&mhi_event->lock, flags);
1405 }
1406
1407 static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
1408 struct mhi_chan *mhi_chan)
1409 {
1410 struct mhi_ring *buf_ring, *tre_ring;
1411 struct mhi_result result;
1412
1413 /* Reset any pending buffers */
1414 buf_ring = &mhi_chan->buf_ring;
1415 tre_ring = &mhi_chan->tre_ring;
1416 result.transaction_status = -ENOTCONN;
1417 result.bytes_xferd = 0;
1418 while (tre_ring->rp != tre_ring->wp) {
1419 struct mhi_buf_info *buf_info = buf_ring->rp;
1420
1421 if (mhi_chan->dir == DMA_TO_DEVICE)
1422 atomic_dec(&mhi_cntrl->pending_pkts);
1423
1424 if (!buf_info->pre_mapped)
1425 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
1426
1427 mhi_del_ring_element(mhi_cntrl, buf_ring);
1428 mhi_del_ring_element(mhi_cntrl, tre_ring);
1429
1430 if (mhi_chan->pre_alloc) {
1431 kfree(buf_info->cb_buf);
1432 } else {
1433 result.buf_addr = buf_info->cb_buf;
1434 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
1435 }
1436 }
1437 }
1438
1439 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1440 {
1441 struct mhi_event *mhi_event;
1442 struct mhi_event_ctxt *er_ctxt;
1443 int chan = mhi_chan->chan;
1444
1445 /* Nothing to reset, client doesn't queue buffers */
1446 if (mhi_chan->offload_ch)
1447 return;
1448
1449 read_lock_bh(&mhi_cntrl->pm_lock);
1450 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1451 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1452
1453 mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
1454
1455 mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1456
1457 read_unlock_bh(&mhi_cntrl->pm_lock);
1458 }
1459
1460 /* Move channel to start state */
1461 int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
1462 {
1463 int ret, dir;
1464 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1465 struct mhi_chan *mhi_chan;
1466
1467 for (dir = 0; dir < 2; dir++) {
1468 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1469 if (!mhi_chan)
1470 continue;
1471
1472 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
1473 if (ret)
1474 goto error_open_chan;
1475 }
1476
1477 return 0;
1478
1479 error_open_chan:
1480 for (--dir; dir >= 0; dir--) {
1481 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1482 if (!mhi_chan)
1483 continue;
1484
1485 __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1486 }
1487
1488 return ret;
1489 }
1490 EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
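/*
 * Usage sketch (hypothetical client probe): start both channels of the
 * device before queuing any transfers; the matching remove path calls
 * mhi_unprepare_from_transfer() below.
 *
 *	static int my_client_probe(struct mhi_device *mhi_dev,
 *				   const struct mhi_device_id *id)
 *	{
 *		int ret = mhi_prepare_for_transfer(mhi_dev);
 *
 *		if (ret)
 *			return ret;
 *
 *		return my_client_start(mhi_dev);
 *	}
 */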
1491
1492 void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
1493 {
1494 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1495 struct mhi_chan *mhi_chan;
1496 int dir;
1497
1498 for (dir = 0; dir < 2; dir++) {
1499 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1500 if (!mhi_chan)
1501 continue;
1502
1503 __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1504 }
1505 }
1506 EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
1507
1508 int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
1509 {
1510 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1511 struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
1512 struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1513 int ret;
1514
1515 spin_lock_bh(&mhi_event->lock);
1516 ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
1517 spin_unlock_bh(&mhi_event->lock);
1518
1519 return ret;
1520 }
1521 EXPORT_SYMBOL_GPL(mhi_poll);
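/*
 * Usage sketch (hypothetical client poll loop): bound DL event
 * processing per pass with a client-chosen budget, e.g. a NAPI weight.
 * A negative return means the event ring is not accessible (MHI is in
 * an error state).
 *
 *	int processed = mhi_poll(mhi_dev, budget);
 *
 *	if (processed < 0)
 *		return processed;
 */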