// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_PBL] = "PBL",
	[MHI_EE_SBL] = "SBL",
	[MHI_EE_AMSS] = "AMSS",
	[MHI_EE_RDDM] = "RDDM",
	[MHI_EE_WFW] = "WFW",
	[MHI_EE_PTHRU] = "PASS THRU",
	[MHI_EE_EDL] = "EDL",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
};

const char * const mhi_state_str[MHI_STATE_MAX] = {
	[MHI_STATE_RESET] = "RESET",
	[MHI_STATE_READY] = "READY",
	[MHI_STATE_M0] = "M0",
	[MHI_STATE_M1] = "M1",
	[MHI_STATE_M2] = "M2",
	[MHI_STATE_M3] = "M3",
	[MHI_STATE_M3_FAST] = "M3_FAST",
	[MHI_STATE_BHI] = "BHI",
	[MHI_STATE_SYS_ERR] = "SYS_ERR",
};

static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POR",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "FW DL Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
};

const char *to_mhi_pm_state_str(enum mhi_pm_state state)
{
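	/* The PM state is treated as a bit mask; the highest set bit selects the string */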
	int index = find_last_bit((unsigned long *)&state, 32);

	if (index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}

/* MHI protocol requires the transfer ring to be aligned to the ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
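	/* Over-allocate by (len - 1) bytes so a len-aligned block always fits */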
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}

void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   IRQF_SHARED | IRQF_NO_SUSPEND,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  IRQF_SHARED | IRQF_NO_SUSPEND,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}
	}

	return 0;

error_request:
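	/* Unwind in reverse order, skipping offload rings that were never requested */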
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}

void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}

int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

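		/* Pack channel state, burst mode and poll config into the chcfg bitfields */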
		tmp = chan_ctxt->chcfg;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
		chan_ctxt->chcfg = tmp;

		chan_ctxt->chtype = mhi_chan->type;
		chan_ctxt->erindex = mhi_chan->er_index;

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = er_ctxt->intmod;
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
		er_ctxt->intmod = tmp;

		er_ctxt->ertype = MHI_ER_TYPE_VALID;
		er_ctxt->msivec = mhi_event->irq;
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_tre);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = ring->iommu_base;
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = ring->len;
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_tre);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = ring->iommu_base;
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = ring->len;
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;
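	/* Fall through to free all event rings, starting from the last one */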

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}

int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 mask;
		u32 shift;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
			mhi_cntrl->total_ev_rings,
		},
		{
			MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
			mhi_cntrl->hw_ev_rings,
		},
		{
			MHICTRLBASE_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
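		/* Sentinel: a zero offset terminates the write loop below */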
		{ 0, 0, 0 }
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
				 CHDBOFF_CHDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
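	/* Doorbell registers are 8 bytes wide; clear both 32-bit halves */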
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
				 ERDBOFF_ERDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
				    reg_info[i].mask, reg_info[i].shift,
				    reg_info[i].val);

	return 0;
}

void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	chan_ctxt->rbase = 0;
}

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_tre);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

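	/* Mark the channel enabled in the shared channel context */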
	tmp = chan_ctxt->chcfg;
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT);
	chan_ctxt->chcfg = tmp;

	chan_ctxt->rbase = tre_ring->iommu_base;
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = tre_ring->len;
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}

static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_event_config *event_cfg;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	/* We need one IRQ for each event ring, plus an additional one for BHI */
	mhi_cntrl->nr_irqs_req = mhi_cntrl->total_ev_rings + 1;

	return 0;

error_ev_cfg:
	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}

static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			struct mhi_controller_config *config)
{
	struct mhi_channel_config *ch_cfg;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so vzalloc is used here to avoid any possible memory allocation
	 * failures
	 */
	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring length should be bigger
		 * than the transfer ring length due to internal logical
		 * channels in the device. This allows the host to queue more
		 * buffers than the transfer ring length alone would permit.
		 * For example, RSC channels should have a larger local
		 * channel length than the transfer ring length.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to the channel
		 * direction. If chtype is not defined, assign the channel
		 * direction to it.
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->auto_start = ch_cfg->auto_start;

		/*
		 * If MHI host allocates buffers, then the channel direction
		 * should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and direction-less channels must be offload
		 * channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid doorbell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}

static int parse_config(struct mhi_controller *mhi_cntrl,
			struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}

int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	u32 soc_info;
	int ret, i;

	if (!mhi_cntrl)
		return -EINVAL;

	if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto error_alloc_cmd;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
	INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto error_alloc_dev;

	mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
					SOC_HW_VERSION_FAM_NUM_SHFT;
	mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
					SOC_HW_VERSION_DEV_NUM_SHFT;
	mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
					SOC_HW_VERSION_MAJOR_VER_SHFT;
	mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
					SOC_HW_VERSION_MINOR_VER_SHFT;

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto error_alloc_dev;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev));

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto error_add_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	return 0;

error_add_dev:
	put_device(&mhi_dev->dev);

error_alloc_dev:
	kfree(mhi_cntrl->mhi_cmd);

error_alloc_cmd:
	vfree(mhi_cntrl->mhi_chan);
	kfree(mhi_cntrl->mhi_event);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);

void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	/*
	 * Allocate RDDM table if specified; this table is used for debugging
	 * purposes
	 */
	if (mhi_cntrl->rddm_size) {
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);

		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto bhie_error;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);

		if (mhi_cntrl->rddm_image)
			mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
	}

	mhi_cntrl->pre_init = true;

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

bhie_error:
	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_deinit_dev_ctxt(mhi_cntrl);
	mhi_cntrl->pre_init = false;
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with them is NULL. This scenario will happen during
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;
	dev->parent = mhi_cntrl->cntrl_dev;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}

static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

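	/* Assume an invalid client configuration until all checks below pass */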
	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If the channel supports LPM notifications, then status_cb
		 * should be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
		if (ul_chan->auto_start) {
			ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
			if (ret)
				goto exit_probe;
		}
	}

	if (dl_chan) {
		/*
		 * If the channel supports LPM notifications, then status_cb
		 * should be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by the client, then
		 * status_cb must be provided so that the framework can
		 * notify the client driver of pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	if (dl_chan && dl_chan->auto_start)
		mhi_prepare_channel(mhi_cntrl, dl_chan);

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}

static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
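		/* dir: 0 is the DL channel, 1 is the UL channel */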
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

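	/* Drop any device wake references the client driver still holds */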
	read_lock_bh(&mhi_cntrl->pm_lock);
	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
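	/* Report the channel name so udev can autoload a matching client driver */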
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
			      mhi_dev->chan_name);
}

static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type, then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

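	/* The id_table is terminated by an entry with an empty channel name */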
	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->chan_name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
};

static int __init mhi_init(void)
{
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Host Interface");