// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"
const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_PBL] = "PBL",
	[MHI_EE_SBL] = "SBL",
	[MHI_EE_AMSS] = "AMSS",
	[MHI_EE_RDDM] = "RDDM",
	[MHI_EE_WFW] = "WFW",
	[MHI_EE_PTHRU] = "PASS THRU",
	[MHI_EE_EDL] = "EDL",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};
const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
};
const char * const mhi_state_str[MHI_STATE_MAX] = {
	[MHI_STATE_RESET] = "RESET",
	[MHI_STATE_READY] = "READY",
	[MHI_STATE_M0] = "M0",
	[MHI_STATE_M1] = "M1",
	[MHI_STATE_M2] = "M2",
	[MHI_STATE_M3] = "M3",
	[MHI_STATE_M3_FAST] = "M3_FAST",
	[MHI_STATE_BHI] = "BHI",
	[MHI_STATE_SYS_ERR] = "SYS_ERR",
};
static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POR",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "FW DL Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
};
const char *to_mhi_pm_state_str(enum mhi_pm_state state)
{
	int index = find_last_bit((unsigned long *)&state, 32);

	if (index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}
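/*
 * Note: enum mhi_pm_state values are bitmask positions (one bit per state,
 * per the definitions in internal.h), so the helper above maps the highest
 * set bit back to a name rather than indexing by the raw enum value. As an
 * illustrative example (assuming MHI_PM_M0 is BIT(2)), find_last_bit()
 * returns 2, which selects mhi_pm_state_str[MHI_PM_STATE_M0].
 */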
/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}
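/*
 * A worked example of the alignment arithmetic above, assuming len is a
 * power of two (the ~(len - 1) mask only rounds correctly in that case).
 * For a 4096-byte ring:
 *
 *	alloc_size = 4096 + 4095 = 8191 bytes, so a naturally aligned
 *	4096-byte window is guaranteed to fit wherever dma_handle lands.
 *
 *	dma_handle = 0x12345600
 *	iommu_base = (0x12345600 + 0xfff) & ~0xfff = 0x12346000
 *	base       = pre_aligned + (0x12346000 - 0x12345600)
 *
 * The device is programmed with iommu_base while the host indexes the ring
 * through base; both refer to the same aligned window.
 */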
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   IRQF_SHARED | IRQF_NO_SUSPEND,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  IRQF_SHARED | IRQF_NO_SUSPEND,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}
	}

	return 0;

error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		tmp = chan_ctxt->chcfg;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
		chan_ctxt->chcfg = tmp;

		chan_ctxt->chtype = mhi_chan->type;
		chan_ctxt->erindex = mhi_chan->er_index;

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = er_ctxt->intmod;
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
		er_ctxt->intmod = tmp;

		er_ctxt->ertype = MHI_ER_TYPE_VALID;
		er_ctxt->msivec = mhi_event->irq;
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_tre);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals to the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = ring->iommu_base;
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = ring->len;
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_tre);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = ring->iommu_base;
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = ring->len;
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}
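/*
 * Note on the error handling above: the labels are laid out in reverse
 * allocation order and fall through into one another, so a failure at any
 * stage unwinds exactly what was allocated so far. The fall-through from
 * error_alloc_cmd into error_alloc_er works because i and mhi_event are
 * first reset to the end of the event ring array.
 */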
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 mask;
		u32 shift;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
			mhi_cntrl->total_ev_rings,
		},
		{
			MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
			mhi_cntrl->hw_ev_rings,
		},
		{
			MHICTRLBASE_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{ 0, 0, 0 }
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
				 CHDBOFF_CHDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
				 ERDBOFF_ERDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
				    reg_info[i].mask, reg_info[i].shift,
				    reg_info[i].val);

	return 0;
}
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	chan_ctxt->rbase = 0;
}
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_tre);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = chan_ctxt->chcfg;
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT);
	chan_ctxt->chcfg = tmp;

	chan_ctxt->rbase = tre_ring->iommu_base;
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = tre_ring->len;
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}
static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_event_config *event_cfg;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	/* We need IRQ for each event ring + additional one for BHI */
	mhi_cntrl->nr_irqs_req = mhi_cntrl->total_ev_rings + 1;

	return 0;

error_ev_cfg:

	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}
static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			struct mhi_controller_config *config)
{
	struct mhi_channel_config *ch_cfg;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so to avoid any memory possible allocation failures, vzalloc is
	 * used here
	 */
	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, local ring length should be bigger than
		 * the transfer ring length due to internal logical channels
		 * in device. So host can queue much more buffers than transfer
		 * ring length. Example, RSC channels should have a larger local
		 * channel length than transfer ring length.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to channel directions.
		 * So, if it is not defined then assign channel direction to
		 * chtype
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->auto_start = ch_cfg->auto_start;

		/*
		 * If MHI host allocates buffers, then the channel direction
		 * should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and direction less channel must be an
		 * offload channel
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid Door bell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}
static int parse_config(struct mhi_controller *mhi_cntrl,
			struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	u32 soc_info;
	int ret, i;

	if (!mhi_cntrl)
		return -EINVAL;

	if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put)
		return -EINVAL;

	if (!mhi_cntrl->status_cb || !mhi_cntrl->link_status)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto error_alloc_cmd;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
	INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto error_alloc_dev;

	mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
					SOC_HW_VERSION_FAM_NUM_SHFT;
	mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
					SOC_HW_VERSION_DEV_NUM_SHFT;
	mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
					SOC_HW_VERSION_MAJOR_VER_SHFT;
	mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
					SOC_HW_VERSION_MINOR_VER_SHFT;

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto error_alloc_dev;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev));

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto error_add_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	return 0;

error_add_dev:
	put_device(&mhi_dev->dev);

error_alloc_dev:
	kfree(mhi_cntrl->mhi_cmd);

error_alloc_cmd:
	vfree(mhi_cntrl->mhi_chan);
	kfree(mhi_cntrl->mhi_event);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);
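/*
 * A minimal usage sketch for mhi_register_controller() from a hypothetical
 * controller driver (my_ch_cfg, my_event_cfg and the my_* callbacks are
 * illustrative names, not part of this file):
 *
 *	static struct mhi_controller_config my_config = {
 *		.max_channels = 128,
 *		.timeout_ms = 8000,
 *		.num_channels = ARRAY_SIZE(my_ch_cfg),
 *		.ch_cfg = my_ch_cfg,
 *		.num_events = ARRAY_SIZE(my_event_cfg),
 *		.event_cfg = my_event_cfg,
 *	};
 *
 *	mhi_cntrl->cntrl_dev = &pdev->dev;
 *	mhi_cntrl->regs = mmio_base;
 *	mhi_cntrl->irq = irq_table;
 *	mhi_cntrl->runtime_get = my_runtime_get;
 *	mhi_cntrl->runtime_put = my_runtime_put;
 *	mhi_cntrl->status_cb = my_status_cb;
 *	mhi_cntrl->link_status = my_link_status;
 *
 *	ret = mhi_register_controller(mhi_cntrl, &my_config);
 *
 * The callbacks are mandatory: as checked above, registration fails with
 * -EINVAL without runtime_get/runtime_put and status_cb/link_status.
 */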
void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	/*
	 * Allocate RDDM table if specified, this table is for debugging purpose
	 */
	if (mhi_cntrl->rddm_size) {
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);

		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto bhie_error;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);

		if (mhi_cntrl->rddm_image)
			mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
	}

	mhi_cntrl->pre_init = true;

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

bhie_error:
	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_deinit_dev_ctxt(mhi_cntrl);
	mhi_cntrl->pre_init = false;
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);
static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with it is NULL. This scenario will happen during the
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}
struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;
	dev->parent = mhi_cntrl->cntrl_dev;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}
static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If channel supports LPM notifications then status_cb should
		 * be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels then xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
		if (ul_chan->auto_start) {
			ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
			if (ret)
				goto exit_probe;
		}
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If channel supports LPM notifications then status_cb should
		 * be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels then xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	if (dl_chan && dl_chan->auto_start)
		mhi_prepare_channel(mhi_cntrl, dl_chan);

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}
static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;
}
int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);
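/*
 * A minimal client driver registration sketch (my_* names are hypothetical;
 * the mhi_driver_register() wrapper in <linux/mhi.h> passes THIS_MODULE to
 * __mhi_driver_register()):
 *
 *	static const struct mhi_device_id my_id_table[] = {
 *		{ .chan = "LOOPBACK" },
 *		{},
 *	};
 *
 *	static struct mhi_driver my_driver = {
 *		.id_table = my_id_table,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.ul_xfer_cb = my_ul_cb,
 *		.dl_xfer_cb = my_dl_cb,
 *		.driver.name = "my_mhi_client",
 *	};
 *
 *	ret = mhi_driver_register(&my_driver);
 *
 * probe and remove are mandatory, as checked above.
 */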
void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);
static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
			      mhi_dev->chan_name);
}
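/*
 * With MHI_DEVICE_MODALIAS_FMT of the form "mhi:%s" (defined alongside
 * struct mhi_device_id), a device on the "LOOPBACK" channel publishes
 * MODALIAS=mhi:LOOPBACK, which is what module autoloading and mhi_match()
 * below key on.
 */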
static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->chan_name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}
struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
};
static int __init mhi_init(void)
{
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Host Interface");