/*
 * chnl_sm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Implements upper edge functions for Bridge driver channel module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 * The lower edge functions must be implemented by the Bridge driver
 * writer, and are declared in chnl_sm.h.
 *
 * Care is taken in this code to prevent simultaneous access to channel
 * queues from:
 * 1. Threads.
 * 2. io_dpc(), scheduled from the io_isr() as an event.
 *
 * This is done primarily by:
 * - Semaphores.
 * - state flags in the channel object; and
 * - ensuring the IO_Dispatch() routine, which is called from both
 *   CHNL_AddIOReq() and the DPC (if implemented), is not re-entered.
 *
 * Channel Invariant:
 * There is an important invariant condition which must be maintained per
 * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
 * which may cause timeouts and/or failure of function sync_wait_on_event.
 * This invariant condition is:
 *
 *     list_empty(&pchnl->io_completions) ==> pchnl->sync_event is reset
 * and
 *     !list_empty(&pchnl->io_completions) ==> pchnl->sync_event is set.
 */
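/*
 * Illustrative sketch, not part of the original driver: under the invariant
 * above, a debug check performed while holding chnl_mgr_lock could look like
 * the following; sync_is_set() is a hypothetical predicate for the event
 * state, named here only for the example.
 *
 *      static void assert_chnl_invariant(struct chnl_object *pchnl)
 *      {
 *              if (list_empty(&pchnl->io_completions))
 *                      DBC_ASSERT(!sync_is_set(pchnl->sync_event));
 *              else
 *                      DBC_ASSERT(sync_is_set(pchnl->sync_event));
 *      }
 */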
#include <linux/types.h>

/* ----------------------------------- OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/* ----------------------------------- Bridge Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>

/* ----------------------------------- Others */
#include <dspbridge/io_sm.h>

/* ----------------------------------- Define for This */
#define USERMODE_ADDR   PAGE_OFFSET

#define MAILBOX_IRQ INT_MAIL_MPU_IRQ
/* ----------------------------------- Function Prototypes */
static int create_chirp_list(struct list_head *list, u32 chirps);

static void free_chirp_list(struct list_head *list);

static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
			       u32 *chnl);
/*
 *  ======== bridge_chnl_add_io_req ========
 *      Enqueue an I/O request for data transfer on a channel to the DSP.
 *      The direction (mode) is specified in the channel object. Note the DSP
 *      address is specified for channels opened in direct I/O mode.
 */
int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
			   u32 byte_size, u32 buf_size,
			   u32 dw_dsp_addr, u32 dw_arg)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;
	u8 dw_state;
	bool is_eos;
	struct chnl_mgr *chnl_mgr_obj = pchnl->chnl_mgr_obj;
	u8 *host_sys_buf = NULL;
	bool sched_dpc = false;
	u16 mb_val = 0;

	is_eos = (byte_size == 0);

	/* Validate args */
	if (!host_buf || !pchnl)
		return -EFAULT;

	if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode))
		return -EPERM;

	/*
	 * Check the channel state: only queue chirp if channel state
	 * allows it.
	 */
	dw_state = pchnl->state;
	if (dw_state != CHNL_STATEREADY) {
		if (dw_state & CHNL_STATECANCEL)
			return -ECANCELED;
		if ((dw_state & CHNL_STATEEOS) &&
		    CHNL_IS_OUTPUT(pchnl->chnl_mode))
			return -EPIPE;
		/* No other possible states left */
		DBC_ASSERT(0);
	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		return -EFAULT;

	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
		if (!(host_buf < (void *)USERMODE_ADDR)) {
			host_sys_buf = host_buf;
			goto func_cont;
		}
		/* if addr in user mode, then copy to kernel space */
		host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
		if (host_sys_buf == NULL)
			return -ENOMEM;

		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = copy_from_user(host_sys_buf, host_buf,
						buf_size);
			if (status) {
				kfree(host_sys_buf);
				return -EFAULT;
			}
		}
	}
func_cont:
	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
	 * If DPC is scheduled in process context (iosm_schedule) and any
	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (pchnl->chnl_type == CHNL_PCPY) {
		/* This is a processor-copy channel. */
		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			/* Check buffer size on output channels for fit. */
			if (byte_size > io_buf_size(
					pchnl->chnl_mgr_obj->iomgr)) {
				status = -EINVAL;
				goto out;
			}
		}
	}

	/* Get a free chirp: */
	if (list_empty(&pchnl->free_packets_list)) {
		status = -EIO;
		goto out;
	}
	chnl_packet_obj = list_first_entry(&pchnl->free_packets_list,
					   struct chnl_irp, link);
	list_del(&chnl_packet_obj->link);

	/* Enqueue the chirp on the chnl's IORequest queue: */
	chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
		host_buf;
	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
		chnl_packet_obj->host_sys_buf = host_sys_buf;

	/*
	 * Note: for dma chans dw_dsp_addr contains dsp address
	 * of SM buffer.
	 */
	DBC_ASSERT(chnl_mgr_obj->word_size != 0);
	/* DSP address */
	chnl_packet_obj->dsp_tx_addr = dw_dsp_addr / chnl_mgr_obj->word_size;
	chnl_packet_obj->byte_size = byte_size;
	chnl_packet_obj->buf_size = buf_size;
	/* Only valid for output channel */
	chnl_packet_obj->arg = dw_arg;
	chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
				   CHNL_IOCSTATCOMPLETE);
	list_add_tail(&chnl_packet_obj->link, &pchnl->io_requests);
	pchnl->cio_reqs++;
	DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
	/*
	 * If end of stream, update the channel state to prevent
	 * more IOR's.
	 */
	if (is_eos)
		pchnl->state |= CHNL_STATEEOS;

	/* Legacy DSM Processor-Copy */
	DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
	/* Request IO from the DSP */
	io_request_chnl(chnl_mgr_obj->iomgr, pchnl,
			(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
			 IO_OUTPUT), &mb_val);
	sched_dpc = true;
out:
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	if (mb_val != 0)
		sm_interrupt_dsp(dev_ctxt, mb_val);

	/* Schedule a DPC, to do the actual data transfer: */
	if (sched_dpc)
		iosm_schedule(chnl_mgr_obj->iomgr);

	return status;
}
/*
 *  ======== bridge_chnl_cancel_io ========
 *      Return all I/O requests to the client which have not yet been
 *      transferred.  The channel's I/O completion object is
 *      signalled, and all the I/O requests are queued as IOC's, with the
 *      status field set to CHNL_IOCSTATCANCEL.
 *      This call is typically used in abort situations, and is a prelude to
 *      chnl_close();
 */
int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
{
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	u32 chnl_id = -1;
	s8 chnl_mode;
	struct chnl_irp *chirp, *tmp;
	struct chnl_mgr *chnl_mgr_obj = NULL;

	/* Check args: */
	if (!pchnl || !pchnl->chnl_mgr_obj)
		return -EFAULT;

	chnl_id = pchnl->chnl_id;
	chnl_mode = pchnl->chnl_mode;
	chnl_mgr_obj = pchnl->chnl_mgr_obj;

	/* Mark this channel as cancelled, to prevent further IORequests
	 * or dispatching. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);

	pchnl->state |= CHNL_STATECANCEL;

	if (list_empty(&pchnl->io_requests)) {
		spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
		return 0;
	}

	if (pchnl->chnl_type == CHNL_PCPY) {
		/* Indicate we have no more buffers available for transfer: */
		if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
			io_cancel_chnl(chnl_mgr_obj->iomgr, chnl_id);
		} else {
			/* Record that we no longer have output buffers
			 * available: */
			chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
		}
	}
	/* Move all IOR's to IOC queue: */
	list_for_each_entry_safe(chirp, tmp, &pchnl->io_requests, link) {
		list_del(&chirp->link);
		chirp->byte_size = 0;
		chirp->status |= CHNL_IOCSTATCANCEL;
		list_add_tail(&chirp->link, &pchnl->io_completions);
		pchnl->cio_cs++;
		pchnl->cio_reqs--;
	}
	DBC_ASSERT(pchnl->cio_reqs >= 0);

	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);

	return 0;
}
/*
 *  ======== bridge_chnl_close ========
 *      Ensures all pending I/O on this channel is cancelled, discards all
 *      queued I/O completion notifications, then frees the resources
 *      allocated for this channel, and makes the corresponding logical
 *      channel id available for subsequent use.
 */
int bridge_chnl_close(struct chnl_object *chnl_obj)
{
	int status;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

	/* Check args: */
	if (!pchnl)
		return -EFAULT;
	/* Cancel IO: this ensures no further IO requests or notifications */
	status = bridge_chnl_cancel_io(chnl_obj);
	if (status)
		return status;
	/* Assert I/O on this channel is now cancelled: Protects from io_dpc */
	DBC_ASSERT((pchnl->state & CHNL_STATECANCEL));
	/* Invalidate channel object: Protects from CHNL_GetIOCompletion() */
	/* Free the slot in the channel manager: */
	pchnl->chnl_mgr_obj->channels[pchnl->chnl_id] = NULL;
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	pchnl->chnl_mgr_obj->open_channels -= 1;
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (pchnl->ntfy_obj) {
		ntfy_delete(pchnl->ntfy_obj);
		kfree(pchnl->ntfy_obj);
		pchnl->ntfy_obj = NULL;
	}
	/* Reset channel event: (NOTE: user_event freed in user context) */
	if (pchnl->sync_event) {
		sync_reset_event(pchnl->sync_event);
		kfree(pchnl->sync_event);
		pchnl->sync_event = NULL;
	}
	/* Free I/O request and I/O completion queues: */
	free_chirp_list(&pchnl->io_completions);
	pchnl->cio_cs = 0;

	free_chirp_list(&pchnl->io_requests);
	pchnl->cio_reqs = 0;

	free_chirp_list(&pchnl->free_packets_list);

	/* Release channel object. */
	kfree(pchnl);

	return status;
}
/*
 *  ======== bridge_chnl_create ========
 *      Create a channel manager object, responsible for opening new channels
 *      and closing old ones for a given board.
 */
int bridge_chnl_create(struct chnl_mgr **channel_mgr,
		       struct dev_object *hdev_obj,
		       const struct chnl_mgrattrs *mgr_attrts)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = NULL;
	u8 max_channels;

	/* Check DBC requirements: */
	DBC_REQUIRE(channel_mgr != NULL);
	DBC_REQUIRE(mgr_attrts != NULL);
	DBC_REQUIRE(mgr_attrts->max_channels > 0);
	DBC_REQUIRE(mgr_attrts->max_channels <= CHNL_MAXCHANNELS);
	DBC_REQUIRE(mgr_attrts->word_size != 0);

	/* Allocate channel manager object */
	chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
	if (chnl_mgr_obj) {
		/*
		 * The max_channels attr must equal the # of supported chnls
		 * for each transport (# chnls for PCPY = DDMA = ZCPY): i.e.
		 *      mgr_attrts->max_channels = CHNL_MAXCHANNELS =
		 *                      DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
		 */
		DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
		max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
		/* Create array of channels */
		chnl_mgr_obj->channels = kzalloc(sizeof(struct chnl_object *)
						 * max_channels, GFP_KERNEL);
		if (chnl_mgr_obj->channels) {
			/* Initialize chnl_mgr object */
			chnl_mgr_obj->type = CHNL_TYPESM;
			chnl_mgr_obj->word_size = mgr_attrts->word_size;
			/* Total # chnls supported */
			chnl_mgr_obj->max_channels = max_channels;
			chnl_mgr_obj->open_channels = 0;
			chnl_mgr_obj->output_mask = 0;
			chnl_mgr_obj->last_output = 0;
			chnl_mgr_obj->dev_obj = hdev_obj;
			spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
		} else {
			status = -ENOMEM;
		}
	} else {
		status = -ENOMEM;
	}

	if (status) {
		bridge_chnl_destroy(chnl_mgr_obj);
		*channel_mgr = NULL;
	} else {
		/* Return channel manager object to caller... */
		*channel_mgr = chnl_mgr_obj;
	}

	return status;
}
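/*
 * Illustrative sketch, not part of the original driver: the platform
 * manager is expected to pair this call with bridge_chnl_destroy(); the
 * attribute values below are assumptions for the example only.
 *
 *      struct chnl_mgrattrs attrs = { .max_channels = CHNL_MAXCHANNELS,
 *                                     .word_size = 2 };
 *      struct chnl_mgr *mgr;
 *
 *      if (!bridge_chnl_create(&mgr, hdev_obj, &attrs))
 *              ...     (use mgr, then bridge_chnl_destroy(mgr))
 */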
/*
 *  ======== bridge_chnl_destroy ========
 *      Close all open channels, and destroy the channel manager.
 */
int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	u32 chnl_id;

	if (hchnl_mgr) {
		/* Close all open channels: */
		for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
		     chnl_id++) {
			status =
			    bridge_chnl_close(chnl_mgr_obj->channels
					      [chnl_id]);
			if (status)
				dev_dbg(bridge, "%s: Error status 0x%x\n",
					__func__, status);
		}

		/* Free channel manager object: */
		kfree(chnl_mgr_obj->channels);

		/* Set hchnl_mgr to NULL in device object. */
		dev_set_chnl_mgr(chnl_mgr_obj->dev_obj, NULL);
		/* Free this Chnl Mgr object: */
		kfree(hchnl_mgr);
	} else {
		status = -EFAULT;
	}

	return status;
}
/*
 *  ======== bridge_chnl_flush_io ========
 *      Flushes all the outstanding data requests on a channel.
 */
int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	s8 chnl_mode = -1;
	struct chnl_mgr *chnl_mgr_obj;
	struct chnl_ioc chnl_ioc_obj;

	/* Check args: */
	if (!pchnl) {
		status = -EFAULT;
		goto func_end;
	}
	if ((timeout == CHNL_IOCNOWAIT)
	    && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
		status = -EINVAL;
		goto func_end;
	}
	chnl_mode = pchnl->chnl_mode;
	chnl_mgr_obj = pchnl->chnl_mgr_obj;

	/* Note: Currently, if another thread continues to add IO
	 * requests to this channel, this function will continue to
	 * flush all such queued IO requests. */
	if (CHNL_IS_OUTPUT(chnl_mode)
	    && (pchnl->chnl_type == CHNL_PCPY)) {
		/* Wait for IO completions, up to the specified
		 * timeout: */
		while (!list_empty(&pchnl->io_requests) && !status) {
			status = bridge_chnl_get_ioc(chnl_obj,
					timeout, &chnl_ioc_obj);
			if (status)
				continue;

			if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT)
				status = -ETIMEDOUT;
		}
	} else {
		status = bridge_chnl_cancel_io(chnl_obj);
		/* Now, leave the channel in the ready state: */
		pchnl->state &= ~CHNL_STATECANCEL;
	}
func_end:
	DBC_ENSURE(status || list_empty(&pchnl->io_requests));
	return status;
}
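/*
 * Illustrative sketch, not part of the original driver: draining an output
 * channel before closing it. The timeout is in the units expected by
 * sync_wait_on_event() (assumed to be milliseconds here).
 *
 *      if (!bridge_chnl_flush_io(chnl, 1000))
 *              status = bridge_chnl_close(chnl);
 */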
/*
 *  ======== bridge_chnl_get_info ========
 *      Retrieve information related to a channel.
 */
int bridge_chnl_get_info(struct chnl_object *chnl_obj,
			 struct chnl_info *channel_info)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

	if (channel_info != NULL) {
		if (pchnl) {
			/* Return the requested information: */
			channel_info->chnl_mgr = pchnl->chnl_mgr_obj;
			channel_info->event_obj = pchnl->user_event;
			channel_info->cnhl_id = pchnl->chnl_id;
			channel_info->mode = pchnl->chnl_mode;
			channel_info->bytes_tx = pchnl->bytes_moved;
			channel_info->process = pchnl->process;
			channel_info->sync_event = pchnl->sync_event;
			channel_info->cio_cs = pchnl->cio_cs;
			channel_info->cio_reqs = pchnl->cio_reqs;
			channel_info->state = pchnl->state;
		} else {
			status = -EFAULT;
		}
	} else {
		status = -EFAULT;
	}

	return status;
}
/*
 *  ======== bridge_chnl_get_ioc ========
 *      Optionally wait for I/O completion on a channel.  Dequeue an I/O
 *      completion record, which contains information about the completed
 *      I/O request.
 *      Note: Ensures Channel Invariant (see notes above).
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
			struct chnl_ioc *chan_ioc)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (!chan_ioc || !pchnl) {
		status = -EFAULT;
	} else if (timeout == CHNL_IOCNOWAIT) {
		if (list_empty(&pchnl->io_completions))
			status = -EREMOTEIO;
	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (status)
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (timeout !=
	    CHNL_IOCNOWAIT && list_empty(&pchnl->io_completions)) {
		if (timeout == CHNL_IOCINFINITE)
			timeout = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (list_empty(&pchnl->io_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set chan_ioc; */
		DBC_ASSERT(!list_empty(&pchnl->io_completions));
		chnl_packet_obj = list_first_entry(&pchnl->io_completions,
						   struct chnl_irp, link);
		list_del(&chnl_packet_obj->link);
		/* Update chan_ioc from channel state and chirp: */
		pchnl->cio_cs--;
		/*
		 * If this is a zero-copy channel, then set IOC's pbuf
		 * to the DSP's address. This DSP address will get
		 * translated to user's virtual addr later.
		 */
		host_sys_buf = chnl_packet_obj->host_sys_buf;
		ioc.buf = chnl_packet_obj->host_user_buf;
		ioc.byte_size = chnl_packet_obj->byte_size;
		ioc.buf_size = chnl_packet_obj->buf_size;
		ioc.arg = chnl_packet_obj->arg;
		ioc.status |= chnl_packet_obj->status;
		/* Place the used chirp on the free list: */
		list_add_tail(&chnl_packet_obj->link,
			      &pchnl->free_packets_list);
	} else {
		ioc.buf = NULL;
		ioc.byte_size = 0;
		ioc.buf_size = 0;
		ioc.arg = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!list_empty(&pchnl->io_completions)) {
		/* Since DSPStream_Reclaim() does not take a timeout
		 * parameter, we pass the stream's timeout value to
		 * bridge_chnl_get_ioc. We cannot determine whether or not
		 * we have waited in User mode. Since the stream's timeout
		 * value may be non-zero, we still have to set the event.
		 * Therefore, this optimization is taken out.
		 *
		 * if (timeout == CHNL_IOCNOWAIT) {
		 *      ... ensure event is set ...
		 *      sync_set_event(pchnl->sync_event);
		 * } */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		if (!(ioc.buf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.buf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/* host_user_buf */
		status = copy_to_user(ioc.buf, host_sys_buf, ioc.byte_size);
		if (status) {
			if (current->flags & PF_EXITING)
				status = 0;
			else
				status = -EFAULT;
		}
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*chan_ioc = ioc;
func_end:
	return status;
}
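/*
 * Illustrative sketch, not part of the original driver: reclaiming
 * completions until the DSP marks end of stream:
 *
 *      do {
 *              status = bridge_chnl_get_ioc(chnl, CHNL_IOCINFINITE, &ioc);
 *      } while (!status && !(ioc.status & CHNL_IOCSTATEOS));
 */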
/*
 *  ======== bridge_chnl_get_mgr_info ========
 *      Retrieve information related to the channel manager.
 */
int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
			     struct chnl_mgrinfo *mgr_info)
{
	struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;

	if (!mgr_info || !hchnl_mgr)
		return -EFAULT;

	if (ch_id > CHNL_MAXCHANNELS)
		return -ECHRNG;

	/* Return the requested information: */
	mgr_info->chnl_obj = chnl_mgr_obj->channels[ch_id];
	mgr_info->open_channels = chnl_mgr_obj->open_channels;
	mgr_info->type = chnl_mgr_obj->type;
	/* total # of chnls */
	mgr_info->max_channels = chnl_mgr_obj->max_channels;

	return 0;
}
/*
 *  ======== bridge_chnl_idle ========
 *      Idles a particular channel.
 */
int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
		     bool flush_data)
{
	s8 chnl_mode;
	struct chnl_mgr *chnl_mgr_obj;
	int status = 0;

	DBC_REQUIRE(chnl_obj);

	chnl_mode = chnl_obj->chnl_mode;
	chnl_mgr_obj = chnl_obj->chnl_mgr_obj;

	if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) {
		/* Wait for IO completions, up to the specified timeout: */
		status = bridge_chnl_flush_io(chnl_obj, timeout);
	} else {
		status = bridge_chnl_cancel_io(chnl_obj);

		/* Reset the byte count and put channel back in ready state. */
		chnl_obj->bytes_moved = 0;
		chnl_obj->state &= ~CHNL_STATECANCEL;
	}

	return status;
}
/*
 *  ======== bridge_chnl_open ========
 *      Open a new half-duplex channel to the DSP board.
 */
int bridge_chnl_open(struct chnl_object **chnl,
		     struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
		     u32 ch_id, const struct chnl_attr *pattrs)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	struct chnl_object *pchnl = NULL;
	struct sync_object *sync_event = NULL;

	/* Ensure DBC requirements: */
	DBC_REQUIRE(chnl != NULL);
	DBC_REQUIRE(pattrs != NULL);
	DBC_REQUIRE(hchnl_mgr != NULL);
	*chnl = NULL;

	/* Validate Args: */
	if (!pattrs->uio_reqs)
		return -EINVAL;

	if (!hchnl_mgr)
		return -EFAULT;

	if (ch_id != CHNL_PICKFREE) {
		if (ch_id >= chnl_mgr_obj->max_channels)
			return -ECHRNG;
		if (chnl_mgr_obj->channels[ch_id] != NULL)
			return -EALREADY;
	} else {
		/* Check for free channel */
		status = search_free_channel(chnl_mgr_obj, &ch_id);
		if (status)
			return status;
	}

	DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);

	/* Create channel object: */
	pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
	if (!pchnl)
		return -ENOMEM;

	/* Protect queues from io_dpc: */
	pchnl->state = CHNL_STATECANCEL;

	/* Allocate initial IOR and IOC queues: */
	status = create_chirp_list(&pchnl->free_packets_list,
				   pattrs->uio_reqs);
	if (status)
		goto out_err;

	INIT_LIST_HEAD(&pchnl->io_requests);
	INIT_LIST_HEAD(&pchnl->io_completions);

	pchnl->chnl_packets = pattrs->uio_reqs;

	sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!sync_event) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(sync_event);

	pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
	if (!pchnl->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(pchnl->ntfy_obj);

	/* Initialize CHNL object fields: */
	pchnl->chnl_mgr_obj = chnl_mgr_obj;
	pchnl->chnl_id = ch_id;
	pchnl->chnl_mode = chnl_mode;
	pchnl->user_event = sync_event;
	pchnl->sync_event = sync_event;
	/* Get the process handle */
	pchnl->process = current->tgid;
	pchnl->bytes_moved = 0;
	/* Default to proc-copy */
	pchnl->chnl_type = CHNL_PCPY;

	/* Insert channel object in channel manager: */
	chnl_mgr_obj->channels[pchnl->chnl_id] = pchnl;
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	chnl_mgr_obj->open_channels++;
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	/* Return result... */
	pchnl->state = CHNL_STATEREADY;
	*chnl = pchnl;

	return status;

out_err:
	/* Free memory */
	free_chirp_list(&pchnl->io_completions);
	free_chirp_list(&pchnl->io_requests);
	free_chirp_list(&pchnl->free_packets_list);
	kfree(sync_event);

	if (pchnl->ntfy_obj) {
		ntfy_delete(pchnl->ntfy_obj);
		kfree(pchnl->ntfy_obj);
		pchnl->ntfy_obj = NULL;
	}
	kfree(pchnl);

	return status;
}
/*
 *  ======== bridge_chnl_register_notify ========
 *      Registers for events on a particular channel.
 */
int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
				u32 event_mask, u32 notify_type,
				struct dsp_notification *hnotification)
{
	int status = 0;

	DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));

	if (event_mask)
		status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
				       event_mask, notify_type);
	else
		status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification);

	return status;
}
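/*
 * Illustrative sketch, not part of the original driver: registering for
 * I/O-completion events; DSP_SIGNALEVENT as the notify_type is an
 * assumption based on the usual dbdefs.h definitions.
 *
 *      status = bridge_chnl_register_notify(chnl, DSP_STREAMIOCOMPLETION,
 *                                           DSP_SIGNALEVENT, &notification);
 */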
/*
 *  ======== create_chirp_list ========
 *      Initialize a queue of channel I/O Request/Completion packets.
 *      list:    Pointer to a list_head
 *      chirps:  Number of Chirps to allocate.
 *      Returns: 0 if successful, error code otherwise.
 */
static int create_chirp_list(struct list_head *list, u32 chirps)
{
	struct chnl_irp *chirp;
	u32 i;

	INIT_LIST_HEAD(list);

	/* Make N chirps and place on queue. */
	for (i = 0; i < chirps; i++) {
		chirp = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
		if (!chirp)
			break;
		list_add_tail(&chirp->link, list);
	}

	/* If we couldn't allocate all chirps, free those allocated: */
	if (i != chirps) {
		free_chirp_list(list);
		return -ENOMEM;
	}

	return 0;
}
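/*
 * Illustrative note, an assumption drawn from bridge_chnl_open(): the pool
 * is sized from pattrs->uio_reqs, so the chirp count bounds how many I/O
 * requests may be outstanding on a channel at once, e.g.:
 *
 *      status = create_chirp_list(&pchnl->free_packets_list, 16);
 */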
/*
 *  ======== free_chirp_list ========
 *      Free the queue of Chirps.
 */
static void free_chirp_list(struct list_head *chirp_list)
{
	struct chnl_irp *chirp, *tmp;

	DBC_REQUIRE(chirp_list != NULL);

	list_for_each_entry_safe(chirp, tmp, chirp_list, link) {
		list_del(&chirp->link);
		kfree(chirp);
	}
}
/*
 *  ======== search_free_channel ========
 *      Search for a free channel slot in the array of channel pointers.
 */
static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
			       u32 *chnl)
{
	int status = -ENOSR;
	u32 i;

	DBC_REQUIRE(chnl_mgr_obj);

	for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
		if (chnl_mgr_obj->channels[i] == NULL) {
			status = 0;
			*chnl = i;
			break;
		}
	}

	return status;
}