return 0;
}
+// Workqueue handler: recover the context embedding @work and flush its
+// pending isochronous completions in process context.
+static void flush_completions_work(struct work_struct *work)
+{
+ struct fw_iso_context *ctx = container_of(work, struct fw_iso_context, work);
+
+ fw_iso_context_flush_completions(ctx);
+}
+
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
int type, int channel, int speed, size_t header_size,
fw_iso_callback_t callback, void *callback_data)
ctx->header_size = header_size;
ctx->callback.sc = callback;
ctx->callback_data = callback_data;
+ INIT_WORK(&ctx->work, flush_completions_work);
trace_isoc_outbound_allocate(ctx, channel, speed);
trace_isoc_inbound_single_allocate(ctx, channel, header_size);
* to process the context asynchronously, fw_iso_context_schedule_flush_completions() is available
* instead.
*
- * Context: Process context. May sleep due to disable_work_sync().
+ * Context: Process context.
*/
int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
{
- int err;
-
trace_isoc_outbound_flush_completions(ctx);
trace_isoc_inbound_single_flush_completions(ctx);
trace_isoc_inbound_multiple_flush_completions(ctx);
- might_sleep();
-
- // Avoid dead lock due to programming mistake.
- if (WARN_ON_ONCE(current_work() == &ctx->work))
- return 0;
-
- disable_work_sync(&ctx->work);
-
- err = ctx->card->driver->flush_iso_completions(ctx);
-
- enable_work(&ctx->work);
-
- return err;
+ // Delegate directly to the card driver; no work-item fencing is needed here.
+ return ctx->card->driver->flush_iso_completions(ctx);
}
EXPORT_SYMBOL(fw_iso_context_flush_completions);
}
}
-static void ohci_isoc_context_work(struct work_struct *work)
-{
- struct fw_iso_context *base = container_of(work, struct fw_iso_context, work);
- struct iso_context *isoc_ctx = container_of(base, struct iso_context, base);
- struct context *ctx = &isoc_ctx->context;
- struct descriptor *d, *last;
- u32 address;
- int z;
- struct descriptor_buffer *desc;
-
- desc = list_entry(ctx->buffer_list.next, struct descriptor_buffer, list);
- last = ctx->last;
- while (last->branch_address != 0) {
- struct descriptor_buffer *old_desc = desc;
-
- address = le32_to_cpu(last->branch_address);
- z = address & 0xf;
- address &= ~0xf;
- ctx->current_bus = address;
-
- // If the branch address points to a buffer outside of the current buffer, advance
- // to the next buffer.
- if (address < desc->buffer_bus || address >= desc->buffer_bus + desc->used)
- desc = list_entry(desc->list.next, struct descriptor_buffer, list);
- d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
- last = find_branch_descriptor(d, z);
-
- if (!ctx->callback(ctx, d, last))
- break;
-
- if (old_desc != desc) {
- // If we've advanced to the next buffer, move the previous buffer to the
- // free list.
- old_desc->used = 0;
- guard(spinlock_irqsave)(&ctx->ohci->lock);
- list_move_tail(&old_desc->list, &ctx->buffer_list);
- }
- ctx->last = last;
- }
-}
-
/*
* Allocate a new buffer and add it to the list of free buffers for this
* context. Must be called with ohci->lock held.
ret = context_init(&ctx->context, ohci, regs, callback);
if (ret < 0)
goto out_with_header;
- fw_iso_context_init_work(&ctx->base, ohci_isoc_context_work);
if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
set_multichannel_mask(ohci, 0);
int ret = 0;
if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
- ohci_isoc_context_work(&base->work);
+ // Invoke the handler directly as a plain function call; the tasklet softIRQ is no longer used to process isochronous contexts.
+ context_tasklet((unsigned long)&ctx->context);
switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT: