struct iio_dma_buffer_block *block, *_block;
LIST_HEAD(block_list);
- spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
- list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
- spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);
+ scoped_guard(spinlock_irq, &iio_dma_buffer_dead_blocks_lock)
+ list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
list_for_each_entry_safe(block, _block, &block_list, head)
iio_buffer_block_release(&block->kref);
static void iio_buffer_block_release_atomic(struct kref *kref)
{
struct iio_dma_buffer_block *block;
- unsigned long flags;
block = container_of(kref, struct iio_dma_buffer_block, kref);
- spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
- list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
- spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);
+ scoped_guard(spinlock_irqsave, &iio_dma_buffer_dead_blocks_lock)
+ list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
schedule_work(&iio_dma_buffer_cleanup_work);
}
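
For reference, a minimal standalone sketch of the scoped_guard() idiom from <linux/cleanup.h> that the two hunks above switch to; demo_lock, demo_list and the demo_* functions are hypothetical and not part of the driver:

#include <linux/cleanup.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_list);

/*
 * Callable from any context: the irqsave guard saves and restores the
 * IRQ flags, and the lock is dropped when the guarded statement ends.
 */
static void demo_add(struct list_head *entry)
{
	scoped_guard(spinlock_irqsave, &demo_lock)
		list_add_tail(entry, &demo_list);
	/* demo_lock is no longer held here */
}

/* Drain the list under the lock, then let the caller process it unlocked. */
static void demo_drain(struct list_head *out)
{
	scoped_guard(spinlock_irq, &demo_lock)
		list_splice_tail_init(&demo_list, out);
}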
static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
{
- struct iio_dma_buffer_block *block;
-
- block = kzalloc(sizeof(*block), GFP_KERNEL);
+ struct iio_dma_buffer_block *block __free(kfree) =
+ kzalloc(sizeof(*block), GFP_KERNEL);
if (!block)
return NULL;
if (fileio) {
block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
&block->phys_addr, GFP_KERNEL);
- if (!block->vaddr) {
- kfree(block);
+ if (!block->vaddr)
return NULL;
- }
}
block->fileio = fileio;
if (!fileio)
atomic_inc(&queue->num_dmabufs);
- return block;
+ return_ptr(block);
}
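
The allocation path now relies on scope-based freeing; below is a minimal sketch of the __free(kfree)/return_ptr() idiom using a hypothetical demo_ctx type, not the real iio_dma_buffer_block:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct demo_ctx {
	void *buf;
};

static struct demo_ctx *demo_ctx_alloc(size_t size)
{
	/* kfree() runs automatically when ctx goes out of scope ... */
	struct demo_ctx *ctx __free(kfree) = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;	/* nothing was allocated, cleanup is a no-op */

	ctx->buf = kzalloc(size, GFP_KERNEL);
	if (!ctx->buf)
		return NULL;	/* ctx is freed here without an explicit kfree() */

	/*
	 * ... unless ownership is handed to the caller with return_ptr(),
	 * which disarms the automatic cleanup.
	 */
	return_ptr(ctx);
}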
static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
struct iio_dma_buffer_queue *queue = block->queue;
- unsigned long flags;
bool cookie;
cookie = dma_fence_begin_signalling();
- spin_lock_irqsave(&queue->list_lock, flags);
- _iio_dma_buffer_block_done(block);
- spin_unlock_irqrestore(&queue->list_lock, flags);
+ scoped_guard(spinlock_irqsave, &queue->list_lock)
+ _iio_dma_buffer_block_done(block);
if (!block->fileio)
iio_buffer_signal_dmabuf_done(block->fence, 0);
struct list_head *list)
{
struct iio_dma_buffer_block *block, *_block;
- unsigned long flags;
bool cookie;
cookie = dma_fence_begin_signalling();
- spin_lock_irqsave(&queue->list_lock, flags);
- list_for_each_entry_safe(block, _block, list, head) {
- list_del(&block->head);
- block->bytes_used = 0;
- _iio_dma_buffer_block_done(block);
+ scoped_guard(spinlock_irqsave, &queue->list_lock) {
+ list_for_each_entry_safe(block, _block, list, head) {
+ list_del(&block->head);
+ block->bytes_used = 0;
+ _iio_dma_buffer_block_done(block);
- if (!block->fileio)
- iio_buffer_signal_dmabuf_done(block->fence, -EINTR);
- iio_buffer_block_put_atomic(block);
+ if (!block->fileio)
+ iio_buffer_signal_dmabuf_done(block->fence,
+ -EINTR);
+ iio_buffer_block_put_atomic(block);
+ }
}
- spin_unlock_irqrestore(&queue->list_lock, flags);
if (queue->fileio.enabled)
queue->fileio.enabled = false;
struct iio_dma_buffer_block *block;
bool try_reuse = false;
size_t size;
- int ret = 0;
int i;
/*
size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
queue->buffer.length, 2);
- mutex_lock(&queue->lock);
+ guard(mutex)(&queue->lock);
queue->fileio.enabled = iio_dma_buffer_can_use_fileio(queue);
/* If DMABUFs were created, disable fileio interface */
if (!queue->fileio.enabled)
- goto out_unlock;
+ return 0;
/* Allocations are page aligned */
if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
queue->fileio.block_size = size;
queue->fileio.active_block = NULL;
- spin_lock_irq(&queue->list_lock);
- for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
- block = queue->fileio.blocks[i];
+ scoped_guard(spinlock_irq, &queue->list_lock) {
+ for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+ block = queue->fileio.blocks[i];
- /* If we can't re-use it free it */
- if (block && (!iio_dma_block_reusable(block) || !try_reuse))
- block->state = IIO_BLOCK_STATE_DEAD;
- }
+ /* If we can't re-use it free it */
+ if (block && (!iio_dma_block_reusable(block) || !try_reuse))
+ block->state = IIO_BLOCK_STATE_DEAD;
+ }
- /*
- * At this point all blocks are either owned by the core or marked as
- * dead. This means we can reset the lists without having to fear
- * corrution.
- */
- spin_unlock_irq(&queue->list_lock);
+ /*
+ * At this point all blocks are either owned by the core or
+ * marked as dead. This means we can reset the lists without
+ * having to fear corruption.
+ */
+ }
INIT_LIST_HEAD(&queue->incoming);
if (!block) {
block = iio_dma_buffer_alloc_block(queue, size, true);
- if (!block) {
- ret = -ENOMEM;
- goto out_unlock;
- }
+ if (!block)
+ return -ENOMEM;
+
queue->fileio.blocks[i] = block;
}
}
}
-out_unlock:
- mutex_unlock(&queue->lock);
-
- return ret;
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_request_update, "IIO_DMA_BUFFER");
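
guard(mutex) keeps the lock held until the enclosing function returns, which is what lets the error paths above return directly instead of jumping to out_unlock; a standalone sketch with hypothetical demo_* names:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);
static bool demo_enabled;
static size_t demo_size;

static int demo_update(size_t size)
{
	guard(mutex)(&demo_mutex);	/* unlocked on every return below */

	if (!demo_enabled)
		return 0;		/* early exit, no goto/label needed */

	if (!size)
		return -EINVAL;

	demo_size = size;
	return 0;
}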
{
unsigned int i;
- spin_lock_irq(&queue->list_lock);
- for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
- if (!queue->fileio.blocks[i])
- continue;
- queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
+ scoped_guard(spinlock_irq, &queue->list_lock) {
+ for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+ if (!queue->fileio.blocks[i])
+ continue;
+ queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
+ }
}
- spin_unlock_irq(&queue->list_lock);
INIT_LIST_HEAD(&queue->incoming);
struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
struct iio_dma_buffer_block *block, *_block;
- mutex_lock(&queue->lock);
+ guard(mutex)(&queue->lock);
queue->active = true;
list_for_each_entry_safe(block, _block, &queue->incoming, head) {
list_del(&block->head);
iio_dma_buffer_submit_block(queue, block);
}
- mutex_unlock(&queue->lock);
return 0;
}
{
struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
- mutex_lock(&queue->lock);
+ guard(mutex)(&queue->lock);
queue->active = false;
if (queue->ops && queue->ops->abort)
queue->ops->abort(queue);
- mutex_unlock(&queue->lock);
return 0;
}
struct iio_dma_buffer_block *block;
unsigned int idx;
- spin_lock_irq(&queue->list_lock);
+ guard(spinlock_irq)(&queue->list_lock);
idx = queue->fileio.next_dequeue;
block = queue->fileio.blocks[idx];
- if (block->state == IIO_BLOCK_STATE_DONE) {
- idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
- queue->fileio.next_dequeue = idx;
- } else {
- block = NULL;
- }
+ if (block->state != IIO_BLOCK_STATE_DONE)
+ return NULL;
- spin_unlock_irq(&queue->list_lock);
+ idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
+ queue->fileio.next_dequeue = idx;
return block;
}
if (n < buffer->bytes_per_datum)
return -EINVAL;
- mutex_lock(&queue->lock);
+ guard(mutex)(&queue->lock);
if (!queue->fileio.active_block) {
block = iio_dma_buffer_dequeue(queue);
- if (block == NULL) {
- ret = 0;
- goto out_unlock;
- }
+ if (!block)
+ return 0;
+
queue->fileio.pos = 0;
queue->fileio.active_block = block;
} else {
ret = copy_from_user(addr, user_buffer, n);
else
ret = copy_to_user(user_buffer, addr, n);
- if (ret) {
- ret = -EFAULT;
- goto out_unlock;
- }
+ if (ret)
+ return -EFAULT;
queue->fileio.pos += n;
iio_dma_buffer_enqueue(queue, block);
}
- ret = n;
-
-out_unlock:
- mutex_unlock(&queue->lock);
-
- return ret;
+ return n;
}
/**
* but won't increase since all blocks are in use.
*/
- mutex_lock(&queue->lock);
+ guard(mutex)(&queue->lock);
if (queue->fileio.active_block)
data_available += queue->fileio.active_block->size;
- spin_lock_irq(&queue->list_lock);
+ guard(spinlock_irq)(&queue->list_lock);
for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
block = queue->fileio.blocks[i];
data_available += block->size;
}
- spin_unlock_irq(&queue->list_lock);
- mutex_unlock(&queue->lock);
-
return data_available;
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_usage, "IIO_DMA_BUFFER");
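
Guards nest and are released in reverse declaration order, which is how the usage accounting above can take the queue mutex and then the list spinlock without writing explicit unlocks; a sketch with made-up demo_* names:

#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(demo_mutex);
static DEFINE_SPINLOCK(demo_lock);
static size_t demo_sizes[4];

static size_t demo_usage(void)
{
	size_t total = 0;
	unsigned int i;

	guard(mutex)(&demo_mutex);
	guard(spinlock_irq)(&demo_lock);

	for (i = 0; i < ARRAY_SIZE(demo_sizes); i++)
		total += demo_sizes[i];

	/* released in reverse order (spinlock, then mutex) on return */
	return total;
}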
*/
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
- mutex_lock(&queue->lock);
+ guard(mutex)(&queue->lock);
iio_dma_buffer_fileio_free(queue);
queue->ops = NULL;
-
- mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_exit, "IIO_DMA_BUFFER");