iio: buffer-dma: Fix coding style complaints
author    Nuno Sá <nuno.sa@analog.com>
          Fri, 19 Dec 2025 15:28:15 +0000 (15:28 +0000)
committer Jonathan Cameron <Jonathan.Cameron@huawei.com>
          Sun, 21 Dec 2025 12:00:27 +0000 (12:00 +0000)
Just making sure checkpatch is happy. No functional change intended.

Signed-off-by: Nuno Sá <nuno.sa@analog.com>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
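
For reference, the complaints being fixed are the ones reported by running
checkpatch directly on the touched files, e.g. (a typical invocation; exact
flags may vary):

    ./scripts/checkpatch.pl --strict -f drivers/iio/buffer/industrialio-buffer-dma.c
    ./scripts/checkpatch.pl --strict -f include/linux/iio/buffer-dma.h

Most of the hits are continuation-line alignment: wrapped parameters are now
aligned with the opening parenthesis, and declarations that would otherwise
exceed the line limit are split after the return type instead.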
drivers/iio/buffer/industrialio-buffer-dma.c
include/linux/iio/buffer-dma.h

diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index ed187bb23708eafd51556e49cd9d1eef2060d3e2..1c94b334f98732e101be3c71f182e57c206140ad 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -169,8 +169,9 @@ static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
        return container_of(buf, struct iio_dma_buffer_queue, buffer);
 }
 
-static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
-       struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
+static struct iio_dma_buffer_block *
+iio_dma_buffer_alloc_block(struct iio_dma_buffer_queue *queue, size_t size,
+                          bool fileio)
 {
        struct iio_dma_buffer_block *block __free(kfree) =
                        kzalloc(sizeof(*block), GFP_KERNEL);
@@ -254,7 +255,7 @@ EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_block_done, "IIO_DMA_BUFFER");
  * hand the blocks back to the queue.
  */
 void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
-       struct list_head *list)
+                                    struct list_head *list)
 {
        struct iio_dma_buffer_block *block, *_block;
        bool cookie;
@@ -434,7 +435,7 @@ static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
 }
 
 static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
-       struct iio_dma_buffer_block *block)
+                                       struct iio_dma_buffer_block *block)
 {
        int ret;
 
@@ -478,8 +479,7 @@ static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
  *
  * This will allocate the DMA buffers and start the DMA transfers.
  */
-int iio_dma_buffer_enable(struct iio_buffer *buffer,
-       struct iio_dev *indio_dev)
+int iio_dma_buffer_enable(struct iio_buffer *buffer, struct iio_dev *indio_dev)
 {
        struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
        struct iio_dma_buffer_block *block, *_block;
@@ -503,8 +503,7 @@ EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enable, "IIO_DMA_BUFFER");
  * Needs to be called when the device that the buffer is attached to stops
  * sampling. Typically should be the iio_buffer_access_ops disable callback.
  */
-int iio_dma_buffer_disable(struct iio_buffer *buffer,
-       struct iio_dev *indio_dev)
+int iio_dma_buffer_disable(struct iio_buffer *buffer, struct iio_dev *indio_dev)
 {
        struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
 
@@ -519,7 +518,7 @@ int iio_dma_buffer_disable(struct iio_buffer *buffer,
 EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_disable, "IIO_DMA_BUFFER");
 
 static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
-       struct iio_dma_buffer_block *block)
+                                  struct iio_dma_buffer_block *block)
 {
        if (block->state == IIO_BLOCK_STATE_DEAD) {
                iio_buffer_block_put(block);
@@ -531,8 +530,8 @@ static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
        }
 }
 
-static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
-       struct iio_dma_buffer_queue *queue)
+static struct iio_dma_buffer_block *
+iio_dma_buffer_dequeue(struct iio_dma_buffer_queue *queue)
 {
        struct iio_dma_buffer_block *block;
        unsigned int idx;
@@ -661,8 +660,7 @@ size_t iio_dma_buffer_usage(struct iio_buffer *buf)
        for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
                block = queue->fileio.blocks[i];
 
-               if (block != queue->fileio.active_block
-                   && block->state == IIO_BLOCK_STATE_DONE)
+               if (block != queue->fileio.active_block && block->state == IIO_BLOCK_STATE_DONE)
                        data_available += block->size;
        }
 
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index cd2ba4bb75015b7c2d492f6c7f7b74e37d622ce0..ef8687a88b73302adb695da0c6931d6cb3987d56 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -119,7 +119,12 @@ struct iio_dma_buffer_queue {
        struct device *dev;
        const struct iio_dma_buffer_ops *ops;
 
+       /*
+        * A mutex to protect accessing, configuring (e.g. enqueuing DMA
+        * blocks) and doing file I/O on struct iio_dma_buffer_queue objects.
+        */
        struct mutex lock;
+       /* A spin lock to protect adding/removing blocks on the queue list */
        spinlock_t list_lock;
        struct list_head incoming;
 
@@ -136,20 +141,19 @@ struct iio_dma_buffer_queue {
  */
 struct iio_dma_buffer_ops {
        int (*submit)(struct iio_dma_buffer_queue *queue,
-               struct iio_dma_buffer_block *block);
+                     struct iio_dma_buffer_block *block);
        void (*abort)(struct iio_dma_buffer_queue *queue);
 };
 
 void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
 void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
-       struct list_head *list);
+                                    struct list_head *list);
 
-int iio_dma_buffer_enable(struct iio_buffer *buffer,
-       struct iio_dev *indio_dev);
+int iio_dma_buffer_enable(struct iio_buffer *buffer, struct iio_dev *indio_dev);
 int iio_dma_buffer_disable(struct iio_buffer *buffer,
-       struct iio_dev *indio_dev);
+                          struct iio_dev *indio_dev);
 int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
-       char __user *user_buffer);
+                       char __user *user_buffer);
 int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
                         const char __user *user_buffer);
 size_t iio_dma_buffer_usage(struct iio_buffer *buffer);
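
As an aside, the two comments added to buffer-dma.h document the queue's split
locking scheme: the mutex serializes configuration and file I/O on the queue,
while the spinlock protects the incoming block list, which is also touched
from the DMA completion path. A minimal sketch of that pattern, with
hypothetical function names and assuming the block's list_head member is
called "head" as in the real driver (not the actual driver code):

    #include <linux/cleanup.h>
    #include <linux/iio/buffer-dma.h>
    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/spinlock.h>

    static void sketch_enqueue(struct iio_dma_buffer_queue *queue,
                               struct iio_dma_buffer_block *block)
    {
            unsigned long flags;

            /*
             * list_lock: a short critical section that may race with the
             * DMA completion path, so take the irqsave variant.
             */
            spin_lock_irqsave(&queue->list_lock, flags);
            list_add_tail(&block->head, &queue->incoming);
            spin_unlock_irqrestore(&queue->list_lock, flags);
    }

    static int sketch_configure(struct iio_dma_buffer_queue *queue)
    {
            /* lock: serializes buffer setup and file I/O; may sleep */
            guard(mutex)(&queue->lock);
            /* ... allocate blocks and hand them to sketch_enqueue() ... */
            return 0;
    }

The split keeps the sleeping mutex out of any path that can run in interrupt
context, which is why block completion only ever needs to take list_lock.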