return container_of(buf, struct iio_dma_buffer_queue, buffer);
}
-static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
- struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
+static struct iio_dma_buffer_block *
+iio_dma_buffer_alloc_block(struct iio_dma_buffer_queue *queue, size_t size,
+ bool fileio)
{
struct iio_dma_buffer_block *block __free(kfree) =
kzalloc(sizeof(*block), GFP_KERNEL);
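For context, the allocation above relies on the scope-based cleanup helpers from <linux/cleanup.h>. A minimal sketch of the pattern, using a hypothetical example_alloc_block() that only illustrates the ownership handling:

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/iio/buffer-dma.h>

static struct iio_dma_buffer_block *example_alloc_block(size_t size)
{
	/* kfree()d automatically on every early return path. */
	struct iio_dma_buffer_block *block __free(kfree) =
		kzalloc(sizeof(*block), GFP_KERNEL);

	if (!block)
		return NULL;

	block->size = size;

	/* return_ptr() hands ownership to the caller and suppresses the
	 * automatic kfree(). */
	return_ptr(block);
}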
* hand the blocks back to the queue.
*/
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
- struct list_head *list)
+ struct list_head *list)
{
struct iio_dma_buffer_block *block, *_block;
bool cookie;
}
static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
- struct iio_dma_buffer_block *block)
+ struct iio_dma_buffer_block *block)
{
int ret;
*
* This will allocate the DMA buffers and start the DMA transfers.
*/
-int iio_dma_buffer_enable(struct iio_buffer *buffer,
- struct iio_dev *indio_dev)
+int iio_dma_buffer_enable(struct iio_buffer *buffer, struct iio_dev *indio_dev)
{
struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
struct iio_dma_buffer_block *block, *_block;
* Needs to be called when the device that the buffer is attached to stops
* sampling. Typically should be the iio_buffer_access_ops disable callback.
*/
-int iio_dma_buffer_disable(struct iio_buffer *buffer,
- struct iio_dev *indio_dev)
+int iio_dma_buffer_disable(struct iio_buffer *buffer, struct iio_dev *indio_dev)
{
struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_disable, "IIO_DMA_BUFFER");
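Since these helpers are exported into the "IIO_DMA_BUFFER" symbol namespace, any module that calls them has to import that namespace, e.g.:

#include <linux/module.h>

/* Required by modules using iio_dma_buffer_enable()/iio_dma_buffer_disable()
 * and the other helpers exported with EXPORT_SYMBOL_NS_GPL() above. */
MODULE_IMPORT_NS("IIO_DMA_BUFFER");
MODULE_LICENSE("GPL");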
static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
- struct iio_dma_buffer_block *block)
+ struct iio_dma_buffer_block *block)
{
if (block->state == IIO_BLOCK_STATE_DEAD) {
iio_buffer_block_put(block);
}
}
-static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
- struct iio_dma_buffer_queue *queue)
+static struct iio_dma_buffer_block *
+iio_dma_buffer_dequeue(struct iio_dma_buffer_queue *queue)
{
struct iio_dma_buffer_block *block;
unsigned int idx;
for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
block = queue->fileio.blocks[i];
- if (block != queue->fileio.active_block
- && block->state == IIO_BLOCK_STATE_DONE)
+ if (block != queue->fileio.active_block && block->state == IIO_BLOCK_STATE_DONE)
data_available += block->size;
}
struct device *dev;
const struct iio_dma_buffer_ops *ops;
+	/*
+	 * A mutex to protect accessing, configuring (e.g. enqueuing DMA blocks)
+	 * and doing file I/O on struct iio_dma_buffer_queue objects.
+	 */
struct mutex lock;
+	/* A spinlock to protect adding/removing blocks to/from the queue lists */
spinlock_t list_lock;
struct list_head incoming;
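As a rough illustration of the two-lock scheme the new comments describe (function names here are hypothetical): the mutex covers configuration and file I/O, while the spinlock covers block list manipulation, which may happen from the DMA completion path.

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/iio/buffer-dma.h>

static void example_queue_config(struct iio_dma_buffer_queue *queue)
{
	/* Slow path: configuration and file I/O serialize on the mutex. */
	guard(mutex)(&queue->lock);
	/* ... enqueue blocks, update fileio state ... */
}

static void example_remove_block(struct iio_dma_buffer_queue *queue,
				 struct iio_dma_buffer_block *block)
{
	unsigned long flags;

	/* Fast path: list manipulation only takes the spinlock. */
	spin_lock_irqsave(&queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&queue->list_lock, flags);
}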
*/
struct iio_dma_buffer_ops {
int (*submit)(struct iio_dma_buffer_queue *queue,
- struct iio_dma_buffer_block *block);
+ struct iio_dma_buffer_block *block);
void (*abort)(struct iio_dma_buffer_queue *queue);
};
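A driver providing these callbacks would look roughly like the sketch below; the foo_* names are made up, and the bodies only note what the real hooks are expected to do.

#include <linux/iio/buffer-dma.h>

static int foo_dma_submit_block(struct iio_dma_buffer_queue *queue,
				struct iio_dma_buffer_block *block)
{
	/* Start the transfer for this block; the completion handler is
	 * expected to call iio_dma_buffer_block_done(). */
	return 0;
}

static void foo_dma_abort(struct iio_dma_buffer_queue *queue)
{
	/* Cancel outstanding transfers and hand the blocks back, e.g. via
	 * iio_dma_buffer_block_list_abort(). */
}

static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
	.submit = foo_dma_submit_block,
	.abort = foo_dma_abort,
};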
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
- struct list_head *list);
+ struct list_head *list);
-int iio_dma_buffer_enable(struct iio_buffer *buffer,
- struct iio_dev *indio_dev);
+int iio_dma_buffer_enable(struct iio_buffer *buffer, struct iio_dev *indio_dev);
int iio_dma_buffer_disable(struct iio_buffer *buffer,
- struct iio_dev *indio_dev);
+ struct iio_dev *indio_dev);
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
- char __user *user_buffer);
+ char __user *user_buffer);
int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
const char __user *user_buffer);
size_t iio_dma_buffer_usage(struct iio_buffer *buffer);
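These exported helpers are meant to back a buffer's iio_buffer_access_ops. A sketch of the typical wiring (the struct name is hypothetical; the dmaengine-based buffer does something similar):

#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>

static const struct iio_buffer_access_ops foo_buffer_access_ops = {
	.read = iio_dma_buffer_read,
	.write = iio_dma_buffer_write,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_usage,
	.space_available = iio_dma_buffer_usage,
	.modes = INDIO_BUFFER_HARDWARE,
};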