return 0;
}
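+/*
+ * Resolve the device that DMABUF objects should be attached to for this
+ * buffer. Buffer implementations may provide a dedicated DMA device via
+ * the optional get_dma_dev() callback; otherwise, fall back to the parent
+ * device of the IIO device, preserving the previous behaviour.
+ */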
+static struct device *iio_buffer_get_dma_dev(const struct iio_dev *indio_dev,
+ struct iio_buffer *buffer)
+{
+ if (buffer->access->get_dma_dev)
+ return buffer->access->get_dma_dev(buffer);
+
+ return indio_dev->dev.parent;
+}
+
static struct dma_buf_attachment *
iio_buffer_find_attachment(struct iio_dev_buffer_pair *ib,
struct dma_buf *dmabuf, bool nonblock)
{
- struct device *dev = ib->indio_dev->dev.parent;
struct iio_buffer *buffer = ib->buffer;
+ struct device *dma_dev = iio_buffer_get_dma_dev(ib->indio_dev, buffer);
struct dma_buf_attachment *attach = NULL;
struct iio_dmabuf_priv *priv;
guard(mutex)(&buffer->dmabufs_mutex);
list_for_each_entry(priv, &buffer->dmabufs, entry) {
- if (priv->attach->dev == dev
+ if (priv->attach->dev == dma_dev
&& priv->attach->dmabuf == dmabuf) {
attach = priv->attach;
break;
{
struct iio_dev *indio_dev = ib->indio_dev;
struct iio_buffer *buffer = ib->buffer;
+ struct device *dma_dev = iio_buffer_get_dma_dev(indio_dev, buffer);
struct dma_buf_attachment *attach;
struct iio_dmabuf_priv *priv, *each;
struct dma_buf *dmabuf;
goto err_free_priv;
}
- attach = dma_buf_attach(dmabuf, indio_dev->dev.parent);
+ attach = dma_buf_attach(dmabuf, dma_dev);
if (IS_ERR(attach)) {
err = PTR_ERR(attach);
goto err_dmabuf_put;
* combo. If we do, refuse to attach.
*/
list_for_each_entry(each, &buffer->dmabufs, entry) {
- if (each->attach->dev == indio_dev->dev.parent
+ if (each->attach->dev == dma_dev
&& each->attach->dmabuf == dmabuf) {
/*
* We unlocked the reservation object, so going through
{
struct iio_buffer *buffer = ib->buffer;
struct iio_dev *indio_dev = ib->indio_dev;
+ struct device *dma_dev = iio_buffer_get_dma_dev(indio_dev, buffer);
struct iio_dmabuf_priv *priv;
struct dma_buf *dmabuf;
int dmabuf_fd, ret = -EPERM;
guard(mutex)(&buffer->dmabufs_mutex);
list_for_each_entry(priv, &buffer->dmabufs, entry) {
- if (priv->attach->dev == indio_dev->dev.parent
+ if (priv->attach->dev == dma_dev
&& priv->attach->dmabuf == dmabuf) {
list_del(&priv->entry);
* @enqueue_dmabuf: called from userspace via ioctl to queue this DMABUF
* object to this buffer. Requires a valid DMABUF fd that
* was previously attached to this buffer.
+ * @get_dma_dev: called to get the DMA device associated with this buffer.
* @lock_queue: called when the core needs to lock the buffer queue;
* it is used when enqueueing DMABUF objects.
* @unlock_queue: used to unlock a previously locked buffer queue
struct iio_dma_buffer_block *block,
struct dma_fence *fence, struct sg_table *sgt,
size_t size, bool cyclic);
+ struct device *(*get_dma_dev)(struct iio_buffer *buffer);
void (*lock_queue)(struct iio_buffer *buffer);
void (*unlock_queue)(struct iio_buffer *buffer);
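For illustration, a buffer implementation that performs its transfers
through a DMA channel could wire up the new callback roughly as follows.
This is a minimal sketch, not part of the patch: the my_dmaengine_buffer
wrapper, its chan member, and my_buffer_access_funcs are hypothetical,
while dmaengine_get_dma_device() is the existing dmaengine helper that
resolves the device actually performing the DMA.

#include <linux/dmaengine.h>
#include <linux/iio/buffer_impl.h>

/* Hypothetical wrapper that owns the DMA channel used for transfers. */
struct my_dmaengine_buffer {
	struct iio_buffer buffer;
	struct dma_chan *chan;
};

static struct device *my_buffer_get_dma_dev(struct iio_buffer *buffer)
{
	struct my_dmaengine_buffer *dbuf =
		container_of(buffer, struct my_dmaengine_buffer, buffer);

	/* Attach DMABUFs to the device that performs the DMA transfers. */
	return dmaengine_get_dma_device(dbuf->chan);
}

static const struct iio_buffer_access_funcs my_buffer_access_funcs = {
	/* other callbacks elided */
	.get_dma_dev = my_buffer_get_dma_dev,
};

With such a callback in place, iio_buffer_find_attachment() and the
attach/detach paths above match attachments against the device returned
by get_dma_dev() rather than indio_dev->dev.parent, which matters
whenever the device performing the DMA is not the parent of the IIO
device.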