* @v flags Mapping flags
* @ret rc Return status code
*/
-static int riscv_dma_map ( struct dma_device *dma __unused,
- struct dma_mapping *map __unused,
+static int riscv_dma_map ( struct dma_device *dma,
+ struct dma_mapping *map,
void *addr, size_t len, int flags ) {
/* Sanity check: we cannot support bidirectional mappings */
-	assert ( ! ( ( flags & DMA_TX ) & ( flags & DMA_RX ) ) );
+	assert ( ! ( ( flags & DMA_TX ) && ( flags & DMA_RX ) ) );
+ /* Populate mapping */
+ map->dma = dma;
+ map->offset = 0;
+ map->token = NULL;
+
/* Flush cached data to transmit buffers */
if ( flags & DMA_TX )
cache_clean ( addr, len );
- /* Invalidate cached data in receive buffers */
- if ( flags & DMA_RX )
+ /* Invalidate cached data in receive buffers and record address */
+ if ( flags & DMA_RX ) {
cache_invalidate ( addr, len );
+ map->token = addr;
+ }
+
+ /* Increment mapping count (for debugging) */
+ if ( DBG_LOG )
+ dma->mapped++;
return 0;
}
+/**
+ * Unmap buffer
+ *
+ * @v map DMA mapping
+ * @v len Used length
+ */
+static void riscv_dma_unmap ( struct dma_mapping *map, size_t len ) {
+ struct dma_device *dma = map->dma;
+ void *addr = map->token;
+
+ /* Invalidate cached data in receive buffers */
+ if ( addr )
+ cache_invalidate ( addr, len );
+
+	/* Clear mapping */
+	map->dma = NULL;
+	map->token = NULL;
+
+ /* Decrement mapping count (for debugging) */
+ if ( DBG_LOG )
+ dma->mapped--;
+}
+
/**
* Allocate and map DMA-coherent buffer
*
DBGC ( dma, "DMA allocated [%#08lx,%#08lx) via %p\n",
phys, ( phys + len ), caddr );
+ /* Increment allocation count (for debugging) */
+ if ( DBG_LOG )
+ dma->allocated++;
+
return caddr;
}
*/
static void riscv_dma_free ( struct dma_mapping *map,
void *addr, size_t len ) {
+ struct dma_device *dma = map->dma;
/* Sanity check */
assert ( virt_to_phys ( addr ) == virt_to_phys ( map->token ) );
/* Clear mapping */
map->dma = NULL;
map->token = NULL;
+
+ /* Decrement allocation count (for debugging) */
+ if ( DBG_LOG )
+ dma->allocated--;
}
PROVIDE_DMAAPI ( riscv, dma_map, riscv_dma_map );
-PROVIDE_DMAAPI_INLINE ( riscv, dma_unmap );
+PROVIDE_DMAAPI ( riscv, dma_unmap, riscv_dma_unmap );
PROVIDE_DMAAPI ( riscv, dma_alloc, riscv_dma_alloc );
PROVIDE_DMAAPI ( riscv, dma_free, riscv_dma_free );
PROVIDE_DMAAPI ( riscv, dma_umalloc, riscv_dma_alloc );
#define DMAAPI_PREFIX_riscv __riscv_
#endif
-/**
- * Unmap buffer
- *
- * @v map DMA mapping
- */
-static inline __always_inline void
-DMAAPI_INLINE ( riscv, dma_unmap ) ( struct dma_mapping *map __unused ) {
-
- /* Nothing to do */
-}
-
/**
* Set addressable space mask
*
* Unmap buffer
*
* @v map DMA mapping
+ * @v len Used length
*/
-static void dma_op_unmap ( struct dma_mapping *map ) {
+static void dma_op_unmap ( struct dma_mapping *map, size_t len ) {
struct dma_device *dma = map->dma;
assert ( dma != NULL );
assert ( dma->op != NULL );
- dma->op->unmap ( dma, map );
+ dma->op->unmap ( dma, map, len );
}
/**
pci_msix_disable ( pci, &intelxl->msix.cap );
err_enable:
- dma_unmap ( &intelxl->msix.map );
+ dma_unmap ( &intelxl->msix.map, sizeof ( intelxl->msix.msg ) );
err_map:
return rc;
}
pci_msix_disable ( pci, &intelxl->msix.cap );
/* Unmap dummy target location */
- dma_unmap ( &intelxl->msix.map );
+ dma_unmap ( &intelxl->msix.map, sizeof ( intelxl->msix.msg ) );
}
/******************************************************************************
*
* @v dma DMA device
* @v map DMA mapping
+ * @v len Used length
*/
- void ( * unmap ) ( struct dma_device *dma, struct dma_mapping *map );
+ void ( * unmap ) ( struct dma_device *dma, struct dma_mapping *map,
+ size_t len );
/**
* Allocate and map DMA-coherent buffer
*
* Unmap buffer
*
* @v map DMA mapping
+ * @v len Used length
*/
static inline __always_inline void
-DMAAPI_INLINE ( flat, dma_unmap ) ( struct dma_mapping *map ) {
+DMAAPI_INLINE ( flat, dma_unmap ) ( struct dma_mapping *map,
+ size_t len __unused ) {
/* Decrement mapping count (for debugging) */
if ( DBG_LOG ) {
* Unmap buffer
*
* @v map DMA mapping
+ * @v len Used length
*/
-void dma_unmap ( struct dma_mapping *map );
+void dma_unmap ( struct dma_mapping *map, size_t len );
/**
* Allocate and map DMA-coherent buffer
* @ret rc Return status code
*/
static inline __always_inline void iob_unmap ( struct io_buffer *iobuf ) {
- dma_unmap ( &iobuf->map );
+ dma_unmap ( &iobuf->map, iob_len ( iobuf ) );
}
extern struct io_buffer * __malloc alloc_iob_raw ( size_t len, size_t align,
*
* @v dma DMA device
* @v map DMA mapping
+ * @v len Used length
*/
static void efipci_dma_unmap ( struct dma_device *dma,
- struct dma_mapping *map ) {
+ struct dma_mapping *map, size_t len __unused ) {
struct efi_pci_device *efipci =
container_of ( dma, struct efi_pci_device, pci.dma );
EFI_PCI_IO_PROTOCOL *pci_io = efipci->io;
return addr;
- efipci_dma_unmap ( dma, map );
+ efipci_dma_unmap ( dma, map, len );
err_map:
pci_io->FreeBuffer ( pci_io, pages, addr );
err_alloc:
pages = ( ( len + EFI_PAGE_SIZE - 1 ) / EFI_PAGE_SIZE );
/* Unmap buffer */
- efipci_dma_unmap ( dma, map );
+ efipci_dma_unmap ( dma, map, len );
/* Free buffer */
pci_io->FreeBuffer ( pci_io, pages, addr );