From: Linus Torvalds
Date: Sat, 4 Oct 2025 00:41:12 +0000 (-0700)
Subject: Merge tag 'dma-mapping-6.18-2025-09-30' of git://git.kernel.org/pub/scm/linux/kernel...
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a498d59c469bf1cab2710ffeb34050f475de28ce;p=thirdparty%2Fkernel%2Fstable.git

Merge tag 'dma-mapping-6.18-2025-09-30' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux

Pull dma-mapping updates from Marek Szyprowski:

 - Refactoring of the DMA mapping API to use physical addresses as the
   primary interface instead of page+offset parameters.

   This gets much closer to Matthew Wilcox's long-term wish for
   struct-pageless IO to cacheable DRAM, and it supports the memdesc
   project, which seeks to substantially transform how struct page
   works.

   An advantage of this approach is the possibility of introducing
   DMA_ATTR_MMIO, which covers the existing 'dma_map_resource' flow in
   the common paths and in turn allows the recently introduced
   dma_iova_link() API to map PCI P2P MMIO without creating struct
   page entries (see the sketch below).

   Developed by Leon Romanovsky and Jason Gunthorpe.

 - Minor clean-ups by Petr Tesarik and Qianfeng Rong

* tag 'dma-mapping-6.18-2025-09-30' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux:
  kmsan: fix missed kmsan_handle_dma() signature conversion
  mm/hmm: properly take MMIO path
  mm/hmm: migrate to physical address-based DMA mapping API
  dma-mapping: export new dma_*map_phys() interface
  xen: swiotlb: Open code map_resource callback
  dma-mapping: implement DMA_ATTR_MMIO for dma_(un)map_page_attrs()
  kmsan: convert kmsan_handle_dma to use physical addresses
  dma-mapping: convert dma_direct_*map_page to be phys_addr_t based
  iommu/dma: implement DMA_ATTR_MMIO for iommu_dma_(un)map_phys()
  iommu/dma: rename iommu_dma_*map_page to iommu_dma_*map_phys
  dma-mapping: rename trace_dma_*map_page to trace_dma_*map_phys
  dma-debug: refactor to use physical addresses for page mapping
  iommu/dma: implement DMA_ATTR_MMIO for dma_iova_link().
  dma-mapping: introduce new DMA attribute to indicate MMIO memory
  swiotlb: Remove redundant __GFP_NOWARN
  dma-direct: clean up the logic in __dma_direct_alloc_pages()
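As an illustration of the first item above (not taken from the merge itself), here is a minimal sketch of how a driver might use the physical-address-based interface. It assumes that dma_map_phys() and dma_unmap_phys() take a phys_addr_t plus size, direction and attrs, mirroring dma_map_page_attrs() as the series description suggests, and that DMA_ATTR_MMIO marks MMIO physical ranges that previously required dma_map_resource(); the function and variable names here are hypothetical.

#include <linux/dma-mapping.h>

static int p2p_example_map(struct device *dev, phys_addr_t dram_phys,
			   phys_addr_t bar_phys, size_t len)
{
	dma_addr_t dram_dma, bar_dma;

	/* Cacheable DRAM: plain physical address, no special attribute. */
	dram_dma = dma_map_phys(dev, dram_phys, len, DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dev, dram_dma))
		return -ENOMEM;

	/*
	 * MMIO range (e.g. a PCI P2P BAR): DMA_ATTR_MMIO marks it as MMIO
	 * so the core does not treat it as page-backed kernel memory,
	 * instead of going through the separate dma_map_resource() path.
	 */
	bar_dma = dma_map_phys(dev, bar_phys, len, DMA_BIDIRECTIONAL,
			       DMA_ATTR_MMIO);
	if (dma_mapping_error(dev, bar_dma)) {
		dma_unmap_phys(dev, dram_dma, len, DMA_TO_DEVICE, 0);
		return -ENOMEM;
	}

	/* ... program the device with dram_dma / bar_dma ... */

	dma_unmap_phys(dev, bar_dma, len, DMA_BIDIRECTIONAL, DMA_ATTR_MMIO);
	dma_unmap_phys(dev, dram_dma, len, DMA_TO_DEVICE, 0);
	return 0;
}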
---

a498d59c469bf1cab2710ffeb34050f475de28ce
diff --cc rust/kernel/dma.rs
index 2569c21208e37,61d9eed7a786e..4e0af3e1a3b9a
--- a/rust/kernel/dma.rs
+++ b/rust/kernel/dma.rs
@@@ -252,76 -242,11 +252,79 @@@ pub mod attrs
      /// Indicates that the buffer is fully accessible at an elevated privilege level (and
      /// ideally inaccessible or at least read-only at lesser-privileged levels).
      pub const DMA_ATTR_PRIVILEGED: Attrs = Attrs(bindings::DMA_ATTR_PRIVILEGED);
+ 
+     /// Indicates that the buffer is MMIO memory.
+     pub const DMA_ATTR_MMIO: Attrs = Attrs(bindings::DMA_ATTR_MMIO);
  }
  
 +/// DMA data direction.
 +///
 +/// Corresponds to the C [`enum dma_data_direction`].
 +///
 +/// [`enum dma_data_direction`]: srctree/include/linux/dma-direction.h
 +#[derive(Copy, Clone, PartialEq, Eq, Debug)]
 +#[repr(u32)]
 +pub enum DataDirection {
 +    /// The DMA mapping is for bidirectional data transfer.
 +    ///
 +    /// This is used when the buffer can be both read from and written to by the device.
 +    /// The cache for the corresponding memory region is both flushed and invalidated.
 +    Bidirectional = Self::const_cast(bindings::dma_data_direction_DMA_BIDIRECTIONAL),
 +
 +    /// The DMA mapping is for data transfer from memory to the device (write).
 +    ///
 +    /// The CPU has prepared data in the buffer, and the device will read it.
 +    /// The cache for the corresponding memory region is flushed before device access.
 +    ToDevice = Self::const_cast(bindings::dma_data_direction_DMA_TO_DEVICE),
 +
 +    /// The DMA mapping is for data transfer from the device to memory (read).
 +    ///
 +    /// The device will write data into the buffer for the CPU to read.
 +    /// The cache for the corresponding memory region is invalidated before CPU access.
 +    FromDevice = Self::const_cast(bindings::dma_data_direction_DMA_FROM_DEVICE),
 +
 +    /// The DMA mapping is not for data transfer.
 +    ///
 +    /// This is primarily for debugging purposes. With this direction, the DMA mapping API
 +    /// will not perform any cache coherency operations.
 +    None = Self::const_cast(bindings::dma_data_direction_DMA_NONE),
 +}
 +
 +impl DataDirection {
 +    /// Casts the bindgen-generated enum type to a `u32` at compile time.
 +    ///
 +    /// This function will cause a compile-time error if the underlying value of the
 +    /// C enum is out of bounds for `u32`.
 +    const fn const_cast(val: bindings::dma_data_direction) -> u32 {
 +        // CAST: The C standard allows compilers to choose different integer types for enums.
 +        // To safely check the value, we cast it to a wide signed integer type (`i128`)
 +        // which can hold any standard C integer enum type without truncation.
 +        let wide_val = val as i128;
 +
 +        // Check if the value is outside the valid range for the target type `u32`.
 +        // CAST: `u32::MAX` is cast to `i128` to match the type of `wide_val` for the comparison.
 +        if wide_val < 0 || wide_val > u32::MAX as i128 {
 +            // Trigger a compile-time error in a const context.
 +            build_error!("C enum value is out of bounds for the target type `u32`.");
 +        }
 +
 +        // CAST: This cast is valid because the check above guarantees that `wide_val`
 +        // is within the representable range of `u32`.
 +        wide_val as u32
 +    }
 +}
 +
 +impl From<DataDirection> for bindings::dma_data_direction {
 +    /// Returns the raw representation of [`enum dma_data_direction`].
 +    fn from(direction: DataDirection) -> Self {
 +        // CAST: `direction as u32` gets the underlying representation of our `#[repr(u32)]` enum.
 +        // The subsequent cast to `Self` (the bindgen type) assumes the C enum is compatible
 +        // with the enum variants of `DataDirection`, which is a valid assumption given our
 +        // compile-time checks.
 +        direction as u32 as Self
 +    }
 +}
 +
  /// An abstraction of the `dma_alloc_coherent` API.
  ///
  /// This is an abstraction around the `dma_alloc_coherent` API which is used to allocate and map