From: Michael Brown
Date: Wed, 5 Nov 2025 17:29:39 +0000 (+0000)
Subject: [ioapi] Allow iounmap() to be called for port I/O addresses
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=bd3982b63064590497d39d63e96d0a3f63149b73;p=thirdparty%2Fipxe.git

[ioapi] Allow iounmap() to be called for port I/O addresses

Allow code using the combined MMIO and port I/O accessors to safely
call iounmap() to unmap the MMIO or port I/O region.

In the virtual offset I/O mapping API as used for UEFI, 32-bit BIOS,
and 32-bit RISC-V SBI, iounmap() is a no-op anyway.  In 64-bit RISC-V
SBI, we have no concept of port I/O and so the issue is moot.  This
leaves only 64-bit BIOS, where it suffices to do nothing for any
pages outside of the chosen MMIO virtual address range.

For symmetry, we implement the equivalent change in the very closely
related RISC-V page management code.

Signed-off-by: Michael Brown
---
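As a usage illustration (not part of the patch below), here is a minimal
caller-side sketch of the pattern this change enables: a driver teardown
path that calls iounmap() unconditionally, regardless of whether its
register mapping resolved to an MMIO mapping or to a port I/O address.
The driver structure and identifiers (struct example_nic, example_remove)
are hypothetical; only iounmap(), pci_get_drvdata() and free() are
existing iPXE calls.

#include <stdlib.h>
#include <ipxe/io.h>
#include <ipxe/pci.h>

/** A hypothetical NIC using the combined MMIO and port I/O accessors */
struct example_nic {
	/** Registers (may be an MMIO mapping or a port I/O address) */
	void *regs;
};

/**
 * Remove hypothetical device
 *
 * @v pci		PCI device
 */
static void example_remove ( struct pci_device *pci ) {
	struct example_nic *nic = pci_get_drvdata ( pci );

	/* Unmap registers: iounmap() now ignores any address outside
	 * the MMIO virtual address range, so the teardown path does
	 * not need to know which kind of region was mapped.
	 */
	iounmap ( nic->regs );

	free ( nic );
}
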
diff --git a/src/arch/riscv/core/svpage.c b/src/arch/riscv/core/svpage.c
index c0fa8b445..e25e3920a 100644
--- a/src/arch/riscv/core/svpage.c
+++ b/src/arch/riscv/core/svpage.c
@@ -83,6 +83,10 @@ enum pte_flags {
 /** The page table */
 extern struct page_table page_table;
 
+/** Maximum number of I/O pages */
+#define MAP_PAGE_COUNT \
+	( sizeof ( page_table.pte ) / sizeof ( page_table.pte[0] ) )
+
 /** I/O page size
  *
  * We choose to use 1GB "gigapages", since these are supported by all
@@ -146,17 +150,14 @@ static void * svpage_map ( physaddr_t phys, size_t len, unsigned long attrs ) {
 	/* Calculate number of pages required */
 	count = ( ( offset + len + MAP_PAGE_SIZE - 1 ) / MAP_PAGE_SIZE );
 	assert ( count != 0 );
-	assert ( count < ( sizeof ( page_table.pte ) /
-			   sizeof ( page_table.pte[0] ) ) );
+	assert ( count <= MAP_PAGE_COUNT );
 
 	/* Round up number of pages to a power of two */
 	stride = ( 1 << fls ( count - 1 ) );
 	assert ( count <= stride );
 
 	/* Allocate pages */
-	for ( first = 0 ; first < ( sizeof ( page_table.pte ) /
-				    sizeof ( page_table.pte[0] ) ) ;
-	      first += stride ) {
+	for ( first = 0 ; first < MAP_PAGE_COUNT ; first += stride ) {
 
 		/* Calculate virtual address */
 		virt = ( MAP_BASE + ( first * MAP_PAGE_SIZE ) + offset );
@@ -216,6 +217,10 @@ static void svpage_unmap ( const volatile void *virt ) {
 	/* Calculate first page table entry */
 	first = ( ( virt - MAP_BASE ) / MAP_PAGE_SIZE );
 
+	/* Ignore unmappings outside of the I/O range */
+	if ( first >= MAP_PAGE_COUNT )
+		return;
+
 	/* Clear page table entries */
 	for ( i = first ; ; i++ ) {
 
diff --git a/src/arch/x86/include/librm.h b/src/arch/x86/include/librm.h
index 55feeffc9..666be0438 100644
--- a/src/arch/x86/include/librm.h
+++ b/src/arch/x86/include/librm.h
@@ -346,6 +346,10 @@ enum page_flags {
 /** The I/O space page table */
 extern struct page_table io_pages;
 
+/** Maximum number of I/O pages */
+#define IO_PAGE_COUNT \
+	( sizeof ( io_pages.page ) / sizeof ( io_pages.page[0] ) )
+
 /** I/O page size
  *
  * We choose to use 2MB pages for I/O space, to minimise the number of
diff --git a/src/arch/x86/transitions/librm_mgmt.c b/src/arch/x86/transitions/librm_mgmt.c
index e51f0e649..89feec96a 100644
--- a/src/arch/x86/transitions/librm_mgmt.c
+++ b/src/arch/x86/transitions/librm_mgmt.c
@@ -303,17 +303,14 @@ static void * ioremap_pages ( unsigned long bus_addr, size_t len ) {
 	/* Calculate number of pages required */
 	count = ( ( offset + len + IO_PAGE_SIZE - 1 ) / IO_PAGE_SIZE );
 	assert ( count != 0 );
-	assert ( count < ( sizeof ( io_pages.page ) /
-			   sizeof ( io_pages.page[0] ) ) );
+	assert ( count <= IO_PAGE_COUNT );
 
 	/* Round up number of pages to a power of two */
 	stride = ( 1 << fls ( count - 1 ) );
 	assert ( count <= stride );
 
 	/* Allocate pages */
-	for ( first = 0 ; first < ( sizeof ( io_pages.page ) /
-				    sizeof ( io_pages.page[0] ) ) ;
-	      first += stride ) {
+	for ( first = 0 ; first < IO_PAGE_COUNT ; first += stride ) {
 
 		/* Calculate I/O address */
 		io_addr = ( IO_BASE + ( first * IO_PAGE_SIZE ) + offset );
@@ -366,6 +363,10 @@ static void iounmap_pages ( volatile const void *io_addr ) {
 	/* Calculate first page table entry */
 	first = ( ( io_addr - IO_BASE ) / IO_PAGE_SIZE );
 
+	/* Ignore unmappings outside of the I/O range */
+	if ( first >= IO_PAGE_COUNT )
+		return;
+
 	/* Clear page table entries */
 	for ( i = first ; ; i++ ) {