extern void set_interrupt_vector ( unsigned int intr, void *vector );
+/** A page table
+ *
+ * Each table comprises 512 entries of 8 bytes each, occupying exactly
+ * one standard 4kB x86-64 page.
+ */
+struct page_table {
+ /** Page address and flags */
+ uint64_t page[512];
+};
+
+/** Page flags */
+enum page_flags {
+ /** Page is present */
+ PAGE_P = 0x01,
+ /** Page is writable */
+ PAGE_RW = 0x02,
+ /** Page is accessible by user code */
+ PAGE_US = 0x04,
+ /** Page-level write-through */
+ PAGE_PWT = 0x08,
+ /** Page-level cache disable */
+ PAGE_PCD = 0x10,
+ /** Page is a large page */
+ PAGE_PS = 0x80,
+ /** Page is the last page in an allocation
+ *
+ * This bit is ignored by the hardware. We use it to track
+ * the size of allocations made by ioremap().
+ */
+ PAGE_LAST = 0x800,
+};
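+
+/* ( These values match the x86 page-table entry bit positions.  Bits
+ *   9-11 are ignored by the MMU and available for software use, which
+ *   is what makes PAGE_LAST safe to repurpose. )
+ */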
+
+/** The I/O space page table */
+extern struct page_table io_pages;
+
+/** I/O page size
+ *
+ * We choose to use 2MB pages for I/O space, to minimise the number of
+ * page table entries required.
+ */
+#define IO_PAGE_SIZE 0x200000UL
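+/* ( a 2MB page is a page-directory entry with the PAGE_PS flag set ) */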
+
+/** I/O page base address
+ *
+ * We choose to place I/O space immediately above the identity-mapped
+ * 32-bit address space.
+ */
+#define IO_BASE ( ( void * ) 0x100000000ULL )
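+
+/* With 512 entries of IO_PAGE_SIZE each, io_pages can map at most
+ * 512 * 2MB = 1GB of I/O space, occupying virtual addresses
+ * [0x100000000,0x13fffffff].
+ */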
+
#endif /* ASSEMBLY */
#endif /* LIBRM_H */
* These point to the PDPT. This creates some aliased
* addresses within unused portions of the 64-bit address
* space, but allows us to use just a single PDPT.
+ *
+ * - PDE[...] covering arbitrary 2MB portions of I/O space
+ *
+ * These are 2MB pages created by ioremap() to cover I/O
+ * device addresses.
*/
pml4e:
.space SIZEOF_PT
.size pml4e, . - pml4e
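+ /* Expose the PML4 to C code as the I/O space page table: since
+  * PDPTE[0x004] (below) points back at the PML4, its otherwise
+  * unused entries can hold the 2MB I/O page mappings created by
+  * ioremap().
+  */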
+ .globl io_pages
+ .equ io_pages, pml4e
+
/* Page directory pointer table entries (PDPTEs)
*
* This comprises:
* These point to the appropriate page directories (in pde_low)
* used to identity-map the whole of the 32-bit address space.
*
+ * - PDPTE[0x004] covering [0x0000000100000000-0x000000013fffffff]
+ *
+ * This points back to the PML4, allowing the PML4 to be
+ * (ab)used to hold 2MB pages used for I/O device addresses.
+ *
* - PDPTE[0x1ff] covering [0xffffffffc0000000-0xffffffffffffffff]
*
* This points back to the PDPT itself, allowing the PDPT to be
/* Initialise PDPTE for negative 1GB */
movl %eax, ( VIRTUAL(pdpte) + SIZEOF_PT - SIZEOF_PTE )
+ /* Initialise PDPTE for I/O space */
+ leal ( VIRTUAL(pml4e) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
+ movl %eax, ( VIRTUAL(pdpte) + ( PDE_LOW_PTS * SIZEOF_PTE ) )
+
/* Initialise PDPTEs for low 4GB */
movl $PDE_LOW_PTS, %ecx
leal ( VIRTUAL(pde_low) + ( PDE_LOW_PTS * SIZEOF_PT ) + \
FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
#include <stdint.h>
+#include <strings.h>
+#include <assert.h>
#include <ipxe/profile.h>
#include <realmode.h>
#include <pic8259.h>
profile_exclude ( profiler );
}
+/**
+ * Map pages for I/O
+ *
+ * @v bus_addr Bus address
+ * @v len Length of region
+ * @ret io_addr I/O address, or NULL if no space is available
+ *
+ * The mapping is placed at the first free position in the I/O page
+ * table whose index is a multiple of the allocation size rounded up
+ * to a power of two.
+ */
+static void * ioremap_pages ( unsigned long bus_addr, size_t len ) {
+ unsigned long start;
+ unsigned int count;
+ unsigned int stride;
+ unsigned int first;
+ unsigned int i;
+ size_t offset;
+ void *io_addr;
+
+ DBGC ( &io_pages, "IO mapping %08lx+%zx\n", bus_addr, len );
+
+ /* Sanity check */
+ assert ( len != 0 );
+
+ /* Round down start address to a page boundary */
+ start = ( bus_addr & ~( IO_PAGE_SIZE - 1 ) );
+ offset = ( bus_addr - start );
+ assert ( offset < IO_PAGE_SIZE );
+
+ /* Calculate number of pages required */
+ count = ( ( offset + len + IO_PAGE_SIZE - 1 ) / IO_PAGE_SIZE );
+ assert ( count != 0 );
+ assert ( count < ( sizeof ( io_pages.page ) /
+ sizeof ( io_pages.page[0] ) ) );
+
+ /* Round up number of pages to a power of two */
+ stride = ( 1 << ( fls ( count - 1 ) ) );
+ assert ( count <= stride );
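+ /* ( e.g. a request spanning three 2MB pages uses a stride of
+  *   four, and so is considered only at PTE indices 0, 4, 8, ... )
+  */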
+
+ /* Allocate pages */
+ for ( first = 0 ; first < ( sizeof ( io_pages.page ) /
+ sizeof ( io_pages.page[0] ) ) ;
+ first += stride ) {
+
+ /* Calculate I/O address */
+ io_addr = ( IO_BASE + ( first * IO_PAGE_SIZE ) + offset );
+
+ /* Check that page table entries are available */
+ for ( i = first ; i < ( first + count ) ; i++ ) {
+ if ( io_pages.page[i] & PAGE_P ) {
+ io_addr = NULL;
+ break;
+ }
+ }
+ if ( ! io_addr )
+ continue;
+
+ /* Create page table entries */
+ for ( i = first ; i < ( first + count ) ; i++ ) {
+ io_pages.page[i] = ( start | PAGE_P | PAGE_RW |
+ PAGE_US | PAGE_PWT | PAGE_PCD |
+ PAGE_PS );
+ start += IO_PAGE_SIZE;
+ }
+
+ /* Mark last page as being the last in this allocation */
+ io_pages.page[ i - 1 ] |= PAGE_LAST;
+
+ /* Return I/O address */
+ DBGC ( &io_pages, "IO mapped %08lx+%zx to %p using PTEs "
+ "[%d-%d]\n", bus_addr, len, io_addr, first,
+ ( first + count - 1 ) );
+ return io_addr;
+ }
+
+ DBGC ( &io_pages, "IO could not map %08lx+%zx\n", bus_addr, len );
+ return NULL;
+}
+
+/**
+ * Unmap pages for I/O
+ *
+ * @v io_addr I/O address
+ *
+ * The extent of the original mapping is determined from the PAGE_LAST
+ * marker recorded by ioremap_pages().
+ */
+static void iounmap_pages ( volatile const void *io_addr ) {
+ volatile const void *invalidate = io_addr;
+ unsigned int first;
+ unsigned int i;
+ int is_last;
+
+ DBGC ( &io_pages, "IO unmapping %p\n", io_addr );
+
+ /* Calculate first page table entry */
+ first = ( ( io_addr - IO_BASE ) / IO_PAGE_SIZE );
+
+ /* Clear page table entries */
+ for ( i = first ; ; i++ ) {
+
+ /* Sanity check */
+ assert ( io_pages.page[i] & PAGE_P );
+
+ /* Check if this is the last page in this allocation */
+ is_last = ( io_pages.page[i] & PAGE_LAST );
+
+ /* Clear page table entry */
+ io_pages.page[i] = 0;
+
+ /* Invalidate TLB for this page */
+ __asm__ __volatile__ ( "invlpg (%0)" : : "r" ( invalidate ) );
+ invalidate += IO_PAGE_SIZE;
+
+ /* Terminate if this was the last page */
+ if ( is_last )
+ break;
+ }
+
+ DBGC ( &io_pages, "IO unmapped %p using PTEs [%d-%d]\n",
+ io_addr, first, i );
+}
+
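+/* A sketch of how a driver ends up invoking the functions above via
+ * the generic I/O mapping API, assuming this "pages" provider is the
+ * one selected at build time ( bar_phys, bar_len and STATUS_OFFSET
+ * are hypothetical placeholders ):
+ *
+ *     void *regs = ioremap ( bar_phys, bar_len );
+ *     if ( regs ) {
+ *             uint32_t status = readl ( regs + STATUS_OFFSET );
+ *             iounmap ( regs );
+ *     }
+ */
+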
PROVIDE_UACCESS_INLINE ( librm, phys_to_user );
PROVIDE_UACCESS_INLINE ( librm, user_to_phys );
PROVIDE_UACCESS_INLINE ( librm, virt_to_user );
PROVIDE_UACCESS_INLINE ( librm, memset_user );
PROVIDE_UACCESS_INLINE ( librm, strlen_user );
PROVIDE_UACCESS_INLINE ( librm, memchr_user );
+PROVIDE_IOMAP_INLINE ( pages, io_to_bus );
+PROVIDE_IOMAP ( pages, ioremap, ioremap_pages );
+PROVIDE_IOMAP ( pages, iounmap, iounmap_pages );