DBGC ( colour, "RUNTIME found no command line\n" );
return 0;
}
- cmdline_user = phys_to_user ( cmdline_phys );
+ cmdline_user = phys_to_virt ( cmdline_phys );
len = ( strlen ( cmdline_user ) + 1 /* NUL */ );
/* Allocate and copy command line */
initrd_phys, ( initrd_phys + initrd_len ) );
/* Create initrd image */
- image = image_memory ( "<INITRD>", phys_to_user ( initrd_phys ),
+ image = image_memory ( "<INITRD>", phys_to_virt ( initrd_phys ),
initrd_len );
if ( ! image ) {
DBGC ( colour, "RUNTIME could not create initrd image\n" );
* @ret len Length of setting data, or negative error
*/
static int vram_fetch ( void *data, size_t len ) {
- userptr_t vram = phys_to_user ( VRAM_BASE );
+ userptr_t vram = phys_to_virt ( VRAM_BASE );
/* Copy video RAM */
if ( len > VRAM_LEN )
bzimg->rm_memsz += BZI_CMDLINE_SIZE;
/* Calculate load address of protected-mode portion */
- bzimg->pm_kernel = phys_to_user ( is_bzimage ? BZI_LOAD_HIGH_ADDR
+ bzimg->pm_kernel = phys_to_virt ( is_bzimage ? BZI_LOAD_HIGH_ADDR
: BZI_LOAD_LOW_ADDR );
/* Extract video mode */
DBGC ( image, "bzImage %p version %04x RM %#lx+%#zx PM %#lx+%#zx "
"cmdlen %zd\n", image, bzimg->version,
- user_to_phys ( bzimg->rm_kernel, 0 ), bzimg->rm_filesz,
- user_to_phys ( bzimg->pm_kernel, 0 ), bzimg->pm_sz,
+ virt_to_phys ( bzimg->rm_kernel ), bzimg->rm_filesz,
+ virt_to_phys ( bzimg->pm_kernel ), bzimg->pm_sz,
bzimg->cmdline_size );
return 0;
/* Set command line */
if ( bzimg->version >= 0x0202 ) {
- bzimg->bzhdr.cmd_line_ptr = user_to_phys ( bzimg->rm_kernel,
- bzimg->rm_cmdline );
+ bzimg->bzhdr.cmd_line_ptr = ( virt_to_phys ( bzimg->rm_kernel )
+ + bzimg->rm_cmdline );
} else {
bzimg->cmdline_magic.magic = BZI_CMDLINE_MAGIC;
bzimg->cmdline_magic.offset = bzimg->rm_cmdline;
}
assert ( offset == len );
DBGC ( image, "bzImage %p initrd %p [%#08lx,%#08lx,%#08lx)"
- "%s%s\n", image, initrd, user_to_phys ( address, 0 ),
- user_to_phys ( address, offset ),
- user_to_phys ( address, ( offset + initrd->len ) ),
+ "%s%s\n", image, initrd, virt_to_phys ( address ),
+ ( virt_to_phys ( address ) + offset ),
+ ( virt_to_phys ( address ) + offset + initrd->len ),
( filename ? " " : "" ), ( filename ? filename : "" ) );
- DBGC2_MD5A ( image, user_to_phys ( address, offset ),
+ DBGC2_MD5A ( image, ( virt_to_phys ( address ) + offset ),
( address + offset ), initrd->len );
}
len += initrd->len;
len = bzimage_align ( len );
DBGC ( image, "bzImage %p initrd %p from [%#08lx,%#08lx)%s%s\n",
- image, initrd, user_to_phys ( initrd->data, 0 ),
- user_to_phys ( initrd->data, initrd->len ),
+ image, initrd, virt_to_phys ( initrd->data ),
+ ( virt_to_phys ( initrd->data ) + initrd->len ),
( initrd->cmdline ? " " : "" ),
( initrd->cmdline ? initrd->cmdline : "" ) );
- DBGC2_MD5A ( image, user_to_phys ( initrd->data, 0 ),
+ DBGC2_MD5A ( image, virt_to_phys ( initrd->data ),
initrd->data, initrd->len );
}
}
/* Check that total length fits within kernel's memory limit */
- if ( user_to_phys ( bottom, len ) > bzimg->mem_limit ) {
+ if ( ( virt_to_phys ( bottom ) + len ) > bzimg->mem_limit ) {
DBGC ( image, "bzImage %p not enough space for initrds\n",
image );
return -ENOBUFS;
/* Find highest usable address */
top = ( highest->data + bzimage_align ( highest->len ) );
- if ( user_to_phys ( top, -1 ) > bzimg->mem_limit ) {
- top = phys_to_user ( ( bzimg->mem_limit + 1 ) &
+ if ( ( virt_to_phys ( top ) - 1UL ) > bzimg->mem_limit ) {
+ top = phys_to_virt ( ( bzimg->mem_limit + 1 ) &
~( INITRD_ALIGN - 1 ) );
}
DBGC ( image, "bzImage %p loading initrds from %#08lx downwards\n",
- image, user_to_phys ( top, -1 ) );
+ image, ( virt_to_phys ( top ) - 1UL ) );
/* Load initrds in order */
for_each_image ( initrd ) {
/* Record initrd location */
if ( ! bzimg->ramdisk_image )
- bzimg->ramdisk_image = user_to_phys ( dest, 0 );
- bzimg->ramdisk_size = ( user_to_phys ( dest, len ) -
+ bzimg->ramdisk_image = virt_to_phys ( dest );
+ bzimg->ramdisk_size = ( virt_to_phys ( dest ) + len -
bzimg->ramdisk_image );
}
DBGC ( image, "bzImage %p initrds at [%#08lx,%#08lx)\n",
filesz = image->len;
memsz = filesz;
- buffer = phys_to_user ( COM32_START_PHYS );
+ buffer = phys_to_virt ( COM32_START_PHYS );
if ( ( rc = prep_segment ( buffer, filesz, memsz ) ) != 0 ) {
DBGC ( image, "COM32 %p: could not prepare segment: %s\n",
image, strerror ( rc ) );
current -= len;
DBGC ( &images, "INITRD squashing %s [%#08lx,%#08lx)->"
"[%#08lx,%#08lx)\n", highest->name,
- user_to_phys ( highest->data, 0 ),
- user_to_phys ( highest->data, highest->len ),
- user_to_phys ( current, 0 ),
- user_to_phys ( current, highest->len ) );
+ virt_to_phys ( highest->data ),
+ ( virt_to_phys ( highest->data ) + highest->len ),
+ virt_to_phys ( current ),
+ ( virt_to_phys ( current ) + highest->len ) );
memmove ( current, highest->data, highest->len );
highest->data = current;
}
current -= len;
DBGC ( &images, "INITRD copying %s [%#08lx,%#08lx)->"
"[%#08lx,%#08lx)\n", initrd->name,
- user_to_phys ( initrd->data, 0 ),
- user_to_phys ( initrd->data, initrd->len ),
- user_to_phys ( current, 0 ),
- user_to_phys ( current, initrd->len ) );
+ virt_to_phys ( initrd->data ),
+ ( virt_to_phys ( initrd->data ) + initrd->len ),
+ virt_to_phys ( current ),
+ ( virt_to_phys ( current ) + initrd->len ) );
memcpy ( current, initrd->data, initrd->len );
initrd->data = current;
}
size_t new_len;
DBGC ( &images, "INITRD swapping %s [%#08lx,%#08lx)<->[%#08lx,%#08lx) "
- "%s\n", low->name, user_to_phys ( low->data, 0 ),
- user_to_phys ( low->data, low->len ),
- user_to_phys ( high->data, 0 ),
- user_to_phys ( high->data, high->len ), high->name );
+ "%s\n", low->name, virt_to_phys ( low->data ),
+ ( virt_to_phys ( low->data ) + low->len ),
+ virt_to_phys ( high->data ),
+ ( virt_to_phys ( high->data ) + high->len ), high->name );
/* Round down length of free space */
free_len &= ~( INITRD_ALIGN - 1 );
/* Dump initrd locations */
for_each_image ( initrd ) {
DBGC ( &images, "INITRD %s at [%#08lx,%#08lx)\n",
- initrd->name, user_to_phys ( initrd->data, 0 ),
- user_to_phys ( initrd->data, initrd->len ) );
- DBGC2_MD5A ( &images, user_to_phys ( initrd->data, 0 ),
+ initrd->name, virt_to_phys ( initrd->data ),
+ ( virt_to_phys ( initrd->data ) + initrd->len ) );
+ DBGC2_MD5A ( &images, virt_to_phys ( initrd->data ),
initrd->data, initrd->len );
}
}
/* Debug */
DBGC ( &images, "INITRD region [%#08lx,%#08lx)\n",
- user_to_phys ( bottom, 0 ), user_to_phys ( top, 0 ) );
+ virt_to_phys ( bottom ), virt_to_phys ( top ) );
initrd_dump();
/* Squash initrds as high as possible in memory */
start = ( ( start + 0xfff ) & ~0xfff );
/* Prepare segment */
- if ( ( rc = prep_segment ( phys_to_user ( start ),
+ if ( ( rc = prep_segment ( phys_to_virt ( start ),
module_image->len,
module_image->len ) ) != 0 ) {
DBGC ( image, "MULTIBOOT %p could not prepare module "
}
/* Copy module */
- memcpy ( phys_to_user ( start ), module_image->data,
+ memcpy ( phys_to_virt ( start ), module_image->data,
module_image->len );
/* Add module to list */
( image->len - offset ) );
memsz = ( hdr->mb.bss_end_addr ?
( hdr->mb.bss_end_addr - hdr->mb.load_addr ) : filesz );
- buffer = phys_to_user ( hdr->mb.load_addr );
+ buffer = phys_to_virt ( hdr->mb.load_addr );
if ( ( rc = prep_segment ( buffer, filesz, memsz ) ) != 0 ) {
DBGC ( image, "MULTIBOOT %p could not prepare segment: %s\n",
image, strerror ( rc ) );
/* Calculate segment load address */
switch ( NBI_LOADADDR_FLAGS ( sh.flags ) ) {
case NBI_LOADADDR_ABS:
- dest = phys_to_user ( sh.loadaddr );
+ dest = phys_to_virt ( sh.loadaddr );
break;
case NBI_LOADADDR_AFTER:
dest = ( dest + memsz + sh.loadaddr );
* maintains backwards compatibility with
* previous versions of Etherboot.
*/
- dest = phys_to_user ( ( extmemsize() + 1024 ) * 1024
+ dest = phys_to_virt ( ( extmemsize() + 1024 ) * 1024
- sh.loadaddr );
break;
default:
return -ENOTTY;
}
DBGC ( image, "SDI %p image at %08lx+%08zx\n",
- image, user_to_phys ( image->data, 0 ), image->len );
- DBGC ( image, "SDI %p boot code at %08lx+%llx\n", image,
- user_to_phys ( image->data, sdi.boot_offset ), sdi.boot_size );
+ image, virt_to_phys ( image->data ), image->len );
+ DBGC ( image, "SDI %p boot code at %08llx+%llx\n", image,
+ ( virt_to_phys ( image->data ) + sdi.boot_offset ),
+ sdi.boot_size );
/* Copy boot code */
memcpy ( real_to_user ( SDI_BOOT_SEG, SDI_BOOT_OFF ),
( image->data + sdi.boot_offset ), sdi.boot_size );
/* Jump to boot code */
- sdiptr = ( user_to_phys ( image->data, 0 ) | SDI_WTF );
+ sdiptr = ( virt_to_phys ( image->data ) | SDI_WTF );
__asm__ __volatile__ ( REAL_CODE ( "ljmp %0, %1\n\t" )
: : "i" ( SDI_BOOT_SEG ),
"i" ( SDI_BOOT_OFF ),
assert ( id <= control->apic_max );
/* Read status report */
- copy_from_user ( &status, phys_to_user ( control->status ),
+ copy_from_user ( &status, phys_to_virt ( control->status ),
( id * sizeof ( status ) ), sizeof ( status ) );
/* Ignore empty optional status reports */
/* Construct control structure */
memset ( &control, 0, sizeof ( control ) );
control.desc = virt_to_phys ( update->desc );
- control.status = user_to_phys ( status, 0 );
+ control.status = virt_to_phys ( status );
vendor = update->vendor;
if ( vendor ) {
control.ver_clear = vendor->ver_clear;
/* Populate descriptor */
desc.signature = hdr.signature;
desc.version = hdr.version;
- desc.address = user_to_phys ( image->data,
- ( start + sizeof ( hdr ) ) );
+ desc.address = ( virt_to_phys ( image->data ) +
+ start + sizeof ( hdr ) );
/* Add non-extended descriptor, if applicable */
ucode_describe ( image, start, &ucode_intel, &desc, hdr.platforms,
copy_from_user ( &patch, image->data, ( start + offset ),
sizeof ( patch ) );
desc.version = patch.version;
- desc.address = user_to_phys ( image->data, ( start + offset ) );
+ desc.address = ( virt_to_phys ( image->data ) +
+ start + offset );
offset += phdr.len;
/* Parse equivalence table to find matching signatures */
/**
- * Convert physical address to user pointer
+ * Convert physical address to virtual address
*
- * @v phys_addr Physical address
- * @ret userptr User pointer
+ * @v phys Physical address
+ * @ret virt Virtual address
*/
-static inline __always_inline userptr_t
-UACCESS_INLINE ( librm, phys_to_user ) ( unsigned long phys_addr ) {
+static inline __always_inline void *
+UACCESS_INLINE ( librm, phys_to_virt ) ( unsigned long phys ) {
/* In a 64-bit build, any valid physical address is directly
* usable as a virtual address, since the low 4GB is
* identity-mapped.
*/
if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) )
- return ( ( userptr_t ) phys_addr );
+ return ( ( void * ) phys );
/* In a 32-bit build, subtract virt_offset */
- return ( ( userptr_t ) ( phys_addr - virt_offset ) );
+ return ( ( void * ) ( phys - virt_offset ) );
}
/**
- * Convert user buffer to physical address
+ * Convert virtual address to physical address
*
- * @v userptr User pointer
- * @v offset Offset from user pointer
- * @ret phys_addr Physical address
+ * @v virt Virtual address
+ * @ret phys Physical address
*/
-static inline __always_inline unsigned long
-UACCESS_INLINE ( librm, user_to_phys ) ( userptr_t userptr, off_t offset ) {
- unsigned long addr = ( ( unsigned long ) ( userptr + offset ) );
+static inline __always_inline physaddr_t
+UACCESS_INLINE ( librm, virt_to_phys ) ( volatile const void *virt ) {
+ physaddr_t addr = ( ( physaddr_t ) virt );
/* In a 64-bit build, any virtual address in the low 4GB is
* directly usable as a physical address, since the low 4GB is
*/
static inline __always_inline userptr_t
real_to_user ( unsigned int segment, unsigned int offset ) {
- return ( phys_to_user ( ( segment << 4 ) + offset ) );
+ return ( phys_to_virt ( ( segment << 4 ) + offset ) );
}
/**
/* Record cached DHCPACK */
if ( ( rc = cachedhcp_record ( &cached_dhcpack, 0,
- phys_to_user ( cached_dhcpack_phys ),
+ phys_to_virt ( cached_dhcpack_phys ),
sizeof ( BOOTPLAYER_t ) ) ) != 0 ) {
DBGC ( colour, "CACHEDHCP could not record DHCPACK: %s\n",
strerror ( rc ) );
return rc;
/* Fill in entry point descriptor structure */
- smbios->address = phys_to_user ( entry.smbios_address );
+ smbios->address = phys_to_virt ( entry.smbios_address );
smbios->len = entry.smbios_len;
smbios->count = entry.smbios_count;
smbios->version = SMBIOS_VERSION ( entry.major, entry.minor );
}
/* Fill in entry point descriptor structure */
- smbios->address = phys_to_user ( entry.smbios_address );
+ smbios->address = phys_to_virt ( entry.smbios_address );
smbios->len = entry.smbios_len;
smbios->count = 0;
smbios->version = SMBIOS_VERSION ( entry.major, entry.minor );
if ( ( addr.count == 0xff ) ||
( ( addr.buffer.segment == 0xffff ) &&
( addr.buffer.offset == 0xffff ) ) ) {
- buffer = phys_to_user ( addr.buffer_phys );
+ buffer = phys_to_virt ( addr.buffer_phys );
DBGC2 ( sandev->drive, "%08llx",
( ( unsigned long long ) addr.buffer_phys ) );
} else {
/* Read from boot catalog */
if ( ( rc = sandev_read ( sandev, start, command.count,
- phys_to_user ( command.buffer ) ) ) != 0 ) {
+ phys_to_virt ( command.buffer ) ) ) != 0 ) {
DBGC ( sandev->drive, "INT13 drive %02x could not read boot "
"catalog: %s\n", sandev->drive, strerror ( rc ) );
return -INT13_STATUS_READ_ERROR;
"catalog (status %04x)\n", drive, status );
return -EIO;
}
- copy_from_user ( &catalog, phys_to_user ( eltorito_cmd.buffer ), 0,
+ copy_from_user ( &catalog, phys_to_virt ( eltorito_cmd.buffer ), 0,
sizeof ( catalog ) );
/* Sanity checks */
/* Use largest block */
if ( region_len > len ) {
DBG ( "...new best block found\n" );
- *start = phys_to_user ( region_start );
+ *start = phys_to_virt ( region_start );
len = region_len;
}
}
heap_size = largest_memblock ( &base );
bottom = top = ( base + heap_size );
DBG ( "External heap grows downwards from %lx (size %zx)\n",
- user_to_phys ( top, 0 ), heap_size );
+ virt_to_phys ( top ), heap_size );
}
/**
sizeof ( extmem ) );
if ( extmem.used )
break;
- DBG ( "EXTMEM freeing [%lx,%lx)\n", user_to_phys ( bottom, 0 ),
- user_to_phys ( bottom, extmem.size ) );
+ DBG ( "EXTMEM freeing [%lx,%lx)\n", virt_to_phys ( bottom ),
+ ( virt_to_phys ( bottom ) + extmem.size ) );
len = ( extmem.size + sizeof ( extmem ) );
bottom += len;
heap_size += len;
ptr = bottom = ( bottom - sizeof ( extmem ) );
heap_size -= sizeof ( extmem );
DBG ( "EXTMEM allocating [%lx,%lx)\n",
- user_to_phys ( ptr, 0 ), user_to_phys ( ptr, 0 ) );
+ virt_to_phys ( ptr ), virt_to_phys ( ptr ) );
extmem.size = 0;
}
extmem.used = ( new_size > 0 );
if ( ptr == bottom ) {
/* Update block */
new = ( ptr - ( new_size - extmem.size ) );
- align = ( user_to_phys ( new, 0 ) & ( EM_ALIGN - 1 ) );
+ align = ( virt_to_phys ( new ) & ( EM_ALIGN - 1 ) );
new_size += align;
new -= align;
if ( new_size > ( heap_size + extmem.size ) ) {
return UNULL;
}
DBG ( "EXTMEM expanding [%lx,%lx) to [%lx,%lx)\n",
- user_to_phys ( ptr, 0 ),
- user_to_phys ( ptr, extmem.size ),
- user_to_phys ( new, 0 ),
- user_to_phys ( new, new_size ));
+ virt_to_phys ( ptr ),
+ ( virt_to_phys ( ptr ) + extmem.size ),
+ virt_to_phys ( new ),
+ ( virt_to_phys ( new ) + new_size ) );
memmove ( new, ptr, ( ( extmem.size < new_size ) ?
extmem.size : new_size ) );
bottom = new;
if ( new_size > extmem.size ) {
/* Refuse to expand */
DBG ( "EXTMEM cannot expand [%lx,%lx)\n",
- user_to_phys ( ptr, 0 ),
- user_to_phys ( ptr, extmem.size ) );
+ virt_to_phys ( ptr ),
+ ( virt_to_phys ( ptr ) + extmem.size ) );
return UNULL;
}
}
/* Collect any free blocks and update hidden memory region */
ecollect_free();
- hide_umalloc ( user_to_phys ( bottom, ( ( bottom == top ) ?
- 0 : -sizeof ( extmem ) ) ),
- user_to_phys ( top, 0 ) );
+ hide_umalloc ( ( virt_to_phys ( bottom ) -
+ ( ( bottom == top ) ? 0 : sizeof ( extmem ) ) ),
+ virt_to_phys ( top ) );
return ( new_size ? new : UNOWHERE );
}
continue;
/* Extract RSDT */
- rsdt = phys_to_user ( le32_to_cpu ( rsdp.rsdt ) );
+ rsdt = phys_to_virt ( le32_to_cpu ( rsdp.rsdt ) );
DBGC ( rsdt, "RSDT %#08lx found via RSDP %#08lx\n",
- user_to_phys ( rsdt, 0 ),
- user_to_phys ( start, offset ) );
+ virt_to_phys ( rsdt ),
+ ( virt_to_phys ( start ) + offset ) );
return rsdt;
}
}
/* Search fixed BIOS area */
- rsdt = rsdp_find_rsdt_range ( phys_to_user ( RSDP_BIOS_START ),
+ rsdt = rsdp_find_rsdt_range ( phys_to_virt ( RSDP_BIOS_START ),
RSDP_BIOS_LEN );
if ( rsdt )
return rsdt;
vesafb_font();
/* Initialise frame buffer console */
- if ( ( rc = fbcon_init ( &vesafb.fbcon, phys_to_user ( vesafb.start ),
+ if ( ( rc = fbcon_init ( &vesafb.fbcon, phys_to_virt ( vesafb.start ),
&vesafb.pixel, &vesafb.map, &vesafb.font,
config ) ) != 0 )
goto err_fbcon_init;
}
/* Read entire file */
- pxe_tftp.buffer = phys_to_user ( tftp_read_file->Buffer );
+ pxe_tftp.buffer = phys_to_virt ( tftp_read_file->Buffer );
pxe_tftp.size = tftp_read_file->BufferSize;
while ( ( rc = pxe_tftp.rc ) == -EINPROGRESS )
step();
DBGC ( &com32_regs, "COM32 INT%x in %#08lx out %#08lx\n",
interrupt, inregs_phys, outregs_phys );
- memcpy ( virt_to_user( &com32_regs ), phys_to_user ( inregs_phys ),
+ memcpy ( virt_to_user( &com32_regs ), phys_to_virt ( inregs_phys ),
sizeof ( com32sys_t ) );
com32_int_vector = interrupt;
: : );
if ( outregs_phys ) {
- memcpy ( phys_to_user ( outregs_phys ),
+ memcpy ( phys_to_virt ( outregs_phys ),
virt_to_user ( &com32_regs ), sizeof ( com32sys_t ) );
}
}
DBGC ( &com32_regs, "COM32 farcall %04x:%04x in %#08lx out %#08lx\n",
( proc >> 16 ), ( proc & 0xffff ), inregs_phys, outregs_phys );
- memcpy ( virt_to_user( &com32_regs ), phys_to_user ( inregs_phys ),
+ memcpy ( virt_to_user( &com32_regs ), phys_to_virt ( inregs_phys ),
sizeof ( com32sys_t ) );
com32_farcall_proc = proc;
: : );
if ( outregs_phys ) {
- memcpy ( phys_to_user ( outregs_phys ),
+ memcpy ( phys_to_virt ( outregs_phys ),
virt_to_user ( &com32_regs ), sizeof ( com32sys_t ) );
}
}
DBGC ( &com32_regs, "COM32 cfarcall %04x:%04x params %#08lx+%#zx\n",
( proc >> 16 ), ( proc & 0xffff ), stack, stacksz );
- copy_user_to_rm_stack ( phys_to_user ( stack ), stacksz );
+ copy_user_to_rm_stack ( phys_to_virt ( stack ), stacksz );
com32_farcall_proc = proc;
__asm__ __volatile__ (
/* Do the copies */
for ( i = 0; i < count; i++ ) {
- userptr_t src_u = phys_to_user ( shuf[ i ].src );
- userptr_t dest_u = phys_to_user ( shuf[ i ].dest );
+ userptr_t src_u = phys_to_virt ( shuf[ i ].src );
+ userptr_t dest_u = phys_to_virt ( shuf[ i ].dest );
if ( shuf[ i ].src == 0xFFFFFFFF ) {
/* Fill with 0 instead of copying */
copy_to_real ( ( vector << 8 ), 0, sipi, ( ( size_t ) sipi_len ) );
}
-PROVIDE_UACCESS_INLINE ( librm, phys_to_user );
-PROVIDE_UACCESS_INLINE ( librm, user_to_phys );
+PROVIDE_UACCESS_INLINE ( librm, phys_to_virt );
+PROVIDE_UACCESS_INLINE ( librm, virt_to_phys );
PROVIDE_UACCESS_INLINE ( librm, virt_to_user );
PROVIDE_UACCESS_INLINE ( librm, memchr_user );
PROVIDE_IOMAP_INLINE ( pages, io_to_bus );
copy_from_user ( &acpi, rsdt, 0, sizeof ( acpi ) );
if ( acpi.signature != cpu_to_le32 ( RSDT_SIGNATURE ) ) {
DBGC ( colour, "RSDT %#08lx has invalid signature:\n",
- user_to_phys ( rsdt, 0 ) );
- DBGC_HDA ( colour, user_to_phys ( rsdt, 0 ), &acpi,
+ virt_to_phys ( rsdt ) );
+ DBGC_HDA ( colour, virt_to_phys ( rsdt ), &acpi,
sizeof ( acpi ) );
return UNULL;
}
len = le32_to_cpu ( acpi.length );
if ( len < sizeof ( rsdtab->acpi ) ) {
DBGC ( colour, "RSDT %#08lx has invalid length:\n",
- user_to_phys ( rsdt, 0 ) );
- DBGC_HDA ( colour, user_to_phys ( rsdt, 0 ), &acpi,
+ virt_to_phys ( rsdt ) );
+ DBGC_HDA ( colour, virt_to_phys ( rsdt ), &acpi,
sizeof ( acpi ) );
return UNULL;
}
sizeof ( entry ) );
/* Read table header */
- table = phys_to_user ( entry );
+ table = phys_to_virt ( entry );
copy_from_user ( &acpi.signature, table, 0,
sizeof ( acpi.signature ) );
/* Check table integrity */
if ( acpi_checksum ( table ) != 0 ) {
DBGC ( colour, "RSDT %#08lx found %s with bad "
- "checksum at %08lx\n", user_to_phys ( rsdt, 0 ),
+ "checksum at %08lx\n", virt_to_phys ( rsdt ),
acpi_name ( signature ),
- user_to_phys ( table, 0 ) );
+ virt_to_phys ( table ) );
break;
}
DBGC ( colour, "RSDT %#08lx found %s at %08lx\n",
- user_to_phys ( rsdt, 0 ), acpi_name ( signature ),
- user_to_phys ( table, 0 ) );
+ virt_to_phys ( rsdt ), acpi_name ( signature ),
+ virt_to_phys ( table ) );
return table;
}
DBGC ( colour, "RSDT %#08lx could not find %s\n",
- user_to_phys ( rsdt, 0 ), acpi_name ( signature ) );
+ virt_to_phys ( rsdt ), acpi_name ( signature ) );
return UNULL;
}
if ( buf != cpu_to_le32 ( signature ) )
continue;
DBGC ( zsdt, "DSDT/SSDT %#08lx found %s at offset %#zx\n",
- user_to_phys ( zsdt, 0 ), acpi_name ( signature ),
+ virt_to_phys ( zsdt ), acpi_name ( signature ),
offset );
/* Attempt to extract data */
fadt = acpi_table ( FADT_SIGNATURE, 0 );
if ( fadt ) {
copy_from_user ( &fadtab, fadt, 0, sizeof ( fadtab ) );
- dsdt = phys_to_user ( fadtab.dsdt );
+ dsdt = phys_to_virt ( fadtab.dsdt );
if ( ( rc = acpi_zsdt ( dsdt, signature, data,
extract ) ) == 0 )
return 0;
DBGC2 ( blktrans, "BLKTRANS %p created", blktrans );
if ( buffer ) {
DBGC2 ( blktrans, " for %#lx+%#zx",
- user_to_phys ( buffer, 0 ), size );
+ virt_to_phys ( buffer ), size );
}
DBGC2 ( blktrans, "\n" );
return 0;
/* Store as cached packet */
DBGC ( colour, "CACHEDHCP %s at %#08lx+%#zx/%#zx\n", cache->name,
- user_to_phys ( data, 0 ), len, max_len );
+ virt_to_phys ( data ), len, max_len );
cache->dhcppkt = dhcppkt;
cache->vlan = vlan;
/* Derive overall length */
fbcon->len = ( pixel->height * pixel->stride );
DBGC ( fbcon, "FBCON %p at [%08lx,%08lx)\n", fbcon,
- user_to_phys ( fbcon->start, 0 ),
- user_to_phys ( fbcon->start, fbcon->len ) );
+ virt_to_phys ( fbcon->start ),
+ ( virt_to_phys ( fbcon->start ) + fbcon->len ) );
/* Calculate margin. If the actual screen size is larger than
* the requested screen size, then update the margins so that
image->flags |= IMAGE_REGISTERED;
list_add_tail ( &image->list, &images );
DBGC ( image, "IMAGE %s at [%lx,%lx) registered\n",
- image->name, user_to_phys ( image->data, 0 ),
- user_to_phys ( image->data, image->len ) );
+ image->name, virt_to_phys ( image->data ),
+ ( virt_to_phys ( image->data ) + image->len ) );
/* Try to detect image type, if applicable. Ignore failures,
* since we expect to handle some unrecognised images
*/
/* Flat address space user access API */
-PROVIDE_UACCESS_INLINE ( flat, phys_to_user );
-PROVIDE_UACCESS_INLINE ( flat, user_to_phys );
+PROVIDE_UACCESS_INLINE ( flat, phys_to_virt );
+PROVIDE_UACCESS_INLINE ( flat, virt_to_phys );
PROVIDE_UACCESS_INLINE ( flat, virt_to_user );
PROVIDE_UACCESS_INLINE ( flat, memchr_user );
cmd->data_buffer_formats |= SRP_CMD_DO_FMT_DIRECT;
data_out = iob_put ( iobuf, sizeof ( *data_out ) );
data_out->address =
- cpu_to_be64 ( user_to_phys ( command->data_out, 0 ) );
+ cpu_to_be64 ( virt_to_phys ( command->data_out ) );
data_out->handle = ntohl ( srpdev->memory_handle );
data_out->len = ntohl ( command->data_out_len );
}
cmd->data_buffer_formats |= SRP_CMD_DI_FMT_DIRECT;
data_in = iob_put ( iobuf, sizeof ( *data_in ) );
data_in->address =
- cpu_to_be64 ( user_to_phys ( command->data_in, 0 ) );
+ cpu_to_be64 ( virt_to_phys ( command->data_in ) );
data_in->handle = ntohl ( srpdev->memory_handle );
data_in->len = ntohl ( command->data_in_len );
}
} else {
assert ( arbel->firmware_len == fw_len );
}
- fw_base = user_to_phys ( arbel->firmware_area, 0 );
+ fw_base = virt_to_phys ( arbel->firmware_area );
DBGC ( arbel, "Arbel %p firmware area at [%08lx,%08lx)\n",
arbel, fw_base, ( fw_base + fw_len ) );
if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_fa,
assert ( arbel->icm_len == icm_len );
assert ( arbel->icm_aux_len == icm_aux_len );
}
- icm_phys = user_to_phys ( arbel->icm, 0 );
+ icm_phys = virt_to_phys ( arbel->icm );
/* Allocate doorbell UAR */
arbel->db_rec = malloc_phys ( ARBEL_PAGE_SIZE, ARBEL_PAGE_SIZE );
for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j,
next_page_addr += GOLAN_PAGE_SIZE ) {
addr = next_page_addr;
- if (GOLAN_PAGE_MASK & user_to_phys(addr, 0)) {
- DBGC (golan ,"Addr not Page alligned [%lx]\n", user_to_phys(addr, 0));
+ if (GOLAN_PAGE_MASK & virt_to_phys(addr)) {
+ DBGC (golan ,"Addr not Page alligned [%lx]\n", virt_to_phys(addr));
}
mailbox->mblock.data[j] = USR_2_BE64_BUS(addr);
}
#define VIRT_2_BE64_BUS( addr ) cpu_to_be64(((unsigned long long )virt_to_bus(addr)))
#define BE64_BUS_2_VIRT( addr ) bus_to_virt(be64_to_cpu(addr))
-#define USR_2_BE64_BUS( addr ) cpu_to_be64(((unsigned long long )user_to_phys(addr, 0)))
-#define BE64_BUS_2_USR( addr ) be64_to_cpu(phys_to_user(addr))
+#define USR_2_BE64_BUS( addr ) cpu_to_be64(((unsigned long long )virt_to_phys(addr)))
+#define BE64_BUS_2_USR( addr ) be64_to_cpu(phys_to_virt(addr))
#define GET_INBOX(golan, idx) (&(((struct mbox *)(golan->mboxes.inbox))[idx]))
#define GET_OUTBOX(golan, idx) (&(((struct mbox *)(golan->mboxes.outbox))[idx]))
} else {
assert ( hermon->firmware_len == fw_len );
}
- fw_base = user_to_phys ( hermon->firmware_area, 0 );
+ fw_base = virt_to_phys ( hermon->firmware_area );
DBGC ( hermon, "Hermon %p firmware area at physical [%08lx,%08lx)\n",
hermon, fw_base, ( fw_base + fw_len ) );
if ( ( rc = hermon_map_vpm ( hermon, hermon_cmd_map_fa,
assert ( hermon->icm_len == icm_len );
assert ( hermon->icm_aux_len == icm_aux_len );
}
- icm_phys = user_to_phys ( hermon->icm, 0 );
+ icm_phys = virt_to_phys ( hermon->icm );
/* Map ICM auxiliary area */
DBGC ( hermon, "Hermon %p mapping ICM AUX => %08lx\n",
port->rx_cons = 0;
/* Map receive region */
- exanic_write_base ( phys_to_bus ( user_to_phys ( port->rx, 0 ) ),
+ exanic_write_base ( phys_to_bus ( virt_to_phys ( port->rx ) ),
( port->regs + EXANIC_PORT_RX_BASE ) );
/* Enable promiscuous mode */
DBGC ( port, "EXANIC %s port %d TX [%#05zx,%#05zx) TXF %#02x RX "
"[%#lx,%#lx)\n", netdev->name, index, port->tx_offset,
( port->tx_offset + tx_len ), port->txf_slot,
- user_to_phys ( port->rx, 0 ),
- user_to_phys ( port->rx, EXANIC_RX_LEN ) );
+ virt_to_phys ( port->rx ),
+ ( virt_to_phys ( port->rx ) + EXANIC_RX_LEN ) );
/* Set initial link state */
exanic_check_link ( netdev );
static int gve_register ( struct gve_nic *gve, struct gve_qpl *qpl ) {
struct gve_pages *pages = &gve->scratch.buf->pages;
union gve_admin_command *cmd;
- physaddr_t addr;
+ void *addr;
unsigned int i;
int rc;
/* Build page address list */
for ( i = 0 ; i < qpl->count ; i++ ) {
- addr = user_to_phys ( qpl->data, ( i * GVE_PAGE_SIZE ) );
- pages->addr[i] = cpu_to_be64 ( dma_phys ( &qpl->map, addr ) );
+ addr = ( qpl->data + ( i * GVE_PAGE_SIZE ) );
+ pages->addr[i] = cpu_to_be64 ( dma ( &qpl->map, addr ) );
}
/* Construct request */
union gve_admin_command *cmd ) {
struct gve_admin_create_tx *create = &cmd->create_tx;
const struct gve_queue_type *type = queue->type;
- physaddr_t desc = user_to_phys ( queue->desc, 0 );
/* Construct request parameters */
create->res = cpu_to_be64 ( dma ( &queue->res_map, queue->res ) );
- create->desc = cpu_to_be64 ( dma_phys ( &queue->desc_map, desc ) );
+ create->desc = cpu_to_be64 ( dma ( &queue->desc_map, queue->desc ) );
create->qpl_id = cpu_to_be32 ( type->qpl );
create->notify_id = cpu_to_be32 ( type->irq );
}
union gve_admin_command *cmd ) {
struct gve_admin_create_rx *create = &cmd->create_rx;
const struct gve_queue_type *type = queue->type;
- physaddr_t desc = user_to_phys ( queue->desc, 0 );
- physaddr_t cmplt = user_to_phys ( queue->cmplt, 0 );
/* Construct request parameters */
create->notify_id = cpu_to_be32 ( type->irq );
create->res = cpu_to_be64 ( dma ( &queue->res_map, queue->res ) );
- create->desc = cpu_to_be64 ( dma_phys ( &queue->desc_map, desc ) );
- create->cmplt = cpu_to_be64 ( dma_phys ( &queue->cmplt_map, cmplt ) );
+ create->desc = cpu_to_be64 ( dma ( &queue->desc_map, queue->desc ) );
+ create->cmplt = cpu_to_be64 ( dma ( &queue->cmplt_map, queue->cmplt ));
create->qpl_id = cpu_to_be32 ( type->qpl );
create->bufsz = cpu_to_be16 ( GVE_BUF_SIZE );
}
return -ENOMEM;
DBGC ( gve, "GVE %p QPL %#08x at [%08lx,%08lx)\n",
- gve, qpl->id, user_to_phys ( qpl->data, 0 ),
- user_to_phys ( qpl->data, len ) );
+ gve, qpl->id, virt_to_phys ( qpl->data ),
+ ( virt_to_phys ( qpl->data ) + len ) );
return 0;
}
goto err_desc;
}
DBGC ( gve, "GVE %p %s descriptors at [%08lx,%08lx)\n",
- gve, type->name, user_to_phys ( queue->desc, 0 ),
- user_to_phys ( queue->desc, desc_len ) );
+ gve, type->name, virt_to_phys ( queue->desc ),
+ ( virt_to_phys ( queue->desc ) + desc_len ) );
/* Allocate completions */
if ( cmplt_len ) {
goto err_cmplt;
}
DBGC ( gve, "GVE %p %s completions at [%08lx,%08lx)\n",
- gve, type->name, user_to_phys ( queue->cmplt, 0 ),
- user_to_phys ( queue->cmplt, cmplt_len ) );
+ gve, type->name, virt_to_phys ( queue->cmplt ),
+ ( virt_to_phys ( queue->cmplt ) + cmplt_len ) );
}
/* Allocate queue resources */
writeq ( TXNIC_QS_SQ_CFG_RESET, ( vnic->regs + TXNIC_QS_SQ_CFG(0) ) );
/* Configure and enable send queue */
- writeq ( user_to_phys ( vnic->sq.sqe, 0 ),
+ writeq ( virt_to_phys ( vnic->sq.sqe ),
( vnic->regs + TXNIC_QS_SQ_BASE(0) ) );
writeq ( ( TXNIC_QS_SQ_CFG_ENA | TXNIC_QS_SQ_CFG_QSIZE_1K ),
( vnic->regs + TXNIC_QS_SQ_CFG(0) ) );
DBGC ( vnic, "TXNIC %s SQ at [%08lx,%08lx)\n",
- vnic->name, user_to_phys ( vnic->sq.sqe, 0 ),
- user_to_phys ( vnic->sq.sqe, TXNIC_SQ_SIZE ) );
+ vnic->name, virt_to_phys ( vnic->sq.sqe ),
+ ( virt_to_phys ( vnic->sq.sqe ) + TXNIC_SQ_SIZE ) );
return 0;
}
( vnic->regs + TXNIC_QS_RBDR_CFG(0) ) );
/* Configure and enable receive buffer descriptor ring */
- writeq ( user_to_phys ( vnic->rq.rqe, 0 ),
+ writeq ( virt_to_phys ( vnic->rq.rqe ),
( vnic->regs + TXNIC_QS_RBDR_BASE(0) ) );
writeq ( ( TXNIC_QS_RBDR_CFG_ENA | TXNIC_QS_RBDR_CFG_QSIZE_8K |
TXNIC_QS_RBDR_CFG_LINES ( TXNIC_RQE_SIZE /
writeq ( TXNIC_QS_RQ_CFG_ENA, ( vnic->regs + TXNIC_QS_RQ_CFG(0) ) );
DBGC ( vnic, "TXNIC %s RQ at [%08lx,%08lx)\n",
- vnic->name, user_to_phys ( vnic->rq.rqe, 0 ),
- user_to_phys ( vnic->rq.rqe, TXNIC_RQ_SIZE ) );
+ vnic->name, virt_to_phys ( vnic->rq.rqe ),
+ ( virt_to_phys ( vnic->rq.rqe ) + TXNIC_RQ_SIZE ) );
return 0;
}
writeq ( TXNIC_QS_CQ_CFG_RESET, ( vnic->regs + TXNIC_QS_CQ_CFG(0) ) );
/* Configure and enable completion queue */
- writeq ( user_to_phys ( vnic->cq.cqe, 0 ),
+ writeq ( virt_to_phys ( vnic->cq.cqe ),
( vnic->regs + TXNIC_QS_CQ_BASE(0) ) );
writeq ( ( TXNIC_QS_CQ_CFG_ENA | TXNIC_QS_CQ_CFG_QSIZE_256 ),
( vnic->regs + TXNIC_QS_CQ_CFG(0) ) );
DBGC ( vnic, "TXNIC %s CQ at [%08lx,%08lx)\n",
- vnic->name, user_to_phys ( vnic->cq.cqe, 0 ),
- user_to_phys ( vnic->cq.cqe, TXNIC_CQ_SIZE ) );
+ vnic->name, virt_to_phys ( vnic->cq.cqe ),
+ ( virt_to_phys ( vnic->cq.cqe ) + TXNIC_CQ_SIZE ) );
return 0;
}
default:
DBGC ( vnic, "TXNIC %s unknown completion type %d\n",
vnic->name, cqe.common.cqe_type );
- DBGC_HDA ( vnic, user_to_phys ( vnic->cq.cqe, offset ),
+ DBGC_HDA ( vnic,
+ ( virt_to_phys ( vnic->cq.cqe ) + offset ),
&cqe, sizeof ( cqe ) );
break;
}
}
/* Populate scratchpad array */
- addr = dma_phys ( &scratch->buffer_map,
- user_to_phys ( scratch->buffer, 0 ) );
+ addr = dma ( &scratch->buffer_map, scratch->buffer );
for ( i = 0 ; i < scratch->count ; i++ ) {
scratch->array[i] = cpu_to_le64 ( addr );
addr += xhci->pagesize;
scratch->array ) );
DBGC2 ( xhci, "XHCI %s scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n",
- xhci->name, user_to_phys ( scratch->buffer, 0 ),
- user_to_phys ( scratch->buffer, buffer_len ),
+ xhci->name, virt_to_phys ( scratch->buffer ),
+ ( virt_to_phys ( scratch->buffer ) + buffer_len ),
virt_to_phys ( scratch->array ),
( virt_to_phys ( scratch->array ) + array_len ) );
return 0;
return rc;
/* Create image */
- if ( ( rc = imgmem ( opts.name, phys_to_user ( data ), len ) ) != 0 )
+ if ( ( rc = imgmem ( opts.name, phys_to_virt ( data ), len ) ) != 0 )
return rc;
return 0;
*/
static int elf_load_segment ( struct image *image, Elf_Phdr *phdr,
physaddr_t dest ) {
- userptr_t buffer = phys_to_user ( dest );
+ userptr_t buffer = phys_to_virt ( dest );
int rc;
DBGC ( image, "ELF %p loading segment [%x,%x) to [%lx,%lx,%lx)\n",
*/
int prep_segment ( userptr_t segment, size_t filesz, size_t memsz ) {
struct memory_map memmap;
- physaddr_t start = user_to_phys ( segment, 0 );
- physaddr_t mid = user_to_phys ( segment, filesz );
- physaddr_t end = user_to_phys ( segment, memsz );
+ physaddr_t start = virt_to_phys ( segment );
+ physaddr_t mid = ( start + filesz );
+ physaddr_t end = ( start + memsz );
unsigned int i;
DBG ( "Preparing segment [%lx,%lx,%lx)\n", start, mid, end );
*
* We have no concept of the underlying physical addresses, since
* these are not exposed to userspace. We provide a stub
- * implementation of user_to_phys() since this is required by
- * alloc_memblock(). We provide no implementation of phys_to_user();
- * any code attempting to access physical addresses will therefore
- * (correctly) fail to link.
+ * implementation of virt_to_phys() since this is required by
+ * alloc_memblock(). We provide a matching stub implementation of
+ * phys_to_virt().
*/
FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
#endif
/**
- * Convert user pointer to physical address
+ * Convert virtual address to physical address
*
- * @v userptr User pointer
- * @v offset Offset from user pointer
- * @ret phys_addr Physical address
+ * @v virt Virtual address
+ * @ret phys Physical address
*/
-static inline __always_inline unsigned long
-UACCESS_INLINE ( linux, user_to_phys ) ( userptr_t userptr, off_t offset ) {
+static inline __always_inline physaddr_t
+UACCESS_INLINE ( linux, virt_to_phys ) ( volatile const void *virt ) {
/* We do not know the real underlying physical address. We
* provide this stub implementation only because it is
* virtual address will suffice for the purpose of determining
* alignment.
*/
- return ( ( unsigned long ) ( userptr + offset ) );
+ return ( ( physaddr_t ) virt );
}
/**
- * Convert physical address to user pointer
+ * Convert physical address to virtual address
*
- * @v phys_addr Physical address
- * @ret userptr User pointer
+ * @v phys Physical address
+ * @ret virt Virtual address
*/
-static inline __always_inline userptr_t
-UACCESS_INLINE ( linux, phys_to_user ) ( physaddr_t phys_addr ) {
+static inline __always_inline void *
+UACCESS_INLINE ( linux, phys_to_virt ) ( physaddr_t phys ) {
- /* For symmetry with the stub user_to_phys() */
- return ( ( userptr_t ) phys_addr );
+ /* For symmetry with the stub virt_to_phys() */
+ return ( ( void * ) phys );
}
static inline __always_inline userptr_t
#define PROVIDE_UACCESS_INLINE( _subsys, _api_func ) \
PROVIDE_SINGLE_API_INLINE ( UACCESS_PREFIX_ ## _subsys, _api_func )
-static inline __always_inline userptr_t
-UACCESS_INLINE ( flat, phys_to_user ) ( unsigned long phys_addr ) {
- return ( ( userptr_t ) phys_addr );
+static inline __always_inline void *
+UACCESS_INLINE ( flat, phys_to_virt ) ( physaddr_t phys ) {
+ return ( ( void * ) phys );
}
-static inline __always_inline unsigned long
-UACCESS_INLINE ( flat, user_to_phys ) ( userptr_t userptr, off_t offset ) {
- return ( ( unsigned long ) ( userptr + offset ) );
+static inline __always_inline physaddr_t
+UACCESS_INLINE ( flat, virt_to_phys ) ( volatile const void *virt ) {
+ return ( ( physaddr_t ) virt );
}
static inline __always_inline userptr_t
/* Include all architecture-dependent user access API headers */
#include <bits/uaccess.h>
-/**
- * Convert physical address to user pointer
- *
- * @v phys_addr Physical address
- * @ret userptr User pointer
- */
-userptr_t phys_to_user ( unsigned long phys_addr );
-
-/**
- * Convert user pointer to physical address
- *
- * @v userptr User pointer
- * @v offset Offset from user pointer
- * @ret phys_addr Physical address
- */
-unsigned long user_to_phys ( userptr_t userptr, off_t offset );
-
/**
* Convert virtual address to user pointer
*
/**
* Convert virtual address to a physical address
*
- * @v addr Virtual address
- * @ret phys_addr Physical address
+ * @v virt Virtual address
+ * @ret phys Physical address
*/
-static inline __always_inline unsigned long
-virt_to_phys ( volatile const void *addr ) {
- return user_to_phys ( virt_to_user ( addr ), 0 );
-}
+physaddr_t __attribute__ (( const ))
+virt_to_phys ( volatile const void *virt );
/**
* Convert physical address to a virtual address
*
- * @v addr Virtual address
- * @ret phys_addr Physical address
+ * @v phys Physical address
+ * @ret virt Virtual address
*
* This operation is not available under all memory models.
*/
-static inline __always_inline void * phys_to_virt ( unsigned long phys_addr ) {
- return ( phys_to_user ( phys_addr ) );
-}
+void * __attribute__ (( const )) phys_to_virt ( physaddr_t phys );
/**
* Copy data to user buffer
/* Locate RSDT via ACPI configuration table, if available */
if ( rsdp )
- return phys_to_user ( rsdp->RsdtAddress );
+ return phys_to_virt ( rsdp->RsdtAddress );
return UNULL;
}
mode, efifb.pixel.width, efifb.pixel.height, bpp, efifb.start );
/* Initialise frame buffer console */
- if ( ( rc = fbcon_init ( &efifb.fbcon, phys_to_user ( efifb.start ),
+ if ( ( rc = fbcon_init ( &efifb.fbcon, phys_to_virt ( efifb.start ),
&efifb.pixel, &efifb.map, &efifb.font,
config ) ) != 0 )
goto err_fbcon_init;
/* Use 64-bit table if present */
if ( smbios3_entry && ( smbios3_entry->signature == SMBIOS3_SIGNATURE ) ) {
- smbios->address = phys_to_user ( smbios3_entry->smbios_address );
+ smbios->address = phys_to_virt ( smbios3_entry->smbios_address );
smbios->len = smbios3_entry->smbios_len;
smbios->count = 0;
smbios->version =
SMBIOS_VERSION ( smbios3_entry->major, smbios3_entry->minor );
DBG ( "Found 64-bit SMBIOS v%d.%d entry point at %p (%lx+%zx)\n",
smbios3_entry->major, smbios3_entry->minor, smbios3_entry,
- user_to_phys ( smbios->address, 0 ), smbios->len );
+ virt_to_phys ( smbios->address ), smbios->len );
return 0;
}
/* Otherwise, use 32-bit table if present */
if ( smbios_entry && ( smbios_entry->signature == SMBIOS_SIGNATURE ) ) {
- smbios->address = phys_to_user ( smbios_entry->smbios_address );
+ smbios->address = phys_to_virt ( smbios_entry->smbios_address );
smbios->len = smbios_entry->smbios_len;
smbios->count = smbios_entry->smbios_count;
smbios->version =
SMBIOS_VERSION ( smbios_entry->major, smbios_entry->minor );
DBG ( "Found 32-bit SMBIOS v%d.%d entry point at %p (%lx+%zx)\n",
smbios_entry->major, smbios_entry->minor, smbios_entry,
- user_to_phys ( smbios->address, 0 ), smbios->len );
+ virt_to_phys ( smbios->address ), smbios->len );
return 0;
}
return UNULL;
}
assert ( phys_addr != 0 );
- new_ptr = phys_to_user ( phys_addr + EFI_PAGE_SIZE );
+ new_ptr = phys_to_virt ( phys_addr + EFI_PAGE_SIZE );
copy_to_user ( new_ptr, -EFI_PAGE_SIZE,
&new_size, sizeof ( new_size ) );
DBG ( "EFI allocated %d pages at %llx\n",
memcpy ( new_ptr, old_ptr,
( (old_size < new_size) ? old_size : new_size ) );
old_pages = ( EFI_SIZE_TO_PAGES ( old_size ) + 1 );
- phys_addr = user_to_phys ( old_ptr, -EFI_PAGE_SIZE );
+ phys_addr = virt_to_phys ( old_ptr - EFI_PAGE_SIZE );
if ( ( efirc = bs->FreePages ( phys_addr, old_pages ) ) != 0 ){
rc = -EEFI ( efirc );
DBG ( "EFI could not free %d pages at %llx: %s\n",
size_t len ) {
struct hv_hypervisor *hv = vmdev->hv;
struct vmbus *vmbus = hv->vmbus;
- physaddr_t addr = user_to_phys ( data, 0 );
+ physaddr_t addr = virt_to_phys ( data );
unsigned int pfn_count = hv_pfn_count ( addr, len );
struct {
struct vmbus_gpadl_header gpadlhdr;
*
*/
-PROVIDE_UACCESS_INLINE(linux, user_to_phys);
+PROVIDE_UACCESS_INLINE(linux, phys_to_virt);
+PROVIDE_UACCESS_INLINE(linux, virt_to_phys);
PROVIDE_UACCESS_INLINE(linux, virt_to_user);
PROVIDE_UACCESS_INLINE(linux, memchr_user);
if ( ( sum = smbios_checksum ( start, offset,
entry->len ) ) != 0 ) {
DBG ( "SMBIOS at %08lx has bad checksum %02x\n",
- user_to_phys ( start, offset ), sum );
+ virt_to_phys ( start + offset ), sum );
continue;
}
/* Fill result structure */
DBG ( "Found SMBIOS v%d.%d entry point at %08lx\n",
entry->major, entry->minor,
- user_to_phys ( start, offset ) );
+ virt_to_phys ( start + offset ) );
return 0;
}
if ( ( sum = smbios_checksum ( start, offset,
entry->len ) ) != 0 ) {
DBG ( "SMBIOS3 at %08lx has bad checksum %02x\n",
- user_to_phys ( start, offset ), sum );
+ virt_to_phys ( start + offset ), sum );
continue;
}
/* Fill result structure */
DBG ( "Found SMBIOS3 v%d.%d entry point at %08lx\n",
entry->major, entry->minor,
- user_to_phys ( start, offset ) );
+ virt_to_phys ( start + offset ) );
return 0;
}