--- /dev/null
+From: olh@suse.de
+Subject: vanilla sources from www.maconlinux.org
+Patch-mainline: never
+
+to avoid the km_ mess
+
+Never modify this patch! Create new patches for build fixes.
+
+---
+ drivers/macintosh/mol/_dev.c | 376 ++++++++++
+ drivers/macintosh/mol/_fault.c | 159 ++++
+ drivers/macintosh/mol/_hostirq.c | 116 +++
+ drivers/macintosh/mol/_kuname.c | 48 +
+ drivers/macintosh/mol/_misc.c | 147 +++
+ drivers/macintosh/mol/_mmu.c | 41 +
+ drivers/macintosh/mol/actions.c | 548 ++++++++++++++
+ drivers/macintosh/mol/asm-files/603.S | 218 +++++
+ drivers/macintosh/mol/asm-files/dec.S | 228 ++++++
+ drivers/macintosh/mol/asm-files/emuaccel.S | 188 +++++
+ drivers/macintosh/mol/asm-files/emulation.S | 714 +++++++++++++++++++
+ drivers/macintosh/mol/asm-files/entry.S | 433 +++++++++++
+ drivers/macintosh/mol/asm-files/iopage.S | 89 ++
+ drivers/macintosh/mol/asm-files/linux.S | 129 +++
+ drivers/macintosh/mol/asm-files/ptintercept.S | 303 ++++++++
+ drivers/macintosh/mol/asm-files/splitmode.S | 428 +++++++++++
+ drivers/macintosh/mol/asm-files/traps.S | 501 +++++++++++++
+ drivers/macintosh/mol/asm-files/vsid.S | 123 +++
+ drivers/macintosh/mol/asm_offsets.c | 161 ++++
+ drivers/macintosh/mol/context.c | 99 ++
+ drivers/macintosh/mol/emu.c | 228 ++++++
+ drivers/macintosh/mol/fault.c | 601 ++++++++++++++++
+ drivers/macintosh/mol/hash.c | 126 +++
+ drivers/macintosh/mol/include/actions.h | 177 ++++
+ drivers/macintosh/mol/include/alloc.h | 70 +
+ drivers/macintosh/mol/include/archinclude.h | 77 ++
+ drivers/macintosh/mol/include/asm.m4 | 141 +++
+ drivers/macintosh/mol/include/asm_offsets.inc | 136 +++
+ drivers/macintosh/mol/include/asmdbg.h | 184 ++++
+ drivers/macintosh/mol/include/asmdefs.h | 397 ++++++++++
+ drivers/macintosh/mol/include/asmfuncs.h | 80 ++
+ drivers/macintosh/mol/include/atomic.h | 26
+ drivers/macintosh/mol/include/config.h | 90 ++
+ drivers/macintosh/mol/include/constants.h | 36
+ drivers/macintosh/mol/include/context.h | 62 +
+ drivers/macintosh/mol/include/dbg.h | 31
+ drivers/macintosh/mol/include/debugger.h | 96 ++
+ drivers/macintosh/mol/include/emu.h | 29
+ drivers/macintosh/mol/include/emuaccel_sh.h | 41 +
+ drivers/macintosh/mol/include/extralib.h | 70 +
+ drivers/macintosh/mol/include/hash.h | 36
+ drivers/macintosh/mol/include/kernel_vars.h | 225 ++++++
+ drivers/macintosh/mol/include/locks.h | 39 +
+ drivers/macintosh/mol/include/mac_registers.h | 168 ++++
+ drivers/macintosh/mol/include/map.h | 43 +
+ drivers/macintosh/mol/include/misc.h | 105 ++
+ drivers/macintosh/mol/include/mmu.h | 102 ++
+ drivers/macintosh/mol/include/mmu_contexts.h | 55 +
+ drivers/macintosh/mol/include/mmu_mappings.h | 48 +
+ drivers/macintosh/mol/include/mmutypes.h | 76 ++
+ drivers/macintosh/mol/include/mol-ioctl.h | 121 +++
+ drivers/macintosh/mol/include/mol_config.h | 76 ++
+ drivers/macintosh/mol/include/molasm.h | 138 +++
+ drivers/macintosh/mol/include/molversion.h | 6
+ drivers/macintosh/mol/include/mtable.h | 71 +
+ drivers/macintosh/mol/include/osi.h | 170 ++++
+ drivers/macintosh/mol/include/osi_calls.h | 475 ++++++++++++
+ drivers/macintosh/mol/include/performance.h | 71 +
+ drivers/macintosh/mol/include/platform.h | 73 +
+ drivers/macintosh/mol/include/processor.h | 409 +++++++++++
+ drivers/macintosh/mol/include/prom.h | 46 +
+ drivers/macintosh/mol/include/rvec.h | 147 +++
+ drivers/macintosh/mol/include/skiplist.h | 87 ++
+ drivers/macintosh/mol/include/tlbie.h | 102 ++
+ drivers/macintosh/mol/include/uaccess.h | 36
+ drivers/macintosh/mol/include/vector.h | 189 +++++
+ drivers/macintosh/mol/include/version.h | 11
+ drivers/macintosh/mol/include/weaksym.h | 39 +
+ drivers/macintosh/mol/init.c | 191 +++++
+ drivers/macintosh/mol/misc.c | 255 ++++++
+ drivers/macintosh/mol/mmu.c | 251 ++++++
+ drivers/macintosh/mol/mmu_fb.c | 186 +++++
+ drivers/macintosh/mol/mmu_io.c | 470 ++++++++++++
+ drivers/macintosh/mol/mmu_tracker.c | 128 +++
+ drivers/macintosh/mol/mtable.c | 960 ++++++++++++++++++++++++++
+ drivers/macintosh/mol/ptaccess.c | 153 ++++
+ drivers/macintosh/mol/sheep.c | 701 ++++++++++++++++++
+ drivers/macintosh/mol/skiplist.c | 222 ++++++
+ 78 files changed, 14428 insertions(+)
+
+--- /dev/null
++++ b/drivers/macintosh/mol/_dev.c
+@@ -0,0 +1,376 @@
++/*
++ * Creation Date: <2003/08/20 17:31:44 samuel>
++ * Time-stamp: <2004/02/14 14:43:13 samuel>
++ *
++ * <dev.c>
++ *
++ * misc device
++ *
++ * Copyright (C) 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ *
++ */
++
++#include "archinclude.h"
++#include <linux/module.h>
++#include <linux/miscdevice.h>
++#include <linux/spinlock.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/bitops.h>
++#include <asm/prom.h>
++#include <asm/machdep.h>
++#include <asm/atomic.h>
++#include "kernel_vars.h"
++#include "mol-ioctl.h"
++#include "version.h"
++#include "mmu.h"
++#include "misc.h"
++#include "mtable.h"
++#include "atomic.h"
++
++MODULE_AUTHOR("Samuel Rydh <samuel@ibrium.se>");
++MODULE_DESCRIPTION("Mac-on-Linux kernel module");
++MODULE_LICENSE("GPL");
++
++static DECLARE_MUTEX( initmutex );
++static int opencnt;
++
++
++/************************************************************************/
++/* misc */
++/************************************************************************/
++
++#ifdef CONFIG_SMP
++#define HAS_SMP 1
++
++static void
++dummy_ipi( void *dummy )
++{
++ /* we don't need to _do_ anything, the exception itself is sufficient */
++}
++static inline void
++send_ipi( void )
++{
++ smp_call_function( dummy_ipi, NULL, 1, 0 );
++}
++#else /* CONFIG_SMP */
++
++#define HAS_SMP 0
++#define send_ipi() do {} while(0)
++
++#endif /* CONFIG_SMP */
++
++
++static int
++find_physical_rom( int *base, int *size )
++{
++#ifndef CONFIG_AMIGAONE
++ struct device_node *dn;
++ int len, *p;
++ int by_type = 0;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
++ if( !(dn=find_devices("boot-rom")) && !(dn=find_type_devices("rom")) )
++ return 0;
++#else
++ if (! (dn = of_find_node_by_name(NULL, "boot-rom"))) {
++ by_type = 1;
++ if (! (dn = of_find_node_by_type(NULL, "rom")))
++ return 0;
++ }
++#endif /* < Linux 2.6.21 */
++ do {
++ if( !(p=(int*)get_property(dn, "reg", &len)) || len != sizeof(int[2]) ) {
++ of_node_put(dn);
++ return 0;
++ }
++ if( (unsigned int)(0xfff00100 - p[0]) < (unsigned int)p[1] ) {
++ *base = p[0];
++ *size = p[1];
++ of_node_put(dn);
++ return 1;
++ }
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
++ dn = dn->next;
++#else
++ dn = by_type ? of_find_node_by_type(dn, "rom") :
++ of_find_node_by_name(dn, "boot-rom");
++#endif /* < Linux 2.6.21 */
++ } while( dn );
++#endif /* CONFIG_AMIGA_ONE */
++ return 0;
++}
++
++static int
++get_info( mol_kmod_info_t *user_retinfo, int size )
++{
++ mol_kmod_info_t info;
++
++ memset( &info, 0, sizeof(info) );
++ asm volatile("mfpvr %0" : "=r" (info.pvr) : );
++ info.version = MOL_VERSION;
++ find_physical_rom( &info.rombase, &info.romsize );
++ info.tb_freq = HZ * tb_ticks_per_jiffy;
++ info.smp_kernel = HAS_SMP;
++
++ if( (uint)size > sizeof(info) )
++ size = sizeof(info);
++
++ if( copy_to_user(user_retinfo, &info, size) )
++ return -EFAULT;
++ return 0;
++}
++
++
++void
++prevent_mod_unload( void )
++{
++#ifndef LINUX_26
++ MOD_INC_USE_COUNT;
++#else
++ __module_get( THIS_MODULE );
++#endif
++}
++
++int
++get_irqs( kernel_vars_t *kv, irq_bitfield_t *irq_info_p )
++{
++ irq_bitfield_t irq_mask;
++ int i;
++
++ /* copy the interrupt mask from userspace */
++ if (copy_from_user(&irq_mask, irq_info_p, sizeof(irq_mask)))
++ return -EFAULT;
++
++ /* see which of the mapped interrupts need to be enabled */
++ for (i = 0; i < NR_HOST_IRQS; i++) {
++ if (check_bit_mol(i, (char *) kv->mregs.mapped_irqs.irqs)
++ && check_bit_mol(i, (char *) irq_mask.irqs)
++ && check_bit_mol(i, (char *) kv->mregs.active_irqs.irqs)) {
++ if (test_and_clear_bit(i, kv->mregs.active_irqs.irqs))
++ atomic_dec_mol((mol_atomic_t *) &(kv->mregs.hostirq_active_cnt));
++ enable_irq(i);
++ }
++ }
++
++ /* if one of the enabled interrupts was pending, it should have fired
++ * now, updating active_irqs */
++ if (copy_to_user(irq_info_p, &(kv->mregs.active_irqs), sizeof(kv->mregs.active_irqs)))
++ return -EFAULT;
++
++ return 0;
++}
++
++/************************************************************************/
++/* ioctl */
++/************************************************************************/
++
++static int
++debugger_op( kernel_vars_t *kv, dbg_op_params_t *upb )
++{
++ dbg_op_params_t pb;
++ int ret;
++
++ if( copy_from_user(&pb, upb, sizeof(pb)) )
++ return -EFAULT;
++
++ switch( pb.operation ) {
++ case DBG_OP_GET_PHYS_PAGE:
++ ret = dbg_get_linux_page( pb.ea, &pb.ret.page );
++ break;
++ default:
++ ret = do_debugger_op( kv, &pb );
++ break;
++ }
++
++ if( copy_to_user(upb, &pb, sizeof(pb)) )
++ return -EFAULT;
++ return ret;
++}
++
++static int
++arch_handle_ioctl( kernel_vars_t *kv, int cmd, int p1, int p2, int p3 )
++{
++ char *rompage;
++ int ret = -EFAULT;
++
++ switch( cmd ) {
++ case MOL_IOCTL_GET_IRQS:
++ return get_irqs( kv, (irq_bitfield_t *) p1 );
++
++ case MOL_IOCTL_GET_DIRTY_FBLINES: /* short *retbuf, int size -- npairs */
++ if( compat_verify_area(VERIFY_WRITE, (short*)p1, p2) )
++ break;
++ ret = get_dirty_fb_lines( kv, (short*)p1, p2 );
++ break;
++
++ case MOL_IOCTL_DEBUGGER_OP:
++ ret = debugger_op( kv, (dbg_op_params_t*)p1 );
++ break;
++
++ case MOL_IOCTL_GRAB_IRQ:
++ ret = grab_host_irq(kv, p1);
++ break;
++
++ case MOL_IOCTL_RELEASE_IRQ:
++ ret = release_host_irq(kv, p1);
++ break;
++
++ case MOL_IOCTL_COPY_LAST_ROMPAGE: /* p1 = dest */
++ ret = -ENODEV;
++ if( (rompage=ioremap(0xfffff000, 0x1000)) ) {
++ ret = copy_to_user( (char*)p1, rompage, 0x1000 );
++ iounmap( rompage );
++ }
++ break;
++
++ case MOL_IOCTL_SET_RAM: /* void ( char *lvbase, size_t size ) */
++ if( compat_verify_area(VERIFY_WRITE, (char*)p1, p2) )
++ break;
++ ret = 0;
++ kv->mmu.userspace_ram_base = p1;
++ kv->mmu.ram_size = p2;
++ mtable_tune_alloc_limit( kv, p2/(1024 * 1024) );
++ break;
++
++ case MOL_IOCTL_GET_MREGS_PHYS:
++ ret = virt_to_phys( &kv->mregs );
++ break;
++
++ default:
++ ret = handle_ioctl( kv, cmd, p1, p2, p3 );
++ break;
++ }
++ return ret;
++}
++
++
++/************************************************************************/
++/* device interface */
++/************************************************************************/
++
++static int
++mol_open( struct inode *inode, struct file *file )
++{
++ int ret=0;
++
++ if( !(file->f_mode & FMODE_READ) )
++ return -EPERM;
++
++ down( &initmutex );
++ if( !opencnt++ ) {
++ if( common_init() ) {
++ ret = -ENOMEM;
++ opencnt = 0;
++ }
++ }
++ up( &initmutex );
++
++ file->private_data = NULL;
++ return ret;
++}
++
++static int
++mol_release( struct inode *inode, struct file *file )
++{
++ kernel_vars_t *kv = (kernel_vars_t*)file->private_data;
++
++ down( &initmutex );
++ if( kv )
++ destroy_session( kv->session_index );
++
++ if( !--opencnt )
++ common_cleanup();
++ up( &initmutex );
++ return 0;
++}
++
++static int
++mol_ioctl( struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg )
++{
++ mol_ioctl_pb_t pb;
++ kernel_vars_t *kv;
++ int ret;
++ uint session;
++
++ /* fast path */
++ if( cmd == MOL_IOCTL_SMP_SEND_IPI ) {
++ send_ipi();
++ return 0;
++ }
++
++ if( copy_from_user(&pb, (void*)arg, sizeof(pb)) )
++ return -EFAULT;
++
++ switch( cmd ) {
++ case MOL_IOCTL_GET_INFO:
++ return get_info( (mol_kmod_info_t*)pb.arg1, pb.arg2 );
++
++ case MOL_IOCTL_CREATE_SESSION:
++ if( !(file->f_mode & FMODE_WRITE) || !capable(CAP_SYS_ADMIN) )
++ return -EPERM;
++ ret = -EINVAL;
++ down( &initmutex );
++ if( (uint)pb.arg1 < MAX_NUM_SESSIONS && !file->private_data ) {
++ if( !(ret=initialize_session(pb.arg1)) ) {
++ kv = g_sesstab->kvars[pb.arg1];
++ init_MUTEX( &kv->ioctl_sem );
++ file->private_data = kv;
++ }
++ }
++ up( &initmutex );
++ return ret;
++
++ case MOL_IOCTL_DBG_COPY_KVARS:
++ session = pb.arg1;
++ ret = -EINVAL;
++ down( &initmutex );
++ if( session < MAX_NUM_SESSIONS && (kv=g_sesstab->kvars[session]) )
++ ret = copy_to_user( (char*)pb.arg2, kv, sizeof(*kv) );
++ up( &initmutex );
++ return ret;
++ }
++
++ if( !(kv=(kernel_vars_t*)file->private_data) )
++ return -EINVAL;
++
++ down( &kv->ioctl_sem );
++ ret = arch_handle_ioctl( kv, cmd, pb.arg1, pb.arg2, pb.arg3 );
++ up( &kv->ioctl_sem );
++
++ return ret;
++}
++
++static struct file_operations mol_device_fops = {
++ .owner = THIS_MODULE,
++ .open = mol_open,
++ .release = mol_release,
++ .ioctl = mol_ioctl,
++// .poll = mol_poll,
++// .mmap: = mol_mmap,
++};
++
++static struct miscdevice mol_device = {
++ MISC_DYNAMIC_MINOR, "mol", &mol_device_fops
++};
++
++static int __init
++dev_register( void )
++{
++ printk("MOL %s kernel module loaded\n", MOL_RELEASE );
++ return misc_register( &mol_device );
++}
++
++static void __exit
++dev_unregister( void )
++{
++ misc_deregister( &mol_device );
++}
++
++module_init( dev_register );
++module_exit( dev_unregister );
+--- /dev/null
++++ b/drivers/macintosh/mol/_fault.c
+@@ -0,0 +1,159 @@
++/*
++ * Creation Date: <2002/06/08 21:01:54 samuel>
++ * Time-stamp: <2004/02/19 11:54:33 samuel>
++ *
++ * <fault.c>
++ *
++ * Linux part
++ *
++ * Copyright (C) 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include "alloc.h"
++#include "kernel_vars.h"
++#include "mmu.h"
++#include "mmu_contexts.h"
++#include "asmfuncs.h"
++#include "emu.h"
++#include "misc.h"
++#include "rvec.h"
++#include "performance.h"
++#include "mol-ioctl.h"
++#include "mtable.h"
++
++#ifdef CONFIG_HIGHPTE
++#error "MOL is currently incompatible with CONFIG_HIGHPTE"
++#endif
++
++static inline ulong
++fix_pte( ulong *p, ulong set, ulong flags )
++{
++ unsigned long ret, tmp;
++
++ __asm__ __volatile__("\n"
++ "1: lwarx %0,0,%3 \n"
++ " andc. %1,%5,%0 \n"
++ " addi %1,0,0 \n"
++ " bne- 2f \n"
++ " or %1,%0,%4 \n"
++ " stwcx. %1,0,%3 \n"
++ " bne- 1b \n"
++ "2: \n"
++ : "=&r" (tmp), "=&r" (ret), "=m" (*p)
++ : "r" (p), "r" (set), "r" (flags), "m" (*p)
++ : "cc" );
++ return ret;
++}
++
++/*
++ * Get physical page corresponding to linux virtual address. Invokes linux page
++ * fault handler if the page is missing. This function never fails since we
++ * know there is a valid mapping...
++ */
++#define PAGE_BITS_WRITE (_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HASHPTE )
++#define PAGE_BITS_READ (_PAGE_ACCESSED | _PAGE_HASHPTE )
++
++ulong
++get_phys_page( kernel_vars_t *kv, ulong va, int request_rw )
++{
++ char *lvptr = (char*)va;
++ ulong lpte, uptr, *ptr;
++ ulong flags;
++ struct mm_struct *mm;
++ struct vm_area_struct *vma;
++
++ /* pte bits that must be set */
++ flags = request_rw ? (_PAGE_USER | _PAGE_RW | _PAGE_PRESENT)
++ : (_PAGE_USER | _PAGE_PRESENT);
++
++ uptr = ((ulong*)current->thread.pgdir)[va>>22]; /* top 10 bits */
++ ptr = (ulong*)(uptr & ~0xfff);
++ if( !ptr )
++ goto no_page;
++#ifdef LINUX_26
++ ptr = phys_to_virt( (int)ptr );
++#endif
++ ptr = ptr + ((va>>12) & 0x3ff); /* next 10 bits */
++
++ /* this allows us to keep track of this page until we have
++ * added a full mtable entry for it. The reservation is lost if
++ * a TLB invalidation occurs.
++ */
++ make_lvptr_reservation( kv, lvptr );
++
++ /* we atomically set _PAGE_HASHPTE after checking PAGE_PRESENT and PAGE_RW.
++ * We are then guaranteed to be notified about a TLB invalidation through the
++ * flush_hash_page hook.
++ */
++ lpte = fix_pte( ptr, (request_rw? PAGE_BITS_WRITE : PAGE_BITS_READ), flags );
++
++ /* permissions violation */
++ if( !lpte )
++ goto no_page;
++
++ return lpte & ~0xfff;
++
++no_page:
++ BUMP( page_missing );
++
++ /* no mac page found... */
++ mm = current->mm;
++ down_read( &mm->mmap_sem );
++
++ if( !(vma=find_vma(mm,va)) || vma->vm_start > va )
++ goto bad_area;
++ if( !(vma->vm_flags & (request_rw ? VM_WRITE : (VM_READ | VM_EXEC))) )
++ goto bad_area;
++
++ handle_mm_fault( mm, vma, va, request_rw );
++
++ up_read( &mm->mmap_sem );
++ return get_phys_page(kv, va, request_rw);
++
++bad_area:
++ up_read( &mm->mmap_sem );
++ printk("get_phys_page: BAD AREA, lvptr = %08lx\n", va );
++ force_sig(SIGSEGV, current);
++ return 0;
++}
++
++
++/************************************************************************/
++/* Debugger functions */
++/************************************************************************/
++
++int
++dbg_get_linux_page( ulong va, dbg_page_info_t *r )
++{
++ ulong val, uptr, *ptr;
++
++ uptr = ((ulong*)current->thread.pgdir)[va>>22]; /* top 10 bits */
++ ptr = (ulong*)(uptr & ~0xfff);
++ if( !ptr )
++ return 1;
++#ifdef LINUX_26
++ ptr = phys_to_virt( (int)ptr );
++#endif
++ val = ptr[ (va>>12)&0x3ff ]; /* next 10 bits */
++
++ r->phys = val & ~0xfff;
++ r->mflags =
++ DBG_TRANSL_PAGE_FLAG( val, _PAGE_PRESENT )
++ | DBG_TRANSL_PAGE_FLAG( val, _PAGE_USER )
++ | DBG_TRANSL_PAGE_FLAG( val, _PAGE_GUARDED )
++ | DBG_TRANSL_PAGE_FLAG( val, _PAGE_COHERENT )
++ | DBG_TRANSL_PAGE_FLAG( val, _PAGE_NO_CACHE )
++ | DBG_TRANSL_PAGE_FLAG( val, _PAGE_WRITETHRU )
++ | DBG_TRANSL_PAGE_FLAG( val, _PAGE_DIRTY )
++ | DBG_TRANSL_PAGE_FLAG( val, _PAGE_ACCESSED )
++ | DBG_TRANSL_PAGE_FLAG( val, _PAGE_RW )
++ | DBG_TRANSL_PAGE_FLAG( val, _PAGE_HASHPTE )
++ | DBG_TRANSL_PAGE_FLAG( val, _PAGE_EXEC );
++ return 0;
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/_hostirq.c
+@@ -0,0 +1,116 @@
++/*
++ * <hostirq.c>
++ *
++ * host IRQ handling (for pciproxied devices)
++ *
++ * Copyright (C) 2005 Mattias Nissler <mattias.nissler@gmx.de>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ *
++ */
++#include <linux/version.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
++#include <linux/threads.h>
++#include <linux/spinlock.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/sched.h>
++#include <linux/bitops.h>
++#include <asm/atomic.h>
++
++#include "archinclude.h"
++#include "kernel_vars.h"
++#include "misc.h"
++#include "atomic.h"
++
++irqreturn_t
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
++hostirq_handler(int irq, void *pkv)
++#else
++hostirq_handler(int irq, void *pkv, struct pt_regs *regs)
++#endif
++{
++ siginfo_t si;
++ kernel_vars_t *kv = (kernel_vars_t *) pkv;
++
++ /* disable the irq */
++ disable_irq_nosync(irq);
++ /* have the interrupt handled */
++ if (!test_and_set_bit(irq, kv->mregs.active_irqs.irqs))
++ atomic_inc_mol((mol_atomic_t *) &(kv->mregs.hostirq_active_cnt));
++ kv->mregs.hostirq_update = 1;
++ kv->mregs.interrupt = 1;
++ /* signal the main thread (it might be DOZEing) */
++ if (kv->main_thread != NULL) {
++ memset(&si, 0, sizeof(si));
++ si.si_signo = SIGHUP;
++ si.si_code = irq;
++ send_sig_info(SIGHUP, &si, kv->main_thread);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static char *molirqdescstring = "MOL irq mapping";
++
++int
++grab_host_irq(kernel_vars_t *kv, int irq)
++{
++ int ret;
++
++ /* sanity check */
++ if (irq < 0 || irq >= NR_HOST_IRQS
++ || check_bit_mol(irq, (char *) kv->mregs.mapped_irqs.irqs))
++ return 0;
++
++ /* request the irq */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21)
++ ret = request_irq(irq, hostirq_handler, IRQF_DISABLED | IRQF_SHARED, molirqdescstring, kv);
++#else
++ ret = request_irq(irq, hostirq_handler, SA_INTERRUPT | SA_SHIRQ, molirqdescstring, kv);
++#endif
++ if (!ret) {
++// printk(KERN_INFO "mapped irq line %d\n", irq);
++ set_bit_mol(irq, (char *) kv->mregs.mapped_irqs.irqs);
++ }
++
++ return ret;
++}
++
++int
++release_host_irq(kernel_vars_t *kv, int irq)
++{
++ /* sanity check */
++ if (irq < 0 || irq >= NR_HOST_IRQS
++ || !check_bit_mol(irq, (char *) kv->mregs.mapped_irqs.irqs))
++ return 0;
++
++ clear_bit_mol(irq, (char *) kv->mregs.mapped_irqs.irqs);
++ disable_irq(irq);
++ free_irq(irq, kv);
++
++ return 1;
++}
++
++void
++init_host_irqs(kernel_vars_t *kv)
++{
++ memset(&(kv->mregs.mapped_irqs), 0, sizeof(kv->mregs.mapped_irqs));
++ kv->main_thread = current;
++ kv->mregs.hostirq_update = 0;
++}
++
++void
++cleanup_host_irqs(kernel_vars_t *kv)
++{
++ int n;
++
++ for (n = 0; n < NR_HOST_IRQS; n++) {
++ if (check_bit_mol(n, (char *) kv->mregs.mapped_irqs.irqs))
++ release_host_irq(kv, n);
++ }
++}
++
+--- /dev/null
++++ b/drivers/macintosh/mol/_kuname.c
+@@ -0,0 +1,48 @@
++/*
++ * Creation Date: <2001/08/15 01:11:01 samuel>
++ * Time-stamp: <2003/10/24 10:22:00 samuel>
++ *
++ * <kuname.c>
++ *
++ * Extract from the kernel source
++ *
++ * Copyright (C) 2001, 2002, 2003 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include <linux/version.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++#include <linux/utsrelease.h>
++#endif
++
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
++#include <linux/config.h>
++#else
++#include <linux/autoconf.h>
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++
++#ifdef CONFIG_SMP
++#define SMP_STRING "-smp"
++#else
++#define SMP_STRING ""
++#endif
++
++#ifndef CONFIG_ALTIVEC
++#define ALTIVEC_STRING "-noav"
++#else
++#define ALTIVEC_STRING ""
++#endif
++
++#else
++#define SMP_STRING ""
++#define ALTIVEC_STRING ""
++#endif
++
++char *cross_compiling_magic = "-MAGIC-" UTS_RELEASE SMP_STRING ALTIVEC_STRING ;
+--- /dev/null
++++ b/drivers/macintosh/mol/_misc.c
+@@ -0,0 +1,147 @@
++/*
++ * Creation Date: <97/05/26 02:10:43 samuel>
++ * Time-stamp: <2004/03/13 14:14:20 samuel>
++ *
++ * <misc.c>
++ *
++ * Kernel interface
++ *
++ * Copyright (C) 1997-2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include <linux/vmalloc.h>
++#include <linux/sched.h>
++#include <asm/uaccess.h>
++#include <asm/prom.h>
++#include "kernel_vars.h"
++#include "misc.h"
++#include "performance.h"
++#include "map.h"
++
++#define MMU kv->mmu
++
++kernel_vars_t *
++alloc_kvar_pages( void )
++{
++ kernel_vars_t *kv;
++ int i, order;
++ char *ptr;
++
++ for( i=1, order=0; i<NUM_KVARS_PAGES; i=i<<1, order++ )
++ ;
++ if( !(kv=(kernel_vars_t*)__get_free_pages(GFP_KERNEL, order)) )
++ return NULL;
++
++ /* To be able to export the kernel variables to user space, we
++ * must set the PG_reserved bit. This is due to a check
++ * in remap_pte_range() in kernel/memory.c (is this a bug or a feature?).
++ */
++ for( ptr=(char*)kv, i=0; i<NUM_KVARS_PAGES; i++, ptr+=0x1000 )
++ SetPageReserved( virt_to_page(ptr) );
++
++ return kv;
++}
++
++void
++free_kvar_pages( kernel_vars_t *kv )
++{
++ char *ptr = (char*)kv;
++ int i, order;
++
++ for( i=0; i<NUM_KVARS_PAGES; i++, ptr+=0x1000 )
++ ClearPageReserved( virt_to_page(ptr) );
++
++ for( i=1, order=0; i<NUM_KVARS_PAGES; i=i<<1, order++ )
++ ;
++ free_pages( (ulong)kv, order );
++}
++
++
++/************************************************************************/
++/* hash access */
++/************************************************************************/
++
++ulong *
++map_emulated_hash( kernel_vars_t *kv, ulong mbase, ulong size )
++{
++ return (ulong*)(MMU.userspace_ram_base + mbase);
++}
++
++void
++unmap_emulated_hash( kernel_vars_t *kv )
++{
++ /* nothing */
++}
++
++/************************************************************************/
++/* kernel lowmem asm <-> kernel C-code switching */
++/************************************************************************/
++
++typedef int (*kernelfunc_t)( kernel_vars_t *, ulong, ulong, ulong );
++typedef void (*trampoline_t)( struct pt_regs *regs );
++static trampoline_t old_trampoline;
++
++static void
++mol_trampoline_vector( struct pt_regs *r )
++{
++ kernel_vars_t *kv = (kernel_vars_t*)r->gpr[8];
++
++#ifndef LINUX_26
++ /* the 0x2f00 trap does not enable MSR_EE */
++ local_irq_enable();
++#endif
++ TICK_CNTR_PUSH( kv );
++ r->gpr[3] = (*(kernelfunc_t)r->gpr[3])( kv, r->gpr[4], r->gpr[5], r->gpr[6] );
++ TICK_CNTR_POP( kv, in_kernel );
++}
++
++static trampoline_t
++set_trampoline( trampoline_t tramp )
++{
++ trampoline_t old;
++#ifdef LINUX_26
++ extern trampoline_t mol_trampoline;
++ old = mol_trampoline;
++ mol_trampoline = tramp;
++#else
++ /* we steal the unused 0x2f00 exception vector... */
++ u32 *p = (u32*)(KERNELBASE + 0x2f00);
++ static trampoline_t *tptr = NULL;
++ int i;
++
++ /* look for bl xxxx ; .long vector; .long exception_return */
++ for( i=0; !tptr && i<0x100/4; i++ ) {
++ if( (p[i] & ~0xffff) != 0x48000000 )
++ continue;
++ if( (p[i+1] & ~0x7fffff) != KERNELBASE || (p[i+2] & ~0x0fffff) != KERNELBASE )
++ continue;
++ tptr = (trampoline_t*)&p[i+1];
++ }
++ if( !tptr ) {
++ printk("MOL trampoline not found!\n");
++ return NULL;
++ }
++ old = *tptr;
++ *tptr = tramp;
++#endif
++ return old;
++}
++
++int
++arch_common_init( void )
++{
++ old_trampoline = set_trampoline( mol_trampoline_vector );
++ return !old_trampoline;
++}
++
++void
++arch_common_cleanup( void )
++{
++ set_trampoline( old_trampoline );
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/_mmu.c
+@@ -0,0 +1,41 @@
++/*
++ * Creation Date: <2002/07/13 13:58:00 samuel>
++ * Time-stamp: <2004/02/14 12:47:09 samuel>
++ *
++ * <mmu.c>
++ *
++ *
++ *
++ * Copyright (C) 2002, 2003 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include "alloc.h"
++#include "kernel_vars.h"
++#include "mmu.h"
++#include "asmfuncs.h"
++
++#define MMU (kv->mmu)
++
++#ifdef CONFIG_SMP
++void (*xx_tlbie_lowmem)( void );
++void (*xx_store_pte_lowmem)( void );
++#else
++void (*xx_store_pte_lowmem)( ulong *slot, int pte0, int pte1 );
++#endif
++
++int
++arch_mmu_init( kernel_vars_t *kv )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++ MMU.emulator_context = current->mm->context.id;
++#else
++ MMU.emulator_context = current->mm->context;
++#endif
++ return 0;
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/actions.c
+@@ -0,0 +1,548 @@
++/*
++ * Creation Date: <2001/04/07 17:33:52 samuel>
++ * Time-stamp: <2004/03/13 14:17:40 samuel>
++ *
++ * <actions.c>
++ *
++ * Handle assembly actions (relocations, exception vector
++ * hooking, lowmem relocations and other stuff)
++ *
++ * Copyright (C) 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include "alloc.h"
++#include "kernel_vars.h"
++#include "misc.h"
++#include "asmfuncs.h"
++#include "actions.h"
++#include "map.h"
++
++
++/* globals */
++int reloc_virt_offs;
++
++static char *code_base;
++static uint code_size;
++
++/* some opcodes */
++#define OPCODE_ADDIS( dreg, sreg, hi_val ) ((15<<26) | ((dreg)<<21) | ((sreg)<<16) | (hi_val))
++#define OPCODE_LIS( dreg, hi_val ) OPCODE_ADDIS( dreg, 0, hi_val )
++#define OPCODE_ORI( dreg, sreg, val ) ((24<<26) | ((dreg)<<16) | ((sreg)<<21) | (val))
++#define OPCODE_MTSPRG2( sreg ) (0x7c1243a6 + ((sreg)<<21))
++
++
++/************************************************************************/
++/* lowmem allocations (allocates within the first 32 MB of RAM) */
++/************************************************************************/
++
++/* The low-level assembly code needs to be located in memory which is
++ * physically contiguous. The kernel exception vectors are patched
++ * through pseudo symbols (action symbols).
++ */
++
++#define MAX_NUM_CLEANUP_HANDLERS 32
++
++typedef struct {
++ char *lowmem_addr;
++ int alloc_size;
++ int alloc_method;
++
++ ulong *inst_addr; /* these fields are used */
++ ulong opcode; /* be the hook code */
++} cleanup_entry_t;
++
++static int num_cleanup_entries;
++static cleanup_entry_t cleanup_table[ MAX_NUM_CLEANUP_HANDLERS ];
++static ulong lowmem_phys_cursor;
++
++/* Memory mapping of exception vectors */
++static ulong lowmem_phys_base;
++static char *lowmem_virt_base;
++static void *lowmem_mapping;
++
++
++static inline ulong *
++lowmem_phys_to_virt( ulong paddr ) {
++ return (ulong*)(lowmem_virt_base + (paddr - lowmem_phys_base));
++}
++
++static inline ulong
++lowmem_tophys( void *vaddr ) {
++ return lowmem_phys_base + ((ulong)vaddr - (ulong)lowmem_virt_base);
++}
++
++
++static void
++lowmem_initialize( void )
++{
++ if( num_cleanup_entries ) {
++ printk("Internal error in lowmem_initialize\n");
++ return;
++ }
++ lowmem_phys_cursor = 0x100;
++
++ /* In Darwin, the mapping will fail if we put lowmem_phys_base to zero */
++ lowmem_phys_base = 0x100;
++ lowmem_mapping = map_phys_range( lowmem_phys_base, 0x4000, &lowmem_virt_base );
++}
++
++static char *
++lowmem_alloc( int size, cleanup_entry_t **ret_ce )
++{
++ ulong *pstart;
++ cleanup_entry_t ce;
++ int found=0;
++
++ memset( &ce, 0, sizeof(ce) );
++ if( ret_ce )
++ *ret_ce = NULL;
++
++ if( num_cleanup_entries >= MAX_NUM_CLEANUP_HANDLERS ) {
++ printk("MOL: Need more cleanup slots!\n");
++ return NULL;
++ }
++
++ /* Find big enough empty piece of memory */
++ if( size < 0x10 )
++ size = 0x10;
++
++ pstart = lowmem_phys_to_virt(lowmem_phys_cursor);
++ pstart = (ulong*)(((ulong)pstart + 0xf) & ~0xf);
++ for( ; lowmem_phys_cursor < 0x3000; lowmem_phys_cursor+=4 ) {
++ ulong *p = lowmem_phys_to_virt(lowmem_phys_cursor);
++ if( ((int)p - (int)pstart) >= size ) {
++ found = 1;
++ break;
++ }
++ if( *p ) {
++ pstart = (ulong*)(((ulong)p + sizeof(ulong) + 0xf) & ~0xf);
++ continue;
++ }
++ }
++ if( !found ) {
++ printk("MOL: Did not find an empty piece of lowmem memory!\n");
++ return NULL;
++ }
++ /* printk("lowmem alloc: %08lX\n", pstart ); */
++
++ ce.lowmem_addr = (char*)pstart;
++ ce.alloc_method = 0;
++ ce.alloc_size = size;
++ /* printk("lowmem-alloc SPACE %X bytes at %08lX\n", size, (ulong)pstart ); */
++
++ cleanup_table[num_cleanup_entries] = ce;
++ if( ret_ce )
++ *ret_ce = &cleanup_table[num_cleanup_entries];
++ num_cleanup_entries++;
++
++ return ce.lowmem_addr;
++}
++
++static void
++lowmem_free_all( void )
++{
++ cleanup_entry_t *ce = &cleanup_table[0];
++ int i;
++
++ for(i=0; i<num_cleanup_entries; i++, ce++ )
++ memset( ce->lowmem_addr, 0, ce->alloc_size );
++
++ num_cleanup_entries = 0;
++
++ if( lowmem_mapping ) {
++ unmap_phys_range( lowmem_mapping );
++ lowmem_mapping = NULL;
++ }
++}
++
++
++/************************************************************************/
++/* helper functions */
++/************************************************************************/
++
++static action_pb_t *
++find_action( int action, int index )
++{
++ extern int r__actions_offs_section[], r__actions_offs_section_end[];
++ extern char *r__actions_section[];
++ const int n = ((int)r__actions_offs_section_end - (int)r__actions_offs_section)/sizeof(int);
++ int i, *op = r__actions_offs_section;
++
++ for( i=0; i<n; i++ ) {
++ action_pb_t *p = (action_pb_t*)((char*)r__actions_section + op[i]);
++
++ if( p->action != action || index-- )
++ continue;
++ return p;
++ }
++ return NULL;
++}
++
++static int
++relocate_inst( ulong *opc_ptr, ulong from, ulong to )
++{
++ ulong opcode = *opc_ptr;
++ int offs=-1;
++
++ /* XXX: UNTESTED if target instruction is a branch */
++
++ /* Since we use this on the _first_ instruction of the
++ * exception vector, it can't touch LR/CR. Thus, we
++ * only look for unconditional, relative branches.
++ */
++
++ /* relative branch b */
++ if( (opcode & 0xfc000003) == (18<<26) ){
++ offs = (opcode & 0x03fffffc);
++ /* sign extend */
++ if( offs & 0x03000000 )
++ offs |= ~0x03ffffff;
++ }
++ /* unconditional, relative bc branch (b 0100 001z1zz ...) */
++ if( (opcode & 0xfe800003) == 0x42800000 ){
++ offs = (opcode & 0xfffc);
++ if( offs & 0x8000 )
++ offs |= ~0xffff;
++ }
++ /* construct the absolute branch */
++ if( offs != -1 ) {
++ int dest = from + offs;
++ if( dest < 0 || dest > 33554431 ) {
++ printk("relocation of branch at %08lX to %08lX failed\n", from, to);
++ return 1;
++ }
++ /* absolute branch */
++ *opc_ptr = ((18<<26) + 2) | dest;
++ }
++ return 0;
++}
++
++
++/************************************************************************/
++/* actions */
++/************************************************************************/
++
++typedef int (*action_func_t)( int action, ulong *target, const int *pb );
++
++static int
++action_li_phys( int action, ulong *target, const int *pb )
++{
++ int r = pb[0] & 0x1f;
++ ulong addr = pb[1] + tophys_mol( code_base );
++
++ /* target[0] = addis r,0,addr@h ; target[1] = ori r,r,addr@l */
++ target[0] = (15 << 26) | (r << 21) | (addr >> 16);
++ target[1] = (24 << 26) | (r << 21) | (r << 16) | (addr & 0xffff);
++
++ /* printk("ACTION_LI_PHYS %d %08lX\n", dreg, addr ); */
++ return 0;
++}
++
++static int
++action_lwz_physaddr_r( int action, ulong *target, const int *pb )
++{
++ ulong addr = pb[1] + tophys_mol( code_base );
++ int dr = (pb[0] >> 5) & 0x1f;
++ int r = pb[0] & 0x1f;
++ short low = (addr & 0xffff);
++
++ /* target[0] = addis dr,r,addr@h ; target[1] = lwz dr,addr@l(dr) */
++ target[0] = (15 << 26) | (dr << 21) | (r << 16) | ((addr - low) >> 16);
++ target[1] = (32 << 26) | (dr << 21) | (dr << 16) | ((int)low & 0xffff);
++
++ /* printk("ACTION_LWZ_PHYSADDR_R %d %08lX\n", dreg, addr ); */
++ return 0;
++}
++
++static int
++action_specvar( int action, ulong *target, const int *pb )
++{
++ int r = pb[0] & 0x1f;
++ ulong addr;
++
++ switch( pb[1] ) {
++ case SPECVAR_SESSION_TABLE:
++ addr = tophys_mol(g_sesstab);
++ break;
++ default:
++ return 1;
++ }
++
++ if( action == ACTION_LIS_SPECVAR_H ) {
++ /* target[0] = addis r,0,addr@h */
++ target[0] = OPCODE_LIS( r, (addr >> 16) & 0xffff );
++ return 0;
++ }
++ if( action == ACTION_ORI_SPECVAR_L ) {
++ /* target[0] = ori rD,rS,addr@l */
++ int rD = (pb[0] >> 5) & 0x1f;
++ target[0] = OPCODE_ORI( rD, r, (addr & 0xffff));
++ return 0;
++ }
++ return 1;
++}
++
++
++/* Note: this only works under linux */
++static int
++action_tophysvirt( int action, ulong *target, const int *pb )
++{
++ ulong addr = tophys_mol(0);
++ int dr = (pb[0] >> 5) & 0x1f;
++ int sr = pb[0] & 0x1f;
++
++ if( action == ACTION_TOVIRT )
++ addr = -addr;
++
++ /* target[0] = addis dr,sr,(tophys(0))@hi */
++ target[0] = OPCODE_ADDIS( dr, sr, (addr >> 16) & 0xffff );
++ return 0;
++}
++
++/* pb[] = { vector, size, vret_offs, ...hook_code... } */
++static int
++action_reloc_hook( int action, ulong *hookcode, const int *pb )
++{
++ ulong addr, inst, vector=pb[0], size=pb[1], vret_offs=pb[2];
++ cleanup_entry_t *clean;
++ ulong *vector_virt, *target;
++ action_pb_t *apb;
++ char *lowmem;
++ int i;
++
++ /* Virtual address of exception vector */
++ vector_virt = lowmem_phys_to_virt(vector);
++
++ /* address of the vector hook code */
++ addr = tophys_mol( (char*)hookcode );
++
++ /* allocate lowmem and add cleanup handler */
++ if( !(lowmem=lowmem_alloc(size, &clean)) )
++ return 1;
++
++ /* printk("ACTION_RELOC_HOOK: %lx, %lx, %lx, %lx %p\n", vector, size, vret_action, vret_offs, lowmem); */
++
++ memcpy( lowmem, &pb[3], size );
++
++ /* perform the vret_action */
++ for( i=0; (apb=find_action(ACTION_VRET, i)); i++ ) {
++ if( apb->params[0] != vector )
++ continue;
++
++ /* insert the absolute branch */
++ target = (ulong*)(code_base + apb->offs);
++ *target = ((18<<26) + 2) | lowmem_tophys(lowmem + vret_offs);
++ flush_icache_mol( (ulong)target, (ulong)target + 4 );
++ /* printk("'ba xxx' added (opcode %08lX at %p)\n", *target, target ); */
++ }
++
++ /* fix the hook address in the glue code */
++ target = (ulong*)lowmem;
++ target[1] = (target[1] & ~0xffff) | (addr >> 16); /* target[0] = addis r3,0,0 */
++ target[3] = (target[3] & ~0xffff) | (addr & 0xffff); /* target[1] = ori r3,r3,0 */
++
++ /* relocate instruction to be overwritten with a branch */
++ inst = *vector_virt;
++ clean->opcode = inst;
++ if( relocate_inst( &inst, vector, lowmem_tophys(lowmem+vret_offs) ))
++ return 1;
++ *(ulong*)(lowmem + vret_offs) = inst;
++ flush_icache_mol( (ulong)lowmem, (ulong)lowmem + size );
++
++ /* insert branch, 'ba lowmem_ph' */
++ *(volatile ulong*)vector_virt = 0x48000002 + lowmem_tophys(lowmem);
++ flush_icache_mol( (ulong)vector_virt, (ulong)vector_virt+4 );
++
++ /* we are in business! */
++ clean->inst_addr = vector_virt;
++ return 0;
++}
++
++
++/* pb = { size, where_to_store_lowmem_addr, ...code... } */
++static int
++action_reloc_low( int action, ulong *dummy, const int *pb )
++{
++ int size = pb[0];
++ char **func_ptr = (char**)pb[1];
++ char *lowmem;
++
++ if( !(lowmem=lowmem_alloc(size, NULL)) )
++ return 1;
++ memcpy( lowmem, (char*)&pb[2], size );
++
++ flush_icache_mol( (ulong)lowmem, (ulong)lowmem+size );
++ *func_ptr = lowmem;
++ return 0;
++}
++
++/* pb = { symind, size, fret_offset, codeglue... } */
++static int
++action_hook_function( int action, ulong *hookcode, const int *pb )
++{
++ ulong addr, fhook=pb[0], size=pb[1], fret_offs=pb[2];
++ ulong *target, inst;
++ char *lowmem, *func_addr=NULL;
++ cleanup_entry_t *clean;
++
++ switch( fhook ) {
++#ifdef __linux__
++ case FHOOK_FLUSH_HASH_PAGE:
++ func_addr = (char*)compat_flush_hash_pages;
++ break;
++#endif
++ default:
++ printk("Bad fhook index %ld\n", fhook );
++ return 1;
++ }
++
++ /* this does not have to be in lowmem, but it is simpler with a unified approach */
++ if( !(lowmem=lowmem_alloc(size, &clean)) )
++ return 1;
++
++ /* printk("ACTION_HOOK_FUNCTION: %lx, %lx, %lx %p\n", fhook, size, fret_offs, lowmem); */
++
++ memcpy( lowmem, &pb[3], size );
++
++ /* fix the hook address in the glue code */
++ target = (ulong*)lowmem;
++ addr = (ulong)hookcode;
++ target[1] = (target[1] & ~0xffff) | (addr >> 16); /* target[1] = addis rX,0,0 */
++ target[2] = (target[2] & ~0xffff) | (addr & 0xffff); /* target[2] = ori rX,rX,0 */
++
++ /* relocate overwritten instruction and add relative return branch */
++ inst = *(ulong*)func_addr;
++ clean->opcode = inst;
++ if( relocate_inst(&inst, (ulong)func_addr, (ulong)lowmem + fret_offs) )
++ return 1;
++ target = (ulong*)(lowmem + fret_offs);
++ target[0] = inst;
++ target[1] = (18<<26) | (((ulong)func_addr - (ulong)&target[1] + sizeof(long)) & 0x03fffffc);
++ flush_icache_mol( (ulong)lowmem, (ulong)lowmem + size );
++ _sync();
++
++ /* insert relative branch, 'b lowmem' */
++ *(volatile ulong*)func_addr = (18<<26) | ((lowmem - func_addr) & 0x03fffffc);
++ flush_icache_mol( (ulong)func_addr, (ulong)func_addr+4 );
++
++ _sync();
++
++ /* we are in business! */
++ clean->inst_addr = (ulong*)func_addr;
++ return 0;
++}
++
++static int
++action_fix_sprg2( int action, ulong *target, const int *pb )
++{
++#ifdef __darwin__
++ int sprg2;
++ int r = pb[0] & 0x1f;
++ asm volatile("mfspr %0,274" : "=r" (sprg2) );
++ target[0] = OPCODE_LIS( r, (sprg2 >> 16) & 0xffff );
++ target[1] = OPCODE_ORI( r, r, (sprg2 & 0xffff) );
++ target[2] = OPCODE_MTSPRG2( r );
++#endif
++ return 0;
++}
++
++static int
++action_noaction( int action, ulong *hookcode, const int *pb )
++{
++ return 0;
++}
++
++static action_func_t actiontable[MAX_NUM_ACTIONS] = {
++ [ACTION_LI_PHYS] = action_li_phys,
++ [ACTION_LWZ_PHYSADDR_R] = action_lwz_physaddr_r,
++ [ACTION_TOPHYS] = action_tophysvirt,
++ [ACTION_TOVIRT] = action_tophysvirt,
++ [ACTION_RELOC_HOOK] = action_reloc_hook,
++ [ACTION_RELOCATE_LOW] = action_reloc_low,
++ [ACTION_HOOK_FUNCTION] = action_hook_function,
++ [ACTION_VRET] = action_noaction,
++ [ACTION_FIX_SPRG2] = action_fix_sprg2,
++
++ [ACTION_LIS_SPECVAR_H] = action_specvar,
++ [ACTION_ORI_SPECVAR_L] = action_specvar,
++};
++
++
++/************************************************************************/
++/* write/remove hooks */
++/************************************************************************/
++
++static int
++relocate_code( void )
++{
++ extern char r__reloctable_start[], r__reloctable_end[];
++
++ code_size = r__reloctable_end - r__reloctable_start;
++
++ if( !(code_base=kmalloc_cont_mol(code_size)) )
++ return 1;
++
++ memcpy( code_base, r__reloctable_start, code_size );
++ reloc_virt_offs = (int)code_base - (int)r__reloctable_start;
++ return 0;
++}
++
++int
++perform_actions( void )
++{
++ action_pb_t *pb;
++ int action, i;
++
++ if( relocate_code() )
++ return 1;
++ lowmem_initialize();
++
++ for( action=0; action < MAX_NUM_ACTIONS; action++ ) {
++ for( i=0; (pb=find_action(action,i)); i++ ) {
++ ulong *target = (ulong*)(code_base + pb->offs);
++
++ if( pb->offs > code_size ) {
++ printk("OFFSET ERROR!\n");
++ goto error;
++ }
++
++ if( !actiontable[action] )
++ goto error;
++ if( (*actiontable[action])(action, target, pb->params) )
++ goto error;
++ }
++
++ /* we need to flush the icache before the hook actions are performed */
++ if( action == FLUSH_CACHE_ACTION )
++ flush_icache_mol( (ulong)code_base, (ulong)code_base + code_size );
++ }
++ /* to be on the safe side, flush the cache once more */
++ flush_icache_mol( (ulong)code_base, (ulong)code_base + code_size );
++ return 0;
++ error:
++ printk("MOL: action %d error\n", action );
++ cleanup_actions();
++ return 1;
++}
++
++void
++cleanup_actions( void )
++{
++ cleanup_entry_t *ce = &cleanup_table[0];
++ int i;
++
++ for( i=0; i<num_cleanup_entries; i++, ce++ ) {
++ if( ce->inst_addr ) {
++ *(volatile ulong*)ce->inst_addr = cleanup_table[i].opcode;
++ flush_icache_mol( (ulong)ce->inst_addr, (ulong)ce->inst_addr + 4 );
++ }
++ }
++ lowmem_free_all();
++ kfree_cont_mol( code_base );
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/asm-files/603.S
+@@ -0,0 +1,218 @@
++/*
++ * Creation Date: <2001/06/15 20:10:49 samuel>
++ * Time-stamp: <2001/06/16 15:35:22 samuel>
++ *
++ * <603.S>
++ *
++ * 603 MMU support
++ *
++ * Copyright (C) 2001 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#define P603_USE_G_BIT
++#define P603_USE_R_BIT
++#define P603_USE_C_BIT
++
++/* NOTE: The 603 vectors are called with ctr saved in r0.
++ * Secondary interrupts are not detected automatically.
++ */
++
++/************************************************************************/
++/* Instruction TLB Miss (603 specific vector) */
++/************************************************************************/
++
++VECTOR_603( 0x1000, "Instruction TLB Miss - 603" )
++// mfctr r0 // Need to save this - CTR can't be touched!
++ mfspr r2,HASH1 // Get PTE pointer
++ mfspr r3,ICMP // Partial item compare value
++00: li r1,8 // 8 items / bucket
++ mtctr r1
++ subi r2,r2,8 // Preset pointer
++10: lwzu r1,8(r2) // Get next PTE
++ cmp 0,r1,r3 // Found entry yet?
++ bdnzf 2,10b // Jump back if not, until CTR==0
++ bne 30f // Try secondary hash if CTR==0
++ lwz r1,4(r2) // Get second word of entry
++#ifdef P603_USE_G_BIT
++ andi. r3,r1,8 // check G-bit
++ bne DoISI_603_G // if guarded, take an ISI
++#endif
++ mtctr r0 // Restore CTR
++ mfspr r3,SRR1 // Need to restore CR0
++ mtcrf 0x80,r3
++ mfspr r0,IMISS // Set to update TLB
++ mtspr RPA,r1
++#ifdef P603_USE_R_BIT
++ ori r1,r1,0x100 // Set reference bit
++ srwi r1,r1,8 // Get byte 7 of pte
++ tlbli r0 // Load the ITLB
++ stb r1,6(r2) // update page table
++#else
++ tlbli r0 // Load the ITLB
++#endif
++ rfi // All done
++
++ // Secondary hash
++30: andi. r1,r3,0x40 // Already doing secondary hash?
++ bne DoISI_603 // Yes - item not in hash table
++ mfspr r2,HASH2 // Get hash table pointer
++ ori r3,r3,0x40 // Set secondary hash
++ b 00b // Try lookup again
++
++
++/************************************************************************/
++/* Data Store TLB Miss (603 specific vector) */
++/************************************************************************/
++
++VECTOR_603( 0x1200, "Data Store TLB Miss - 603" )
++// mfctr r0 // Need to save this - CTR can't be touched!
++ mfspr r2,HASH1 // Get PTE pointer
++ mfspr r3,DCMP // Partial item compare value
++00: li r1,8 // 8 items / bucket
++ mtctr r1
++ subi r2,r2,8 // Preset pointer
++10: lwzu r1,8(r2) // Get next PTE
++ cmp 0,r1,r3 // Found entry yet?
++ bdnzf 2,10b // Jump back if not, until CTR==0
++ bne 30f // Try secondary hash if CTR==0
++ lwz r1,4(r2) // Get second word of entry
++#ifdef P603_USE_C_BIT
++ andi. r3,r1,0x80 // Check the C-bit
++ beq CheckProt_603
++16:
++#endif
++20: mtctr r0 // Restore CTR
++ mfspr r3,SRR1 // Need to restore CR0
++ mtcrf 0x80,r3
++ mfspr r0,DMISS // Set to update TLB
++ mtspr RPA,r1
++ tlbld r0
++ rfi // All done
++
++ // Secondary hash
++30: andi. r1,r3,0x40 // Already doing secondary hash?
++ bne DoDSI_603 // Yes - item not in hash table
++ mfspr r2,HASH2 // Get hash table pointer
++ ori r3,r3,0x40 // Set secondary hash
++ b 00b // Try lookup again
++
++#ifdef P603_USE_C_BIT
++ // Entry found and PTE[c] == 0. Check protection before setting PTE[c]
++ // r0 = saved ctr, r1 = second word of PTE, r2 = pointer to pteg, r3 = trash
++CheckProt_603:
++ rlwinm. r3,r1,30,0,1 // test PP
++ bge- 50f // if (PP == 00 or PP == 01) goto 50
++ andi. r3,r1,1 // test PP[0]
++ beq+ 60f // return if PP[0] == 0
++ b DoDSI_603_P // else DSI_P, (PP==11, read-only)
++
++50: mfspr r3,SRR1 // get old msr
++ andis. r3,r3,0x0008 // test the KEY bit (SRR1-bit 12)
++ beq 60f // if KEY==0, goto 60
++ b DoDSI_603_P // else DSI_P
++
++60: ori r1,r1,0x180 // set reference and change bit
++ sth r1,6(r2) // update page table
++ b 16b // and back we go
++#endif
++
++
++/************************************************************************/
++/* Data Load TLB Miss (603 specific vector) */
++/************************************************************************/
++
++VECTOR_603( 0x1100, "Data Load TLB Miss - 603" )
++// mfctr r0 // Need to save this - CTR can't be touched!
++ mfspr r2,HASH1 // Get PTE pointer
++ mfspr r3,DCMP // Partial item compare value
++00: li r1,8 // 8 items / bucket
++ mtctr r1
++ subi r2,r2,8 // Preset pointer
++10: lwzu r1,8(r2) // Get next PTE
++ cmp 0,r1,r3 // Found entry yet?
++ bdnzf 2,10b // Jump back if not, until CTR==0
++ bne 30f // Try secondary hash if CTR==0
++ lwz r1,4(r2) // Get second word of entry
++20: mtctr r0 // Restore CTR
++ mfspr r3,SRR1 // Need to restore CR0
++ mtcrf 0x80,r3
++ mfspr r0,DMISS // Set to update TLB
++ mtspr RPA,r1
++#ifdef P603_USE_R_BIT
++ ori r1,r1,0x100 // set reference bit
++ srwi r1,r1,8
++ tlbld r0
++ stb r1,6(r2)
++#else
++ tlbld r0
++#endif
++ rfi // All done
++
++// Secondary hash
++30: andi. r1,r3,0x40 // Already doing secondary hash?
++ bne DoDSI_603 // Yes - item not in hash table
++ mfspr r2,HASH2 // Get hash table pointer
++ ori r3,r3,0x40 // Set secondary hash
++ b 00b // Try lookup again
++
++
++/************************************************************************/
++/* Synthesize an ISI Exception */
++/************************************************************************/
++
++DoISI_603:
++ mfspr r3,SRR1
++ andi. r2,r3,0xFFFF // Clear upper bits of SRR1
++ addis r2,r2,0x4000 // Set bit 1 -> PTE not found (in HTAB)
++ mtctr r0 // Restore CTR
++40: mtspr SRR1,r2
++ mfmsr r0 // Restore "normal" registers
++ xoris r0,r0,MSR_TGPR>>16
++ mtcrf 0x80,r3 // Restore CR0
++ sync // Some chip revs have problems here...
++ mtmsr r0
++ SOFT_VECTOR_ENTRY_603( 0x400 ) // Jump to the ISI vector
++
++
++#ifdef P603_USE_G_BIT
++DoISI_603_G:
++ mfspr r3,SRR1
++ andi. r2,r3,0xFFFF // Clear upper bits of SRR1
++// addis r2,r2,0x0800 // Page protection violation
++ addis r2,r2,0x1000 // Guarded memory access
++ b 40b
++#endif
++
++
++/************************************************************************/
++/* Synthesize a DSI exception */
++/************************************************************************/
++
++DoDSI_603:
++ mfspr r3,SRR1
++ rlwinm r1,r3,9,6,6 // Get load/store bit
++ addis r1,r1,0x4000 // Set bit 1 -> PTE not found
++
++10: mtspr DSISR,r1
++ mtctr r0 // Restore CTR
++ andi. r2,r3,0xFFFF // Clear upper bits of SRR1
++ mtspr SRR1,r2
++ mfspr r1,DMISS // Get failing address
++ mtspr DAR,r1 // Set fault address
++ mfmsr r0 // Restore "normal" registers
++ xoris r0,r0,MSR_TGPR>>16
++ mtcrf 0x80,r3 // Restore CR0
++ sync // Some chip revs have problems here...
++ mtmsr r0
++ SOFT_VECTOR_ENTRY_603( 0x300 ) // Jump to the DSI vector
++
++DoDSI_603_P:
++ mfspr r3,SRR1
++ rlwinm r1,r3,9,6,6 // get load/store bit
++ addis r1,r1,0x800 // Set bit 4 (prot. violation)
++ b 10b
+--- /dev/null
++++ b/drivers/macintosh/mol/asm-files/dec.S
+@@ -0,0 +1,228 @@
++/*
++ * Creation Date: <2001/06/21 17:10:35 samuel>
++ * Time-stamp: <2004/03/07 13:16:58 samuel>
++ *
++ * <dec.S>
++ *
++ * DEC / TimeBase stuff
++ *
++ * Copyright (C) 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++/************************************************************************/
++/* TBWU / TBWL */
++/************************************************************************/
++
++#if 0
++spr_tbwu:
++ blr // read (won't come here)
++ mr r3,r0
++ mftb r4
++ b update_tb
++
++spr_tbwl:
++ blr // read (won't come here)
++ mftbu r3
++ mr r4,r0
++ // ...fall through...
++
++// r3 = tbu, r4 = tbl, r0,r2,r5 free.
++update_tb:
++ lwz r5,K_LOADED_DEC(r1)
++ mfdec r2
++ sub r5,r5,r2 // r5 = elapsed ticks
++
++ li r2,0 // Set timebase
++ mttbl r4
++ mttbu r3
++ mttbl r4
++
++ mfxer r2
++ subfc r4,r5,r4
++ addme r3,r3
++ stw r4,xTBL(r1)
++ stw r3,xTBU(r1)
++ mtxer r2
++ b emulation_done
++#endif
++
++
++/************************************************************************/
++/* Load/restore DEC and TB */
++/************************************************************************/
++
++ //////////////////////////////////////////////////////////////
++ // recalc_int_stamp
++ //
++ // May modify: r0,r2-r5
++ //
++
++recalc_int_stamp:
++ crclr FBIT_RecalcDecInt
++
++ lwz r5,xTIMER_STAMP(r1)
++ bf FBIT_DecSeenZero, 1f // FBIT_DecSeenZero must be set when DEC is loaded
++ bt FBIT_DecINT, 1f // interrupt might already be flagged...
++ mftb r3
++ lwz r4,xDEC_STAMP(r1) // Has the xDEC overflown already?
++ sub. r0,r4,r3
++ blt 2f // branch if xDEC < 0
++ sub. r0,r4,r5
++ blt 2f // branch if xDEC < xTIMER
++1: mr r4,r5
++2: stw r4,K_INT_STAMP(r1) // dec = xTIMER
++ blr
++
++
++ //////////////////////////////////////////////////////////////
++ // set_mol_dec
++ //
++ // May modify: r0,r2-r5
++ //
++ // Old 2.4 kernels assume that linux-DEC never ticks faster
++ // than the DEC interval measured from TB. Unfortunately,
++ // it is virtually impossible to keep DEC/TB in sync.
++ //
++ // Experimentally, the "worst case" scenario is a linux DEC tick
++ // being delayed 0.04 ms (to be compared with the 20 ms period).
++ //
++ // Sequences similar to
++ //
++ // 1: mftb r2; mfdec r3; mftb r4; cmp r2,r4; bne 1b
++ //
++ // do *not* work - DEC and TB probably ticks on different edges.
++
++set_mol_dec:
++ lwz r5,K_INT_STAMP(r1) // DEC = K_INT_STAMP - tbl
++ mfdec r3 // Make sure linux interrupts *never*
++ mftb r2 // occur too fast
++
++ sub r4,r5,r2
++ cmpw r4,r3
++ bgtlr
++ add r5,r3,r2 // K_DEC_STAMP = DEC + tbl
++ mtdec r4
++
++ stw r5,K_DEC_STAMP(r1)
++ crset FBIT_MolDecLoaded
++ blr
++
++ ///////////////////////////////////////////////////////////////
++ // set_kernel_dec
++ //
++ // May modify: r0,r2, cr
++ //
++
++set_kernel_dec:
++ lwz r0,K_DEC_STAMP(r1)
++ crclr FBIT_MolDecLoaded
++ mftb r2 // Keep linux-DEC coherent
++ sub r2,r0,r2 // DEC = stamp - tbl
++ mtdec r2
++ blr
++
++
++
++/************************************************************************/
++/* DEC read/write */
++/************************************************************************/
++
++_dec_read:
++ lwz r4,xDEC_STAMP(r1)
++ mftb r3
++ sub r0,r4,r3
++
++ BUMP("dec_read")
++ GET_TICK_CNT(entry,"dec_read")
++ b simple_mfspr
++
++spr_dec:
++ b _dec_read
++
++ // dec write. r0 = spr_value
++ BUMP("mtdec")
++ rlwinm. r5,r0,0,0,0 // seen zero?
++ mftb r4
++ cror FBIT_DecSeenZero,FBIT_DecSeenZero,eq
++ add r5,r4,r0
++ stw r5,xDEC_STAMP(r1) // set new dec value
++ bf FBIT_DecSeenZero, emulation_done
++ bl recalc_int_stamp // M: r0,r2-r5
++ btl FBIT_MolDecLoaded, set_kernel_dec // M: r0,r2
++ bl set_mol_dec // M: r0,r2-r5
++ b emulation_done
++
++
++/************************************************************************/
++/* Decrementer Exception */
++/************************************************************************/
++
++ // __dec_VECTOR (non-MOL dec exception)
++ //
++ // r3=cr, sprg1=saved_r1, sprg0=saved_r3
++ //
++ // An exception with DEC>=0 can occur if a mac-DEC overflow occurs
++ // just prior to a context switch. These exceptions should be
++ // dropped silently.
++
++__dec_VECTOR:
++ mfdec r1
++ cmpwi r1,0
++ blt+ 1f
++ mtcr r3 // Restore and exit
++ ABORT_TRAP( 0x900 )
++1:
++ mtcr r3 // Restore and continue trap
++ CONTINUE_TRAP( 0x900 )
++
++VECTOR_( 0x900, "Decrementer", secint_bad, __dec_VECTOR )
++ EXCEPTION_PREAMBLE // r0-r5, CR, LR, r6/r7 = msr/nip
++ TRACE(0x900, "Decrementer")
++ mfdec r4
++ cmpwi r4,0
++ bge exception_return
++ bf FBIT_MolDecLoaded, take_linux_dec_exception
++
++ mftb r3
++ lis r2,0x1000 // r2 = DEC rearm constant
++
++ bf FBIT_DecSeenZero, 1f // check for xDEC overflow
++ lwz r4,xDEC_STAMP(r1)
++ sub. r0,r4,r3 // lt set if xDEC has overflown
++ cror FBIT_DecINT, FBIT_DecINT, lt // dec_int |= lt
++ crandc FBIT_DecSeenZero, FBIT_DecSeenZero, lt // szero &= ~lt
++1:
++ lwz r5,xTIMER_STAMP(r1) // r5 = xTIMER_STAMP
++ sub. r0,r5,r3 // lt set if xTIMER has overflown
++ mtdec r2 // rearm DEC
++ blt- 2f // xTIMER has higher priority...
++
++ // mac-dec interrupt
++ BUMP("DEC-overflow")
++ bl set_kernel_dec
++ bl recalc_int_stamp
++ bl set_mol_dec
++ GET_TICK_CNT(entry,"dec-overflow")
++ bf- FBIT_DecINT,exception_return // could occur if xTIMER has changed on us
++ lwz r4,xMSR(r1)
++ rlwinm. r0,r4,0,MSR_EE
++ beq- exception_return // no... simply return
++ BUMP("DEC-exception")
++ b mac_dec_trap
++
++ // timer interrupt
++2: BUMP("Timer-interrupt")
++ crset FBIT_RecalcDecInt // dec must be recalced
++ GET_TICK_CNT(entry,"timer-overflow")
++ MAC_EXIT_SAVE( RVEC_TIMER )
++
++
++take_linux_dec_exception:
++ BUMP("Linux-DEC")
++ bl save_middle_regs
++ TAKE_EXCEPTION( 0x900 )
+--- /dev/null
++++ b/drivers/macintosh/mol/asm-files/emuaccel.S
+@@ -0,0 +1,188 @@
++/*
++ * Creation Date: <2003/01/24 13:54:52 samuel>
++ * Time-stamp: <2003/08/14 03:12:00 samuel>
++ *
++ * <emuaccel.S>
++ *
++ * Emulation acceleration
++ *
++ * Copyright (C) 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "emuaccel_sh.h"
++
++ // emuaccel registers:
++ //
++ // r2 = emuaccel_slot
++ // r6,r7 = nip(return address)/mregs
++ //
++
++ balign_8
++emuaccel_mtmsr_rNN:
++mFORLOOP([i],0,31,[
++ LOAD_EMUGPR_IM r0,i
++ b emulate_mtmsr_accel
++])
++emuaccel_mtsrr0_rNN:
++mFORLOOP([i],0,31,[
++ .if (i <= 7)
++ LOAD_EMUGPR_IM r0,i
++ b emulate_mtsrr0_accel
++ .else
++ stw rPREFIX[]i,xSRR0(r1)
++ b emulation_done_noinc
++ .endif
++])
++emuaccel_mtsrr1_rNN:
++mFORLOOP([i],0,31,[
++ .if (i <= 7)
++ LOAD_EMUGPR_IM r0,i
++ b emulate_mtsrr1_accel
++ .else
++ stw rPREFIX[]i,xSRR1(r1)
++ b emulation_done_noinc
++ .endif
++])
++emuaccel_mtsprg0_rNN:
++mFORLOOP([i],0,31,[
++ .if (i <= 7)
++ LOAD_EMUGPR_IM r0,i
++ b emulate_mtsprg0_accel
++ .else
++ stw rPREFIX[]i,xSPRG0(r1)
++ b emulation_done_noinc
++ .endif
++])
++emuaccel_mtsprg1_rNN:
++mFORLOOP([i],0,31,[
++ .if (i <= 7)
++ LOAD_EMUGPR_IM r0,i
++ b emulate_mtsprg1_accel
++ .else
++ stw rPREFIX[]i,xSPRG1(r1)
++ b emulation_done_noinc
++ .endif
++])
++emuaccel_mtsprg2_rNN:
++mFORLOOP([i],0,31,[
++ .if (i <= 7)
++ LOAD_EMUGPR_IM r0,i
++ b emulate_mtsprg2_accel
++ .else
++ stw rPREFIX[]i,xSPRG2(r1)
++ b emulation_done_noinc
++ .endif
++])
++emuaccel_mtsprg3_rNN:
++mFORLOOP([i],0,31,[
++ .if (i <= 7)
++ LOAD_EMUGPR_IM r0,i
++ b emulate_mtsprg3_accel
++ .else
++ stw rPREFIX[]i,xSPRG3(r1)
++ b emulation_done_noinc
++ .endif
++])
++emuaccel_mthid0_rNN:
++mFORLOOP([i],0,31,[
++ .if (i <= 7)
++ LOAD_EMUGPR_IM r0,i
++ b emulate_mthid0_accel
++ .else
++ stw rPREFIX[]i,xHID0(r1)
++ b emulation_done_noinc
++ .endif
++])
++
++emuaccel_nop:
++ b emulation_done_noinc
++
++emuaccel_rfi:
++ addi r6,r6,-4 // point nip to the rfi instruction
++ b emulate_rfi
++
++emuaccel_update_dec:
++ lwz r4,xDEC_STAMP(r1)
++ mftb r3
++ sub r0,r4,r3
++ stw r0,xDEC(r1)
++ b emulation_done_noinc
++
++ balign_8
++emuaccel_mtsr:
++ lwz r4,12(r2) // emuaccel parameter (opcode)
++ addi r6,r6,-4 // back NIP
++ rlwinm r5,r4,14,24,28 // reg_num << 3
++ b emulate_mtsr
++
++
++/************************************************************************/
++/* implementation */
++/************************************************************************/
++
++emulate_mtsrr0_accel:
++ stw r0,xSRR0(r1)
++ b emulation_done_noinc
++emulate_mtsrr1_accel:
++ stw r0,xSRR1(r1)
++ b emulation_done_noinc
++emulate_mtsprg0_accel:
++ stw r0,xSPRG0(r1)
++ b emulation_done_noinc
++emulate_mtsprg1_accel:
++ stw r0,xSPRG1(r1)
++ b emulation_done_noinc
++emulate_mtsprg2_accel:
++ stw r0,xSPRG2(r1)
++ b emulation_done_noinc
++emulate_mtsprg3_accel:
++ stw r0,xSPRG3(r1)
++ b emulation_done_noinc
++emulate_mthid0_accel:
++ stw r0,xHID0(r1)
++ b emulation_done_noinc
++
++ balign_32
++emulate_mtmsr_accel:
++ lwz r3,xMSR(r1) // r3 = old MSR
++ bl msr_altered
++
++ GET_TICK_CNT(entry, "mtmsr-accel")
++ BUMP("emulate_mtmsr-accel")
++
++ beq+ cr1,emulation_done_noinc // no doze... we are done
++ MAC_EXIT_SAVE( RVEC_MSR_POW ) // doze
++
++
++
++/************************************************************************/
++/* setup */
++/************************************************************************/
++
++#define EMUACCEL_REL(s) s - emuaccel_table
++
++#ifdef __linux__
++ .text 70 /* this table does not need to be relocated */
++#endif
++ // format: emuaccel_inst, offset, table_index_mask
++GLOBAL_SYMBOL(emuaccel_table):
++ .long EMUACCEL_MTMSR, EMUACCEL_REL( emuaccel_mtmsr_rNN ), 0x1f
++ .long EMUACCEL_MTSRR0, EMUACCEL_REL( emuaccel_mtsrr0_rNN ), 0x1f
++ .long EMUACCEL_MTSRR1, EMUACCEL_REL( emuaccel_mtsrr1_rNN ), 0x1f
++ .long EMUACCEL_MTSPRG0, EMUACCEL_REL( emuaccel_mtsprg0_rNN ), 0x1f
++ .long EMUACCEL_MTSPRG1, EMUACCEL_REL( emuaccel_mtsprg1_rNN ), 0x1f
++ .long EMUACCEL_MTSPRG2, EMUACCEL_REL( emuaccel_mtsprg2_rNN ), 0x1f
++ .long EMUACCEL_MTSPRG3, EMUACCEL_REL( emuaccel_mtsprg3_rNN ), 0x1f
++ .long EMUACCEL_MTHID0, EMUACCEL_REL( emuaccel_mthid0_rNN ), 0x1f
++ .long EMUACCEL_RFI, EMUACCEL_REL( emuaccel_rfi ), 0
++ .long EMUACCEL_UPDATE_DEC, EMUACCEL_REL( emuaccel_update_dec ), 0
++ .long EMUACCEL_MTSR, EMUACCEL_REL( emuaccel_mtsr ), 0
++ .long EMUACCEL_NOP, EMUACCEL_REL( emuaccel_nop ), 0
++ .long 0, 0, 0 /* end marker */
++
++ .text
+--- /dev/null
++++ b/drivers/macintosh/mol/asm-files/emulation.S
+@@ -0,0 +1,714 @@
++/*
++ * Creation Date: <97/07/26 18:23:02 samuel>
++ * Time-stamp: <2004/02/22 13:12:14 samuel>
++ *
++ * <emulation.S>
++ *
++ * Low-level emulation of some privileged instructions
++ *
++ * Copyright (C) 1997-2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++MACRO_0(INC_NIP, [
++ addi r6,r6,4
++])
++
++
++/************************************************************************/
++/* program exception vector */
++/************************************************************************/
++
++ //////////////////////////////////////////////////////////////
++ // exception preamble
++ // r1 stack (mregs)
++ // r6,r7 nip / srr1
++ // cr4-7 flag_bits
++ //
++ // Saved r0-r5 (NOT SAVED: ctr, xer)
++
++VECTOR_( 0x700, "Program", secint_bad, mac_entry )
++ EXCEPTION_PREAMBLE // 46 cycles on a G3
++ TRACE(0x700, "Program")
++
++ mtcrf 0x10,r7 // put srr1 bits (12-15) into cr3
++ bt 14,mac_program_trap
++ GET_INSTR_OPCODE // r6=nip, r2-r3, r4=opcode (cr not touched)
++ bt+ 13,emulate_priv_instr
++ bt+ 12,emulate_illegal_instr
++ mtcrf 0x20,r7 // put srr1 bits (8-11) into cr2
++ bt+ 11,mac_program_trap // bit 11 = fpu_exception
++
++ // we should not come here
++ MAC_EXIT_SAVE( RVEC_UNUSUAL_PROGRAM_EXCEP )
++
++unhandled_priv_inst:
++ MAC_EXIT_SAVE( RVEC_PRIV_INST ) // r4 = opcode
++
++emulate_illegal_instr:
++ rlwinm r2,r4,32-1,22,31
++ lwz r3,xMSR(r1)
++ rlwinm r5,r4,14,24,28 // r5 = reg_num << 3 (instr. bits 6-10)
++ rlwimi r2,r4,16,16,21 // r2 = opcode
++
++ rlwinm r0,r2,0,~0x80 // clear mtspr/mfspr bit
++ cmpwi r0,OPCODE(31,339) // mfspr/mtspr
++ bne 1f
++
++ rlwinm r7,r7,0,14,10 // clear srr1 bit 11-13
++ cmpwi cr1,r2,OPCODE(31,339) // mfspr
++ oris r7,r7,0x4 // set bit 13 (privileged instr)
++
++ beq- cr1,emulate_mfspr // r3=xMSR, r4=opcode, r5=reg_num
++ b emulate_mtspr // must be mtspr
++1:
++#ifdef EMULATE_603
++ bl emulate_603_instr
++#endif
++ MAC_EXIT_SAVE( RVEC_ILLEGAL_INST ) // r4 = opcode
++
++
++/************************************************************************/
++/* mac exceptions */
++/************************************************************************/
++
++ /////////////////////////////////////////////////////////////
++ // mac_trap
++ //
++ // r2 exception vector
++ // r6 nip
++ // r7 reason bits (0-15)
++ //
++ // r0,r2-r5 may be modified
++
++mac_irq_trap:
++ li r2,0x500
++ li r7,0 // no reason bits
++ b mac_trap
++
++mac_dec_trap:
++ li r2,0x900
++ crclr FBIT_DecINT
++ li r7,0 // no reason bits
++ b mac_trap
++
++ balign_32
++mac_program_trap: // the reason bits already in r7 are used
++ __BUMP("mac_trap")
++ li r2,0x700
++mac_trap:
++ lwz r3,xMSR(r1)
++ stw r6,xSRR0(r1) // mac-srr0 = nip
++ mr r4,r3 // copy xMSR to SRR1
++ mr r6,r2 // new nip
++ rlwinm r5,r3,25+1,31,31 // put MSR_IP (bit 25) in r5[31]
++ rlwimi r4,r7,0,0,15 // copy reason bits from r7 to SRR1
++ rlwimi r4,r3,0,6,6 // copy MSR_VEC bit of xMSR to SRR1
++ stw r4,xSRR1(r1) // srr1 = (msr & (0xffff|MSR_VEC)) | (srr1 & ~(0xffff|MSR_VEC))
++ rlwinm r4,r3,0,25,25 // copy MSR_IP to xMSR
++ neg r5,r5 // r5 = 0xffffffff * MSR_IP
++ rlwimi r4,r3,0,19,19 // copy MSR_ME to xMSR
++ rlwimi r6,r5,0,0,11 // insert exception prefix
++ stw r4,xMSR(r1)
++ GET_TICK_CNT(entry,"mac-expception")
++ b msr_exception_return
++
++rvec_trace_trap:
++ MAC_EXIT_SAVE( RVEC_TRACE_TRAP )
++
++
++/************************************************************************/
++/* decode privileged instruction */
++/************************************************************************/
++
++ /////////////////////////////////////////////////////////////
++ // emulate_priv_instr
++ // r4 opcode
++ // r6,r7 nip/srr1
++ //
++ // r0,r2-r5,lr free
++
++ balign_32
++emulate_priv_instr:
++ rlwinm r2,r4,32-1,22,31
++ lwz r3,xMSR(r1)
++ rlwinm r5,r4,14,24,28 // r5 = reg_num << 3 (instr. bits 6-10)
++ rlwimi r2,r4,16,16,21 // r2 = opcode
++
++ GET_TICK_CNT(entry,"get_inst")
++ BUMP("decode_priv_inst")
++
++ cmpwi cr1,r2,OPCODE(31,339) // mfspr (both user and supervisor mode)
++ beq- cr1,emulate_mfspr // r3=xMSR, r4=opcode, r5=reg_num
++
++ cmpwi cr2,r2,OPCODE(31,467) // mtspr (both user and supervisor mode)
++ beq- cr2,emulate_mtspr
++
++ andi. r3,r3,MSR_PR // only emulate in supervisor mode
++ bne- mac_program_trap
++
++ cmpwi cr3,r2,OPCODE(31,83) // mfmsr
++ beq- cr3,emulate_mfmsr
++
++ cmpwi cr0,r2,OPCODE(31,146) // mtmsr
++ beq- cr0,emulate_mtmsr
++
++ cmpwi cr1,r2,OPCODE(19,50) // rfi
++ beq- cr1,emulate_rfi
++
++ cmpwi cr2,r2,OPCODE(31,595) // mfsr
++ beq- cr2,emulate_mfsr
++
++ cmpwi cr3,r2,OPCODE(31,659) // mfsrin
++ beq- cr3,emulate_mfsrin
++
++ cmpwi cr0,r2,OPCODE(31,210) // mtsr
++ beq- cr0,emulate_mtsr
++
++ cmpwi cr1,r2,OPCODE(31,242) // mtsrin
++ beq- cr1,emulate_mtsrin
++
++ cmpwi cr2,r2,OPCODE(31,306) // tlbie
++ beq- cr2,emulate_tlbie
++
++ cmpwi cr3,r2,OPCODE(31,566) // tlbsync
++ beq- cr3,emulate_tlbsync
++
++ cmpwi cr0,r2,OPCODE(31,470) // dcbi (was 467 == mtspr, unreachable)
++ beq- cr0,emulate_dcbi
++#ifdef EMULATE_603
++ cmpwi cr1,r2,OPCODE(31,978) // tlbld
++ beq- cr1,emulate_tlbld
++
++ cmpwi cr2,r2,OPCODE(31,1010) // tlbli
++ beq- cr2,emulate_tlbli
++#endif
++ // Program-trap, illegal instruction
++ b unhandled_priv_inst // r4 = opcode
++
++
++#ifdef EMULATE_603
++emulate_603_instr:
++ rlwinm r2,r4,32-1,22,31
++ rlwimi r2,r4,16,16,21 // r2 = opcode
++
++ cmpwi cr0,r2,OPCODE(31,978) // tlbld
++ beq cr0,2f
++ cmpwi cr1,r2,OPCODE(31,1010) // tlbli
++ beq cr1,2f
++ cmpwi cr2,r2,OPCODE(31,339) // mfspr
++ beq cr2,1f
++ cmpwi cr3,r2,OPCODE(31,467) // mtspr
++ bnelr cr3
++1: rlwinm r3,r4,32-16,27,31
++ rlwimi r3,r4,32-6,22,26 // r3 = spr#
++ cmpwi r3,976 // first 603 SPR
++ bltlr
++ cmpwi r3,982 // last 603 SPR
++ bgtlr
++2:
++ rlwinm r7,r7,0,14,10 // clear bit 11-13
++ oris r7,r7,0x4 // set bit 13
++ b emulate_priv_instr
++
++#endif /* EMULATE_603 */
++
++
++/************************************************************************/
++/* mac register access */
++/************************************************************************/
++
++MACRO(LOAD_EMUGPR_IM, [dreg,ereg], [
++.if _ereg <= 7
++ lwz _dreg,xGPR0+_ereg[]*4(r1)
++.else
++ mr _dreg,rPREFIX[]_ereg
++.endif
++])
++
++MACRO(STORE_EMUGPR_IM, [sreg,ereg], [
++.if _ereg <= 7
++ stw _sreg,xGPR0+_ereg[]*4(r1)
++.else
++ mr rPREFIX[]_ereg, _sreg
++.endif
++])
++
++ balign_32
++store_gpr_table:
++mFORLOOP([i],0,31,[
++ STORE_EMUGPR_IM r0,i
++ blr
++])
++
++load_gpr_table:
++mFORLOOP([i],0,31,[
++ LOAD_EMUGPR_IM r0,i
++ blr
++])
++
++MACRO(EMU_LOAD_GPR, [reg, scr], [
++ LI_PHYS( _scr, load_gpr_table )
++ add rPREFIX[]_scr,_reg,rPREFIX[]_scr
++ mtlr rPREFIX[]_scr
++ blrl
++])
++
++MACRO(EMU_STORE_GPR, [reg, scr], [
++ LI_PHYS( _scr, store_gpr_table )
++ add rPREFIX[]_scr,_reg,rPREFIX[]_scr
++ mtlr rPREFIX[]_scr
++ blrl
++])
++
++
++
++/************************************************************************/
++/* instruction emulation */
++/************************************************************************/
++
++ //////////////////////////////////////////////////////////
++ // emulate_xxxxx
++ // r3 xMSR
++ // r4 opcode
++ // r5 regnum<<3 (from opcode bits 6-10)
++ //
++ // May modify: r0,r2-r5 (lr)
++
++/********************************************************************/
++emulate_mfmsr:
++ lwz r0,xMSR(r1)
++ EMU_STORE_GPR r5, /**/ R2
++ GET_TICK_CNT( entry, "mfmsr" )
++ b emulation_done
++
++/********************************************************************/
++emulate_mfspr: // r3 = xMSR
++ BUMP("emulate_mfspr")
++ rlwimi r4,r4,32-10,21,25 // flip spr
++ rlwinm r0,r3,32-14,31,31 // r0(bit31) = MSR_PR
++ addi r3,r1,K_SPR_HOOKS
++ rlwinm r2,r4,32-4,20,29 // r2 = spr# << 2
++ addi r4,r1,xSPR_BASE
++ lwzx r3,r2,r3 // hook in r3
++ and. r0,r0,r3 // privileged SPR?
++ bne- mac_program_trap
++ lwzx r0,r2,r4 // value in r0
++ mtlr r3
++ blr // call hook
++
++/********************************************************************/
++emulate_mtspr: // r3 = xMSR
++ BUMP("emulate_mtspr")
++ rlwimi r4,r4,32-10,21,25 // flip spr
++ EMU_LOAD_GPR r5, /**/ R2 // value in r0
++ rlwinm r2,r4,32-4,20,29 // r2 = spr# << 2
++ addi r4,r1,K_SPR_HOOKS
++ rlwinm r3,r3,32-14,31,31 // r3(bit31) = MSR_PR
++ lwzx r4,r2,r4 // hook in r4
++ addi r5,r2,xSPR_BASE // r5+r1 points to SPR reg
++ addi r4,r4,4 // branch to hook +4
++ and. r3,r3,r4 // privileged SPR?
++ bne- mac_program_trap // privileged exception
++ mtlr r4 // lsb is discarded...
++ blr // call hook
++
++
++/********************************************************************/
++ balign_32
++emulate_mtmsr:
++ lwz r3,xMSR(r1) // r3 = old MSR
++ LI_PHYS( R2, load_gpr_table )
++ add r2,r5,r2
++ mtlr r2
++ blrl // r0 = new MSR
++
++ INC_NIP
++ bl msr_altered
++
++ GET_TICK_CNT(entry, "mtmsr")
++ BUMP("emulate_mtmsr")
++
++ beq+ cr1,emulation_done_noinc
++ MAC_EXIT_SAVE( RVEC_MSR_POW ) // POW 1 => doze
++
++
++/********************************************************************/
++emulate_rfi:
++ BUMP("rfi")
++ lis r4,(MSR_VEC)>>16
++ lwz r0,xSRR1(r1)
++ ori r4,r4,0xffff
++#ifdef EMULATE_603
++ oris r4,r4,(MSR_TGPR)>>16
++#endif
++ lwz r3,xMSR(r1) // r3 = old MSR
++ and r0,r0,r4
++ andc r5,r3,r4
++ lwz r6,xSRR0(r1) // new nip = SRR0
++ or r0,r0,r5
++
++ bl msr_altered // r0,r2-r5, r7 [srr1] updated
++
++ GET_TICK_CNT(entry,"rfi")
++
++ lwz r3,K_BREAK_FLAGS(r1) // break at rfi support
++ andi. r3,r3,BREAK_RFI
++ beq+ exception_return
++ li r4,BREAK_RFI // r4 = flag causing the break
++ MAC_EXIT_SAVE( RVEC_BREAK )
++
++
++/********************************************************************/
++#ifdef EMULATE_603
++emulate_tlbli:
++ LOADI r3,EXTERN(do_tlbli)
++ b 1f
++emulate_tlbld:
++ LOADI r3,EXTERN(do_tlbld)
++ b 1f
++emulate_tlbie:
++ LOADI r3,EXTERN(do_tlbie)
++1: INC_NIP
++ rlwinm r5,r4,32-8,24,28 // r5 = #B << 3
++ EMU_LOAD_GPR r5, /**/ R2 // value ret. in r0
++ mr r4,r0 // r4 = ea
++ b call_kernel_save
++#else
++emulate_tlbie:
++ b emulation_done
++#endif /* EMULATE_603 */
++
++/********************************************************************/
++emulate_tlbsync:
++ b emulation_done
++
++/********************************************************************/
++emulate_dcbi:
++ b unhandled_priv_inst // r4 = opcode
++
++/********************************************************************/
++emulate_mfsrin:
++ rlwinm r2,r4,32-8,24,28 // r2 = #B << 3
++ EMU_LOAD_GPR r2, /**/ R3 // r0 = reg B
++ rlwinm r3,r0,6,26,29 // r3 = #sr << 2
++ b 1f
++emulate_mfsr:
++ rlwinm r3,r4,32-14,26,29 // r3 = #sr << 2
++1: addi r2,r1,xSEGR_BASE
++ lwzx r0,r3,r2
++ EMU_STORE_GPR r5, /**/ R2
++ GET_TICK_CNT(entry, "mfsr")
++ BUMP("mfsr_")
++ b emulation_done
++
++
++
++/************************************************************************/
++/* SPR - emulation */
++/************************************************************************/
++
++ ////////////////////////////////////////////////////////////
++ // read (offset 0)
++ // r0 spr_value
++ // r2 spr << 2
++ // r5 dreg << 3
++ //
++ // write (offset 4)
++ // r0 gpr_value
++ // r2 spr << 2
++ // r5 spr offset (relative r1)
++ //
++ // Safe to modify: r2-r5, lr
++ // NOT SAVED: ctr, xer
++
++/********************************************************************/
++simple_mfspr:
++ EMU_STORE_GPR r5, /**/ R3
++ GET_TICK_CNT(entry,"simple_mfspr")
++ b emulation_done
++
++GLOBAL_SYMBOL(r__spr_read_write):
++spr_read_write:
++ b simple_mfspr
++ stwx r0,r5,r1 // value in r0
++ GET_TICK_CNT(entry,"simple_mtspr")
++ b emulation_done
++
++
++/********************************************************************/
++GLOBAL_SYMBOL(r__spr_read_only):
++spr_read_only:
++ b simple_mfspr // allow read
++ b emulation_done // ignore write
++
++/********************************************************************/
++GLOBAL_SYMBOL(r__spr_illegal):
++spr_illegal:
++ nop // spr read entry
++ rlwinm r7,r7,0,15,10 // clear srr1 bit 11-14
++ oris r7,r7,0x8 // set bit 12 (privileged instr)
++ b mac_program_trap
++
++
++/********************************************************************/
++unhandled_spr_read:
++ srwi r4,r2,2
++ srwi r5,r5,3
++ // r4 = spr#
++ // r5 = dest gpr
++ MAC_EXIT_SAVE( RVEC_SPR_READ )
++
++unhandled_spr:
++ b unhandled_spr_read // read hook (offs 0)
++unhandled_spr_write: // write hook (offs 4)
++ srwi r4,r2,2
++ mr r5,r0
++ // r4 = spr#
++ // r5 = register-value
++ MAC_EXIT_SAVE( RVEC_SPR_WRITE )
++
++/********************************************************************/
++spr_bat:
++ b simple_mfspr // read has no side-effects
++ INC_NIP
++ LOADI r3,EXTERN(do_mtbat)
++ bl save_middle_regs // Must do this before touching r6-r12
++ srwi r4,r2,2 // r4 = spr#
++ mr r5,r0 // r5 = value
++ li r6,0 // not forced
++ b call_kernel
++
++/********************************************************************/
++spr_sdr1:
++ b simple_mfspr // read has no side-effects
++ INC_NIP
++ LOADI r3,EXTERN(do_mtsdr1)
++ mr r4,r0 // r4 = value
++ b call_kernel_save
++
++
++
++
++/************************************************************************/
++/* handle MSR changes */
++/************************************************************************/
++
++ ////////////////////////////////////////////////////////////
++ // msr_exception_return (exception taken)
++ //
++ // r6, r7: nip / srr1
++ //
++ // modifies: r0,r2-r5 (r7 updated)
++
++ balign_16
++msr_exception_return:
++ addi r3,r1,K_UNMAPPED_SR_BASE // set unmapped context
++ li r7,(MSR_ME | MSR_SE | MSR_IR | MSR_DR | MSR_PR)
++ stw r3,K_CUR_SR_BASE(r1)
++ li r5,(fb_DbgTrace | fb_Trace)
++ ori r7,r7,MSR_EE
++
++ bt+ FBIT_DbgTrace, 1f
++ li r5,0
++ rlwinm r7,r7,0,~MSR_SE
++1:
++ stw r3,K_SR_DATA(r1)
++ li r4,fb_LoadSegreg
++
++ stw r3,K_SR_INST(r1)
++ mtcrf TRACE_CR_FIELD,r5 // set singlestep bits [cr6]
++ mtcrf MMU_CR_FIELD,r4
++
++ stw r7,K_MSR(r1)
++ b exception_return
++
++
++ ////////////////////////////////////////////////////////////
++ // msr_altered
++ //
++ // r6, r7: nip / srr1
++ //
++ // r0 = new msr
++ // r3 = old msr
++ //
++ // Sets cr1.ne if MSR_POW is set
++ //
++ // M: r2-r5 (r7 updated).
++ // r0 may _NOT_ be modified
++
++#define MSR_CLEARBITS (MSR_FP | MSR_FE0 | MSR_FE1 | MSR_BE | MSR_SE)
++#define MSR_COPYBITS (MSR_BE | MSR_SE)
++
++ balign_32
++msr_altered:
++#ifdef EMULATE_603
++ bf+ FBIT_603_AltGPR,7f // 603 alternate GPR support
++ rlwinm. r5,r0,0,MSR_TGPR
++ bne+ 7f
++ lwz r2,xGPRSAVE0_603(r1) // MSR_TGPR cleared...
++ lwz r4,xGPRSAVE1_603(r1)
++ lwz r5,xGPRSAVE2_603(r1)
++ stw r2,xGPR0(r1)
++ lwz r2,xGPRSAVE3_603(r1)
++ stw r4,xGPR1(r1)
++ stw r5,xGPR2(r1)
++ stw r2,xGPR3(r1)
++ crclr FBIT_603_AltGPR
++7:
++#endif
++ li r7,(MSR_ME | MSR_SE | MSR_IR | MSR_DR | MSR_PR)
++ stw r0,xMSR(r1)
++ xor r3,r3,r0 // r3 == MSR bit toggle
++ bt- FBIT_IRQPending,test_for_irq // M: r2
++irq_test_ret:
++ rlwinm r4,r0,0,MSR_POW // MSR_POW
++ ori r7,r7,MSR_EE
++ andi. r3,r3,(MSR_DR|MSR_IR|MSR_PR) // MMU change (cr unused)?
++ bt- FBIT_DecINT,test_for_dec // M: r2
++dec_test_ret:
++ cmpwi cr1,r4,0 // MSR_POW set?
++ li r2,(fb_DbgTrace | fb_Trace)
++ cmpwi cr2,r3,0
++ bt- FBIT_DbgTrace,1f
++ rlwinm r2,r0,(21+32-FBIT_Trace),fb_Trace // MSR_SE[21] -> FBIT_Trace
++ rlwimi r7,r0,0,(MSR_SE|MSR_BE) // no debugger; copy MSR_SE and MSR_BE
++1:
++ stw r7,K_MSR(r1)
++ mtcrf TRACE_CR_FIELD,r2 // set singlestep bits [cr6]
++
++ bne cr2,1f // bnelr is slower...
++ blr
++1:
++ /* MMU change */
++ BUMP("MMU-change")
++
++ andi. r3,r0,(MSR_IR | MSR_DR) // IR DR part of index
++ addi r5,r1,K_MSR_SR_TABLE
++ addi r4,r3,MSR_DR // splitmode (MSR_DR != MSR_IR) testing
++ rlwimi r3,r0,32-8,25,25 // [PR IR DR] index to K_MSR_SR_TABLE
++ lwzux r3,r5,r3 // set sr bases from K_MSR_SR_TABLE
++ andi. r4,r4,MSR_IR // non-zero if in splitmode
++ lwz r2,4(r5)
++ li r4,(fb_InSplitmode | fb_LoadSegreg | fb_PrepareSplitmode)
++ lwz r5,8(r5)
++ stw r3,K_CUR_SR_BASE(r1) // new sr base in r3 (used below)
++ stw r2,K_SR_DATA(r1)
++ bne- 1f
++ li r4,fb_LoadSegreg // cur_sr_base changed
++1: stw r5,K_SR_INST(r1)
++ mtcrf MMU_CR_FIELD,r4
++ blr
++
++test_for_irq:
++ BUMP("test-for-irq")
++ and r2,r3,r0 // check whether we are turning external interrupts on
++ andi. r2,r2,MSR_EE // we need to recheck IRQs in userspace then
++ beq+ 1f
++ lwz r4,xHOSTIRQ_ACTIVE_CNT(r1)
++ cmpwi r4,0 // only return if some host irq is up
++ beq+ 1f
++ MAC_EXIT_SAVE(RVEC_CHECK_IRQS)
++
++1: andi. r2,r0,MSR_EE
++ beq irq_test_ret
++ b mac_irq_trap
++
++test_for_dec:
++ BUMP("test-for-dec")
++ andi. r2,r0,MSR_EE
++ beq dec_test_ret
++ b mac_dec_trap
++
++force_msr_altered:
++ BUMP("force-msr-altered")
++ lwz r0,xMSR(r1) // r0 = new MSR
++ xori r3,r0,(MSR_DR|MSR_IR) // r3 = faked old MSR
++ b msr_altered // might throw an exception...
++
++ // msr_altered( kv )
++GLOBAL_SYMBOL(r__msr_altered):
++ lwz r5,xFLAG_BITS(r3)
++ ori r5,r5,fb_MsrModified
++ stw r5,xFLAG_BITS(r3)
++ blr
++
++
++/************************************************************************/
++/* initialize special purpose register table */
++/************************************************************************/
++
++MACRO(SPR_HOOK, [spr, hook], [ LI_PHYS( R8, _hook ) ; stw r8,(((_spr)*4)+K_SPR_HOOKS)(r3) ])
++
++// The LSB of a SPR hook specifies that the SPR is privileged (these bits are
++// set from C-code).
++
++ // r3 = kvars
++GLOBAL_SYMBOL(r__initialize_spr_table):
++ LI_PHYS( R7, unhandled_spr )
++ addi r8,r3,K_SPR_HOOKS-4
++ li r9,1024
++ mtctr r9
++1: stwu r7,4(r8)
++ bdnz 1b
++
++ // XXX for now...
++ SPR_HOOK TBWU, spr_read_write
++ SPR_HOOK TBWL, spr_read_write
++
++ // SPRs that have side effects
++ SPR_HOOK SDR1, spr_sdr1
++ SPR_HOOK DEC, spr_dec
++
++ // BATs
++ mFORLOOP([nn],0,15,[
++ SPR_HOOK eval(nn+IBAT0U), spr_bat
++ ])
++ blr
++
++
++/************************************************************************/
++/* initialize msr segment register table */
++/************************************************************************/
++
++ /////////////////////////////////////////////////////////////
++ // initialize_sr_offs_table
++ //
++ // Copy sr_offs_table to K_MSR_SR_TABLE
++ // r1 is added to each element
++
++initialize_msr_sr_table:
++ mflr r8 // Get address of table
++ bl sr_offs_table
++ mflr r3
++ mtlr r8
++
++ li r5,4*8 // #words in table
++ mtctr r5
++ addi r3,r3,-4
++ addi r4,r1,K_MSR_SR_TABLE-4
++1:
++ lwzu r6,4(r3)
++ add r6,r6,r1 // And add r1
++ stwu r6,4(r4)
++ bdnz 1b
++ blr
++
++ // Used to construct msr_sr_table (mbase is added)
++sr_offs_table:
++ blrl
++ /* K_CUR_SR_BASE, K_SR_DATA_BASE, K_SR_INST_BASE, dummy */
++
++ .long K_UNMAPPED_SR_BASE, K_UNMAPPED_SR_BASE, K_UNMAPPED_SR_BASE, 0
++ .long K_SPLIT_SR_BASE, K_SV_SR_BASE, K_UNMAPPED_SR_BASE, 0 /* DR */
++ .long K_SPLIT_SR_BASE, K_UNMAPPED_SR_BASE, K_SV_SR_BASE, 0 /* IR */
++ .long K_SV_SR_BASE, K_SV_SR_BASE, K_SV_SR_BASE, 0 /* DR|IR */
++
++ .long K_UNMAPPED_SR_BASE, K_UNMAPPED_SR_BASE, K_UNMAPPED_SR_BASE, 0 /* PR */
++ .long K_SPLIT_SR_BASE, K_USER_SR_BASE, K_UNMAPPED_SR_BASE, 0 /* PR|DR */
++ .long K_SPLIT_SR_BASE, K_UNMAPPED_SR_BASE, K_USER_SR_BASE, 0 /* PR|IR */
++ .long K_USER_SR_BASE, K_USER_SR_BASE, K_USER_SR_BASE, 0 /* PR|DR|IR */
+--- /dev/null
++++ b/drivers/macintosh/mol/asm-files/entry.S
+@@ -0,0 +1,433 @@
++/*
++ * Creation Date: <2001/01/30 00:22:35 samuel>
++ * Time-stamp: <2004/03/07 13:33:39 samuel>
++ *
++ * <entry.S>
++ *
++ * Emulator/mac switching
++ *
++ * Copyright (C) 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++/* Short note about DEC. Due to the kernel implementation of the DEC
++ * exception handler (and get_time_of_day) in the 2.3/2.4 kernel, we
++ * must try to maintain coherency between DEC and TB. In any case,
++ * we must NEVER let DEC tick faster than TB, or get_time_of_day will
++ * occasionally return bogus values (the result is usually
++ * misbehaving X).
++ */
++
++/************************************************************************/
++/* Mac Entry */
++/************************************************************************/
++
++ //////////////////////////////////////////////////////////
++ // mac_entry_initialize
++ //
++ // Run once in order to store certain things (segment
++ // registers and NIP)
++ //
++ // sprg1 userspace stack
++ // r6 emulator return point after 'call_kernel'
++ //
++ // Safe to modify: r0-r12, lr, ctr
++
++mac_entry_initialize:
++ __ZERO_TICK_CNT(entry)
++
++ // Save emulator return point (skip over illegal inst)
++ mfsrr0 r5
++ addi r5,r5,4
++ stw r5,K_EMULATOR_NIP(r1) // This sets the return point once and for all
++ stw r2,K_EMULATOR_TOC(r1) // r2==TOC (not on linux though)
++ stw r6,K_EMULATOR_KCALL_NIP(r1) // Return point after 'call_kernel'
++ mfsrr1 r6
++ mfsprg_a1 r3
++ stw r6,K_EMULATOR_MSR(r1) // we want to return through mac_exit
++ stw r3,K_EMULATOR_STACK(r1)
++
++ lwz r3,xFLAG_BITS(r1) // Best to initialize the flag bits
++ mtcr r3
++
++ // msr to be used in mac-mode
++ LOADI r3,(MSR_ME | MSR_IR | MSR_DR | MSR_PR | MSR_EE)
++ stw r3,K_MSR(r1)
++
++ PERF_MONITOR_SETUP /**/ r4
++
++ addi r3,r1,K_EMULATOR_SR // Save segment registers
++ SAVE_SEGMENT_REGS r3, /**/ r4,r5
++ SAVE_IBATS K_IBAT0U_SAVE, /**/ r3 // And save linux BAT registers
++ SAVE_DBATS K_DBAT0U_SAVE, /**/ r3
++
++ bl initialize_msr_sr_table
++
++ addi r3,r1,K_EMULATOR_SR
++ LOAD_SEGMENT_REGS r3, /**/ r4,r5
++
++ li r3,RVEC_EXIT
++ b exit_
++
++
++ ////////////////////////////////////////////////////////////////////
++ // mac_entry [ENTRYPOINT]
++ //
++ // sprg1 userspace stack
++ // r4 MOL_ENTRY_R4_MAGIC
++ // r5 magic
++ // r6 call_kernel return point (if initializing)
++ // r7 session number | initialize flag
++ //
++ // srr1 emulator msr
++ // r13-r31 mac gprs
++ // fr14-fr31 mac fp registers
++ // fr0-fr12 mac fp registers (see xFPU_STATE)
++
++1: li r4,1
++ stw r4,ST_MAGIC(r1)
++2: mtcr r3 // restore registers
++ CONTINUE_TRAP( 0x700 ) // not MOL, take the trap
++
++ balign_32
++mac_entry:
++ lis_svh R1,SPECVAR_SESSION_TABLE
++ cmpwi r4,MOL_ENTRY_R4_MAGIC // MOL switch magic.
++ ori_svl R1,R1,SPECVAR_SESSION_TABLE
++ bne- 2b
++ lwz r4,ST_MAGIC(r1) // check that the magic matches
++ cmplwi r5,1 // and is greater than 1
++ cmpw cr1,r5,r4
++ ble- 2b
++ addi r5,r1,ST_KVARS_PH // get kvars for this session
++ rlwinm r4,r7,2,((MAX_NUM_SESSIONS-1)*4)
++ bne- cr1,1b // r1 must point to session table
++ lwzx r1,r4,r5 // set r1 to kvars
++ cmpwi cr1,r1,0
++ cmplwi r7,MAX_NUM_SESSIONS
++ beq- cr1,2b // kvars == NULL
++ bge- mac_entry_initialize // initialize flag set
++
++ // =====================================================================
++ // entrypoint
++ // =====================================================================
++
++ __ZERO_TICK_CNT(entry)
++ TRACE(0x1111, "mac_entry")
++
++ // Save emulator registers (r1,msr) and restore flag bits
++ mfsprg_a1 r3
++ lwz r4,xFLAG_BITS(r1)
++ stw r3,K_EMULATOR_STACK(r1)
++ mfsrr1 r6
++ mtcr r4
++ stw r6,K_EMULATOR_MSR(r1) // The MSR_FP/MSR_VEC bits are of interest...
++
++ // Setup mac-environment
++
++ btl FBIT_RecalcDecInt, recalc_int_stamp // M: r0,r2-r5
++ bl set_mac_context // M: r0,r2-r12,XER
++ bl set_mol_dec // M: r0,r2-r5
++
++ crset FBIT_LoadSegreg // Load segment registers below
++
++ RESTORE_MIDDLE_REGS // Loads r6,r7 (nip/msr) r8-r12, ctr, xer,
++
++ rlwinm r7,r7,0,~MSR_VEC // We always clear MSR_VEC (MSR_FP should be off already)
++ stw r7,K_MSR(r1) // (enabling MSR_VEC is relatively cheap)
++ btl- FBIT_MsrModified,msr_altered_entry
++
++ GET_TICK_CNT(entry, "mac_entry")
++
++ bt- FBIT_DecINT,test_for_dec_int // check for DEC interrupts
++ b exception_return
++
++test_for_dec_int:
++ lwz r3,xMSR(r1)
++ rlwinm. r3,r3,0,MSR_EE // MSR_EE is set?
++ bne mac_dec_trap // if so take a DEC interrupt
++ b exception_return
++
++msr_altered_entry:
++ crclr FBIT_MsrModified
++ b force_msr_altered
++
++
++ /////////////////////////////////////////////////////////////
++ // All paths back to mac-mode goes through one of these
++ // functions.
++ //
++ // emulation_done
++ // emulation_done_noinc
++ // exception_return
++
++ balign_32
++emulation_done:
++ addi r6,r6,4 // increase NIP
++emulation_done_noinc:
++ bt- FBIT_Trace, rvec_trace_trap
++ GET_TICK_CNT(entry,"emulation_done")
++exception_return:
++ btl- FBIT_LoadSegreg,reload_sr // M: r3-r5, r6=nip
++ mtsrr1 r7 // setup SRR1
++ lwz r0,xCR(r1)
++ lwz r3,xLINK(r1)
++ mtsrr0 r6
++ lwz r4,xGPR4(r1)
++ lwz r5,xGPR5(r1)
++ mfcr r2 // Save flag bits
++ lwz r6,xGPR6(r1)
++ lwz r7,xGPR7(r1)
++ mtlr r3
++ stw r2,xFLAG_BITS(r1)
++ lwz r2,xGPR2(r1)
++ lwz r3,xGPR3(r1)
++ mtcr r0
++ lwz r0,xGPR0(r1)
++ __GET_TICK_CNT(entry,"asm-all") // performance measurements
++ __BUMP("asm-all")
++ lwz r1,xGPR1(r1)
++ rfi
++
++
++/************************************************************************/
++/* Exit Mac-Mode Paths */
++/************************************************************************/
++
++ // THESE ARE _ALL_ THE POSSIBLE EXIT PATHS. KEEP IT THAT WAY
++ // OR HAVE A *VERY GOOD* REASON TO INTRODUCE A NEW ONE.
++
++ /////////////////////////////////////////////////////////
++ // giveup_fpu
++ //
++ // Save fpscr and fpr13 and clear the MSR_FP bit.
++ // Restore the emulator fpscr value.
++ //
++ // IMPORTANT: Call this function only if FBIT_FPUInUse is set
++ //
++ // modifies: r7,r8 (turns on MSR_FP if FP is set in K_MSR)
++
++giveup_fpu:
++ li r8,MSR_FP | MSR_FE0 | MSR_FE1
++ lwz r7,K_MSR(r1)
++ andc r8,r7,r8 // Clear MSR_FEx bits
++ stw r8,K_MSR(r1)
++
++ mfmsr r7 // Temporary enable FPU in order to
++ ori r8,r7,MSR_FP // save fpscr and fpr13
++ mtmsr r8
++ isync
++ stfd fr13,xFPR13(r1)
++ mffs fr13
++ stfd fr13,xFPSCR-4(r1)
++ li r7,FPU_STATE_DIRTY
++ lfd fr13,xEMULATOR_FPSCR-4(r1) // We must restore FPSCR before since the emulator might
++ mtfsf 0xff,fr13 // use the FPU at any time, for instance in a signal handler.
++ stw r7,xFPU_STATE(r1) // Go to FPU_STATE_DIRTY
++
++ crclr FBIT_FPUInUse // FPU no longer in use
++ blr
++
++
++ ////////////////////////////////////////////////////////
++ // PREPARE_ERET
++ //
++ // M: r0,r2, r9-r11
++
++MACRO(PREP_ERET,[nip_variable], [
++ btl FBIT_MolDecLoaded, set_kernel_dec // M: r0,r2,r9-r11
++
++ lwz r10,_nip_variable[](r1)
++ mfcr r9
++ lwz r11,K_EMULATOR_MSR(r1)
++ mtsrr0 r10
++ lwz r2,K_EMULATOR_TOC(r1)
++ stw r9,xFLAG_BITS(r1)
++ mtsrr1 r11
++])
++
++ ////////////////////////////////////////////////////////
++ // mac_exit (return to emulator)
++ // r3 RVEC return code
++ //
++ // On stack: nip, ctr, lr, xer, r0-r12
++ // In registers: r13-r31
++
++mac_exit:
++ TRACE(0x2220, "mac_exit")
++ bl set_emulator_context // M: r0,r2,r7-r11,XER
++exit_:
++ PREP_ERET K_EMULATOR_NIP // M: r0-r2,r9-r11
++ GET_TICK_CNT(entry, "mac_exit")
++ lwz r1,K_EMULATOR_STACK(r1)
++ rfi
++
++
++ ////////////////////////////////////////////////////////
++ // take_exception (take a linux exception)
++ //
++ // On stack: nip, ctr, lr, xer, r0-r12
++ // In registers: r13-r31
++
++take_exception:
++ TRACE(0x2221, "take_exception")
++
++ mflr r12
++ bl set_emulator_context // M: r0,r2,r7-r11,XER
++
++ PREP_ERET K_EMULATOR_NIP // M: r0,r2,r9-r11
++ GET_TICK_CNT(entry, "take_exception")
++ lwz r1,K_EMULATOR_STACK(r1)
++ mtlr r12
++ li r3,RVEC_NOP
++ blr
++
++
++ //////////////////////////////////////////////////////////////
++ // call_kernel (call mol kernel routine)
++ // r3 kernel routine
++ // r4..r6 args
++ //
++ // On stack: nip, ctr, lr, xer, r0-r12
++ // In registers: r13-r31
++
++#ifdef __linux__
++call_kernel_save:
++ bl save_middle_regs // saves r8-r11, nip, ctr, xer
++call_kernel:
++ bl set_emulator_context // M: r0,r2,r7-r11,XER
++
++ TRACE(0x2222, "call_kernel")
++
++ lwz r8,K_KERNEL_VARS(r1) // r8 = kvars (lvptr)
++ PREP_ERET K_EMULATOR_KCALL_NIP // M: r0,r2,r9-r11
++ GET_TICK_CNT(entry, "call_kernel_save")
++ lwz r1,K_EMULATOR_STACK(r1)
++ ba 0x2f00 // MOL trampoline
++#endif
++
++/************************************************************************/
++/* Set Mac/Emulator Context */
++/************************************************************************/
++
++ //////////////////////////////////////////////////////////////
++ // set_mac_context [r0,r2-r12, ctr, --->XER<---]
++ //
++ // - clear BATs (except DBAT0)
++ // - setup sprgs
++ // - reload_sr loads segment registers later on
++ //
++ // Currently unmodified r8-r12, ctr
++
++set_mac_context:
++ // Save and setup SPRG2 (magic) and SPRG3 (mol stack)
++ mfsprg_a2 r6
++ mfsprg_a3 r7
++ stw r6,K_EMULATOR_SPRG2(r1)
++ stw r7,K_EMULATOR_SPRG3(r1)
++ li r2,MOL_SPRG2_MAGIC
++ mtsprg_a3 r1
++ mtsprg_a2 r2
++
++ li r4,0
++ mtspr IBAT0U,r4
++ mtspr IBAT1U,r4
++ mtspr IBAT2U,r4
++ mtspr IBAT3U,r4
++ // DBAT0 set from reload_sr
++ mtspr DBAT1U,r4
++ mtspr DBAT2U,r4
++ mtspr DBAT3U,r4
++#ifdef __darwin__
++ lwz r4,K_MOL_SDR1(r1)
++ mtsdr1 r4
++#endif
++ blr
++
++
++ ///////////////////////////////////////////////////////////////
++ // set_emulator_context [r0,r2,r7-r11,cr, --->XER<---]
++ //
++ // - load segr 0-15 with emulator context
++ // - restore BATs
++ // - restore DEC register
++
++set_emulator_context:
++ lwz r0,K_EMULATOR_SPRG2(r1)
++ lwz r2,K_EMULATOR_SPRG3(r1)
++ mtsprg_a2 r0
++ mtsprg_a3 r2
++
++ // Restore segment registers
++ addi r8,r1,K_EMULATOR_SR
++ LOAD_SEGMENT_REGS r8, /**/ r2,r10
++
++ // BATS, r11 = linux DEC
++
++ lwz r7,K_IBAT0U_SAVE(r1)
++ mtspr IBAT0U,r7
++ lwz r2,K_IBAT1U_SAVE(r1)
++ mtspr IBAT1U,r2
++ lwz r7,K_IBAT2U_SAVE(r1)
++ mtspr IBAT2U,r7
++ lwz r2,K_IBAT3U_SAVE(r1)
++ mtspr IBAT3U,r2
++
++ lwz r7,K_DBAT0U_SAVE(r1)
++ mtspr DBAT0U,r7
++ lwz r7,K_DBAT0L_SAVE(r1) // must also restore lower bat...
++ mtspr DBAT0L,r7
++ lwz r2,K_DBAT1U_SAVE(r1)
++ mtspr DBAT1U,r2
++ lwz r7,K_DBAT2U_SAVE(r1)
++ mtspr DBAT2U,r7
++ lwz r2,K_DBAT3U_SAVE(r1)
++ mtspr DBAT3U,r2
++#ifdef __darwin__
++ lwz r2,K_OS_SDR1(r1)
++ mtsdr1 r2
++ fix_sprg2 /**/ R2 // must not modify sprg2 (i.e. sprg_a0) under OSX 10.3
++#endif
++ blr
++
++
++/************************************************************************/
++/* Reload Segment Registers */
++/************************************************************************/
++
++ //////////////////////////////////////////////////////////////
++ // reload_sr
++ //
++ // r6 = mac-nip
++ //
++ // - loads segr 0-15 with mac context [modifies r3-r5]
++ // - reloads DBAT0 (used for splitmode)
++ //
++ // Modifies: r3-r5
++
++reload_sr:
++ bt FBIT_InSplitmode,prepare_splitmode // M: r0,r3-r5, r6=mac-nip
++
++ lwz r3,K_TRANSL_DBAT0L(r1)
++ lwz r4,K_TRANSL_DBAT0U(r1)
++ li r5,0
++ mtspr DBAT0L,r3
++ mtcrf MMU_CR_FIELD,r5 // clear FBIT_LoadSegreg (and splitmode stuff)
++ mtspr DBAT0U,r4
++1:
++ lwz r4,K_CUR_SR_BASE(r1)
++ LOAD_SEGMENT_REGS r4, /**/ r3,r5
++ blr
++
++ret_from_prep_splitmode:
++ lwz r3,K_SPLIT_DBAT0L(r1)
++ lwz r4,K_SPLIT_DBAT0U(r1)
++ li r5,fb_InSplitmode
++ mtspr DBAT0L,r3
++ mtcrf MMU_CR_FIELD,r5 // clear FBIT_LoadSegreg and FBIT_PrepareSplitmode
++ mtspr DBAT0U,r4
++ b 1b
+--- /dev/null
++++ b/drivers/macintosh/mol/asm-files/iopage.S
+@@ -0,0 +1,89 @@
++/*
++ * Creation Date: <97/07/26 18:23:02 samuel>
++ * Time-stamp: <2002/07/06 12:12:10 samuel>
++ *
++ * <iopage.S>
++ *
++ * IO low-level support
++ *
++ * Copyright (C) 2002 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++ // NOTE: This function is also called from the splitmode secondary interrupt
++ // handler (splitmode segment registers are resetup at return).
++
++
++ ///////////////////////////////////////////////////////////////////
++ // check_io_page
++ //
++ // xINST_OPCODE is always valid at this point
++ //
++ // Examine if the page (which DAR points to) is an I/O page.
++ // Safe to modify: r0,r2-r12
++
++check_io_page:
++ // Is this is an IO-page? IMPORTANT: we must *never* insert mappings
++ // that are unreadable by supervisor (will cause a freeze right here).
++
++ mfdsisr r10 // r10 = DSISR
++ rlwinm. r3,r10,0,1,1 // BIT 1 set if no PTE (or BAT mapping)
++ bnelr
++ mfdar r8
++
++ mfmsr r7
++ ori r3,r7,MSR_DR // set MSR_DR
++ mtmsr r3
++ isync
++
++ rlwinm r5,r8,0,0,19 // mask page index
++
++ // XXX: TO BE FIXED. These instructions will _never_ cause an exception on a
++ // single processor system. However, on a SMP machine we *could* receive a
++ // tlbie invalidate broadcast. Thus we must implement a secondary
++ // interrupt handler to cover that case.
++
++ lwz r2,IOP_MAGIC(r5)
++ lwz r3,IOP_MAGIC2(r5) // These should _never_ cause any exceptions
++ lwz r9,IOP_ME_PHYS(r5) // r9 = physical addr of iopage
++
++ mtmsr r7 // restore MSR
++ isync
++
++ lis r6,HI(IO_PAGE_MAGIC_1) // check MAGIC 1
++ ori r6,r6,LO(IO_PAGE_MAGIC_1)
++ cmplw r6,r2
++ bnelr
++
++ lis r7,HI(IO_PAGE_MAGIC_2) // check MAGIC 2
++ ori r7,r7,LO(IO_PAGE_MAGIC_2)
++ cmplw r7,r3
++ bnelr
++
++ // Obtain translation info from the iopage:
++ //
++ // r4 = mphys_ioaddr = iop->mphys | (dar & 0xfff);
++ // r5 = usr_data = iop->usr_data[ (dar & 0xfff) >> 3 ];
++ //
++ lwz r4,IOP_MPHYS(r9)
++ rlwimi r4,r8,0,20,31 // insert page offset
++
++ rlwinm r7,r8,32-1,21,29 // grain = double word
++ addi r7,r7,IOP_USR_DATA // usr_data[ (dar&0xfff) ]
++ lwzx r5,r9,r7 // r5 = usr_data
++
++ rlwinm. r2,r10,0,6,6 // was it a write? (r10=DSISR)
++ bne handle_write
++
++ // r4 = mphys_ioaddr
++ // r5 = usr_data
++ MAC_EXIT( RVEC_IO_READ )
++
++handle_write:
++ // r4 = mphys_ioaddr
++ // r5 = usr_data
++ MAC_EXIT( RVEC_IO_WRITE )
+--- /dev/null
++++ b/drivers/macintosh/mol/asm-files/linux.S
+@@ -0,0 +1,129 @@
++/*
++ * Creation Date: <2001/02/24 14:08:28 samuel>
++ * Time-stamp: <2003/09/03 12:34:20 samuel>
++ *
++ * <platform.S>
++ *
++ * Linux Kernel Hooks
++ *
++ * Copyright (C) 2001, 2002, 2003 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++ ////////////////////////////////////////////////////////
++ // flush_hash_page_hook
++ //
++ // Kernel hook
++ //
++ // r3: context
++ // r4: virtual address
++ // r5: 2.4.6 pointer to linux PTE (2.4.6 or later)
++ // 2.6 pmdval
++ // r6: 2.6 count
++ // r10: return address
++ //
++ // [must not modify: r3-r5,r10 - otherwise normal C-function]
++ // MMU is ON
++
++.set STACK_SPACE, 32
++.set STACK_LR, STACK_SPACE+4
++.set STACK_V0, 8
++.set STACK_V1, 12
++.set STACK_V2, 16
++.set STACK_V3, 20
++.set STACK_V4, 24
++.set STACK_V5, 28
++
++FHOOK( FHOOK_FLUSH_HASH_PAGE )
++GLOBAL_SYMBOL( r__flush_hash_page_hook ):
++ stwu r1,-STACK_SPACE(r1) // Push stackframe
++ mflr r0
++ stw r0,STACK_LR(r1)
++
++ LOADI r7,EXTERN(do_flush)
++ mtctr r7
++
++ stw r10,STACK_V1(r1) // Save registers
++ stw r3,STACK_V2(r1)
++ stw r4,STACK_V3(r1)
++ stw r5,STACK_V4(r1)
++#ifdef LINUX_26
++ stw r6,STACK_V5(r1)
++#else
++ li r6,1
++#endif
++ bctrl
++
++ lwz r10,STACK_V1(r1) // Restore registers
++ lwz r3,STACK_V2(r1)
++ lwz r4,STACK_V3(r1)
++ lwz r5,STACK_V4(r1)
++#ifdef LINUX_26
++ lwz r6,STACK_V5(r1)
++#endif
++
++ lwz r0,STACK_LR(r1) // Pop stackframe
++ addi r1,r1,STACK_SPACE
++ mtlr r0
++ blr
++
++
++/************************************************************************/
++/* lowmem reallocations */
++/************************************************************************/
++
++#ifdef CONFIG_SMP
++
++	/* r3=ea, r4=pte_slot, r5=pte1, r6=pte2, r7=lock, r8=lockval, M: r0 */
++RELOC_LOW(xx_store_pte_lowmem)
++1:	lwarx	r0,0,r7			// r0 = 0 once the lock is observed free
++	cmpwi	r0,0
++	bne-	1b
++	stwcx.	r8,0,r7			// take hash lock
++	bne-	1b
++	// isync			// sync below is sufficient
++	stw	r0,0(r4)		// clear old PTE[V] (if we evict something)
++	sync				// probably not needed - no other CPU uses this PTE
++	stw	r6,4(r4)		// store PTE2
++	eieio
++	stw	r5,0(r4)		// store PTE1
++	tlbie	r3			// flush old pte
++	eieio				// order tlbie before tlbsync
++	tlbsync				// ensure tlbie finishes on all processors
++	sync				// ensure tlbsync completed
++	stw	r0,0(r7)		// release hash lock
++	blr
++RELOC_LOW_END(xx_store_pte_lowmem)
++
++
++	/* r3=ea, r7=lock, r8=lockval, M: r0 */
++RELOC_LOW(xx_tlbie_lowmem)
++1:	lwarx	r0,0,r7
++	cmpwi	r0,0
++	bne-	1b
++	stwcx.	r8,0,r7			// take hash lock
++	bne-	1b
++	//isync				// needed if we get rid of the sync
++	sync				// make sure any PTE zero-outs have finished
++	tlbie	r3			// flush old pte
++	eieio				// order tlbie before tlbsync
++	tlbsync				// ensure tlbie finishes on all processors
++	sync				// ensure tlbsync completed
++	stw	r0,0(r7)		// release hash lock
++	blr
++RELOC_LOW_END(xx_tlbie_lowmem)
++
++#else
++
++	/* r3=pte_slot, r4=pte0, r5=pte1 */
++RELOC_LOW(xx_store_pte_lowmem)
++	stw	r4,0(r3)	// interrupts are off and we won't take a page fault
++	stw	r5,4(r3)	// so this is safe...
++	blr
++RELOC_LOW_END(xx_store_pte_lowmem)
++
++#endif
+--- /dev/null
++++ b/drivers/macintosh/mol/asm-files/ptintercept.S
+@@ -0,0 +1,303 @@
++/*
++ * Creation Date: <2001/03/17 18:00:05 samuel>
++ * Time-stamp: <2003/05/26 00:08:48 samuel>
++ *
++ * <ptintercept.S>
++ *
++ * Handles writes to the (mac) hash table
++ *
++ * Copyright (C) 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++//////////////////////////////////////////////////////////////////////
++// Implementing the tlbie instruction properly is tricky.
++// The tlbie is supposed to invalidate an equivalence
++// class of PTEs and this does not map well to the huge TLB
++// MOL uses (the linux PTE hash, a lot bigger than the 2x64
++// table found in most CPUs).
++//
++// The solution is intercepting PTE writes and maintain
++// coherence without relying on the tlbie instruction (which
++// can safely be replaced by a nop).
++//
++// Splitmode: The instruction sr might be active. DBAT0 does
++// _not_ have the supervisor valid bit set so we have to load
++// the data segment register.
++//////////////////////////////////////////////////////////////////////
++
++MACRO(SET_MSR_DR, [scr], [
++	li	_scr,MSR_DR
++	mtmsr	_scr
++	isync
++])
++MACRO(CLEAR_MSR_DR, [scr], [
++	li	_scr,0
++	mtmsr	_scr
++	isync
++])
++
++	// r3 = exception vector
++secint_pt_store:
++	bf+	FBIT_InSplitmode,1f
++	mfdar	r2
++	lwz	r4,K_TMP_SCRATCH0(r1)		// in splitmode, restore saved segreg
++	mtsrin	r4,r2				// (might be the instruction sr)
++1:
++	lwz	r6,xNIP(r1)			// restore NIP & MSR
++	lwz	r7,K_MSR(r1)
++	cmpwi	r3,0x300			// DSI?
++	beq	1f
++	cmpwi	r3,0x600			// Alignment
++	beq	2f
++	DEBUGGER_SAVE( 0x6909 )
++1:	bl	save_middle_regs
++	b	dsi_cont
++2:	bl	save_middle_regs
++	b	alignment_cont
++
++	////////////////////////////////////////////////////////////////////////
++	// check_pthash_hit
++	//
++	// m: r0,r2-r5, cr
++
++check_pthash_hit:
++	mfdsisr	r3
++	rlwinm.	r4,r3,0,4,4			// Is this a protection violation?
++	beqlr
++	rlwinm.	r4,r3,0,6,6			// and a write?
++	beqlr-
++
++	mfdar	r2				// Is this a pthash hit?
++	lwz	r3,K_TLBHASH_BASE_EA(r1)	// First check EA...
++	lwz	r4,K_HASH_MASK(r1)
++	sub	r5,r2,r3
++	cmplw	r5,r4
++	bgtlr+
++
++	lwz	r5,K_SR_DATA(r1)		// Correct context for EA?
++	rlwinm	r3,r2,4+2,26,29			// #sr << 2
++	lwz	r4,K_TLBHASH_SR(r1)
++	lwzx	r3,r3,r5
++	cmpw	r3,r4
++	bnelr-
++
++	/////////////////////////////////////////////////
++	// Handle page table write, r2=dar, r4=segreg
++	/////////////////////////////////////////////////
++
++	bf+	FBIT_InSplitmode,1f
++	mfsrin	r3,r2				// save old segment register
++	stw	r3,K_TMP_SCRATCH0(r1)		// in splitmode (might be
++	mtsrin	r4,r2				// the instruction sr)
++1:
++	lwz	r3,xINST_OPCODE(r1)
++	LI_PHYS( R4,secint_pt_store )		// r4 = secondary exception handler
++	stw	r6,xNIP(r1)
++
++	// r2 = dar, r3 = opcode
++	rlwinm	r0,r3,6,0x3e			// primary opcode & ~1
++	rlwinm	r5,r3,6+5+3,(0x1f<<3)		// r5 = rS << 3
++	cmpwi	r0,30				// 31 & ~1
++	beq-	do_opcode_31
++	mtcrf	0x40,r3				// cr[5] = update bit
++	cmpwi	r0,36				// 36, stw/stwu
++	beq	do_stw
++	cmpwi	r0,38				// 38, stb/stbu
++	beq	do_stb
++	cmpwi	r0,44				// 44, sth/sthu
++	beq	do_sth
++	cmpwi	r0,52				// 52, stfs/stfsu
++	beq	do_stfs
++	cmpwi	r0,54				// 54, stfd/stfdu
++	beq	do_stfd
++	cmpwi	r0,46				// 46 = 47 & ~1, stmw
++	beq	do_stmw
++	b	do_st_bad			// unrecognized store
++
++do_opcode_31:
++	rlwinm	r0,r3,32-1,22,31		// secondary opcode
++	rlwinm.	r6,r3,0,(32<<1)			// update form?
++	rlwinm	r0,r0,0,~32			// clear update bit
++	crnot	5,eq
++
++	cmpwi	r0,151				// stwx/stwux
++	beq+	do_stw
++	cmpwi	r0,215				// stbx / stbux
++	beq-	do_stb
++	cmpwi	r0,150				// stwcx.
++	beq-	do_stwcx
++	cmpwi	r0,407				// sthx / sthux
++	beq-	do_sth
++	cmpwi	r0,727				// stfdx / stfdux
++	beq-	do_stfd
++	cmpwi	r0,663				// stfsx / stfsux
++	beq-	do_stfs
++	crclr	5
++	rlwinm	r0,r3,32-1,22,31		// unmasked secondary opcode
++	cmpwi	r0,470				// dcbi
++	beq-	do_dcbi
++	cmpwi	r0,1014				// dcbz
++	beq-	do_dcbz
++	cmpwi	r0,983				// stfiwx [optional]
++	beq-	do_stfiw
++	cmpwi	r0,725				// stswi
++	beq-	do_stswi
++	cmpwi	r0,661
++	beq-	do_stswx
++	b	do_st_bad			// float, cache or altivec
++
++do_st_bad:
++do_stfd:
++do_stfs:
++do_stfiw:
++do_stswi:
++do_stswx:
++	lwz	r6,xNIP(r1)
++	DEBUGGER_SAVE(0x1882)		// unimplemented store (float/string)
++
++	// r2=dar, r3=opcode, r4=secint_handler, r5=rS_offs
++do_dcbi:
++do_dcbz:
++	mtlr	r4
++	SET_MSR_DR	/**/ r6
++	rlwinm	r6,r2,0,~0x7		// r6 = dar & ~7 (PTE slot)
++	lwz	r4,0(r6)		// r4 = old PTE0
++	lwz	r5,4(r6)		// r5 = old PTE1
++	dcbz	0,r2
++	CLEAR_MSR_DR	/**/ r0
++	b	st_continue_2
++do_stwcx:
++	EMU_LOAD_GPR r5, /**/ R6	// r0 = value
++	mtlr	r4
++	SET_MSR_DR	/**/ r6
++	rlwinm	r6,r2,0,~0x7
++	lwz	r4,0(r6)
++	lwz	r5,4(r6)
++	stwcx.	r0,0,r2
++	CLEAR_MSR_DR	/**/ r0
++	lwz	r6,xCR(r1)
++	mfcr	r0
++	rlwimi	r6,r0,0,0,3		// copy cr0 into saved CR
++	stw	r6,xCR(r1)
++	b	st_continue_2
++do_sth:
++	EMU_LOAD_GPR r5, /**/ R6	// r0 = value
++	mtlr	r4
++	SET_MSR_DR	/**/ r6
++	rlwinm	r6,r2,0,~0x7
++	lwz	r4,0(r6)
++	lwz	r5,4(r6)
++	sth	r0,0(r2)
++	b	st_continue
++do_stb:
++	EMU_LOAD_GPR r5, /**/ R6	// r0 = value
++	mtlr	r4
++	SET_MSR_DR	/**/ r6
++	rlwinm	r6,r2,0,~0x7
++	lwz	r4,0(r6)
++	lwz	r5,4(r6)
++	stb	r0,0(r2)
++	b	st_continue
++do_stw:
++	EMU_LOAD_GPR r5, /**/ R6	// r0 = value
++	mtlr	r4
++	SET_MSR_DR	/**/ r6
++	rlwinm	r6,r2,0,~0x7
++	lwz	r4,0(r6)
++	lwz	r5,4(r6)
++	stw	r0,0(r2)
++	b	st_continue
++
++	// r4=PTE0, r5=PTE1, r3=opcode, r2=dar, scratch: r0
++st_continue:
++	CLEAR_MSR_DR	/**/ r0
++	bf+	5,st_continue_2			// update form?
++	rlwinm	r3,r3,6+5+5+3,(31<<3)		// r3 = rA << 3
++	mr	r0,r2
++	EMU_STORE_GPR r3, /**/ R6		// r0 = value
++st_continue_2:
++	// check if the old PTE has been used (r2=dar,r4/r5=PTE)
++	lwz	r3,K_TLBHASH_BASE_EA(r1)	// Calculate tlb offset
++	sub	r0,r2,r3			// r0 = pte_nr * 8
++	lwz	r6,K_PTHASH_INUSE_PH(r1)
++	rlwinm	r3,r0,32-6,6,29			// r3 = word offset
++	cmpwi	r6,0
++	beq-	1f
++	lwzx	r3,r3,r6
++	rlwinm	r6,r2,32-3,27,31		// pte_nr & 0x1f
++	li	r0,1
++	slw	r6,r0,r6			// r6 = bit
++	and.	r6,r6,r3
++	GET_TICK_CNT(entry,"ptintercept-1")
++	bne-	pt_intercept
++1:
++	BUMP("pt_intercept_not_taken")
++	// return from exception [r2=dar]
++	lwz	r6,xNIP(r1)			// restore r6
++	bf+	FBIT_InSplitmode,emulation_done
++	lwz	r3,K_TMP_SCRATCH0(r1)		// in splitmode (might be
++	mtsrin	r3,r2				// the instruction sr)
++	b	emulation_done
++
++	// transfer to C-function [r2=dar, r4/r5=pte]
++pt_intercept:
++	BUMP("pt_intercept_taken")
++	lwz	r6,xNIP(r1)			// restore r6
++	lwz	r3,K_TLBHASH_BASE_EA(r1)	// calculate tlb offset
++	addi	r6,r6,4				// inc NIP
++	bl	save_middle_regs
++	sub	r6,r2,r3
++	rlwinm	r6,r6,0,0,28			// argument pteoffs (dword aligned)
++
++	// r4=PTE0, r5=PTE1
++	LOADI	r3,EXTERN(do_intercept_tlbie)
++	b	call_kernel
++
++	// stmw uses another emulator entry point because it might overwrite a bunch of PTEs
++do_stmw:
++	mtlr	r4
++
++	// save some debugging info
++//	stw	r2,xDBG_TRACE_SPACE(r1)
++
++	SET_MSR_DR	/**/ r6
++
++	// now do the stmw. we do that manually since we have to access emulator regs.
++	mr	r4,r2			// r4: memory pointer
++	srwi	r6,r5,3			// r6: rS
++1:	cmpwi	r5,32 << 3		// loop condition
++	bge	2f
++	EMU_LOAD_GPR r5, /**/ R3	// r0 = value
++	stw	r0,0(r4)		// store the value
++	addi	r5,r5,1 << 3		// update register counter
++	addi	r4,r4,4			// and memory pointer
++	b	1b
++
++2:	CLEAR_MSR_DR	/**/ r0
++
++	// load up r4 and r5 for do_intercept_tlbie_block (see below)
++	subfic	r5,r6,32		// number of registers (=words) stored
++	slwi	r5,r5,2			// number of bytes stored
++	add	r4,r2,r5		// end of the stored area
++	addi	r4,r4,7			// round up to
++	rlwinm	r5,r4,0,~0x7		// PTE (8 byte) boundary
++	rlwinm	r4,r2,0,~0x7		// pte block pointer
++	sub	r5,r5,r4		// subtract block pointer -> length (in bytes)
++	lwz	r3,K_TLBHASH_BASE_EA(r1) // calculate tlb offset
++	sub	r4,r4,r3		// tlb offset
++	b	st_block_continue
++
++	// transfer to C-function [r2=dar, r4=pte block offset, r5=pte block length]
++st_block_continue:
++	BUMP("pt_intercept_taken")
++	lwz	r6,xNIP(r1)
++	addi	r6,r6,4			// inc NIP
++	bl	save_middle_regs
++
++	LOADI	r3,EXTERN(do_intercept_tlbie_block)
++	b	call_kernel
+--- /dev/null
++++ b/drivers/macintosh/mol/asm-files/splitmode.S
+@@ -0,0 +1,428 @@
++/*
++ * Creation Date: <2000/07/11 03:38:32 samuel>
++ * Time-stamp: <2003/08/20 16:37:04 samuel>
++ *
++ * <splitmode.S>
++ *
++ * Handles splitmode (MSR_IR != MSR_DR)
++ *
++ * Copyright (C) 2000, 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++MACRO(SM_SET_MSR_DR, [scr], [
++	li	_scr,MSR_DR
++	mtmsr	_scr
++	isync
++])
++MACRO(SM_CLEAR_MSR_DR, [scr], [
++	li	_scr,0
++	mtmsr	_scr
++	isync
++])
++MACRO(SPLITMODE_SMP_LOCK, [scr1,scr2], [
++#ifdef CONFIG_SMP
++	LI_PHYS( _scr1,splitmode_lock )
++7:	lwarx	_scr2,0,_scr1
++	cmpwi	_scr2,0
++	li	_scr2,1
++	bne-	7b
++	stwcx.	_scr2,0,_scr1
++	bne-	7b
++	isync
++#endif
++])
++MACRO(SPLITMODE_SMP_UNLOCK, [scr1,scr2], [
++#ifdef CONFIG_SMP
++	LI_PHYS( _scr1,splitmode_lock )
++	li	_scr2,0
++	stw	_scr2,0(_scr1)	// NOTE(review): no sync/eieio before release - verify ordering
++#endif
++])
++
++	/////////////////////////////////////////////////////////////
++	// prepare_splitmode
++	//
++	// r6,r7: nip/srr1
++	//
++	// M: r0,r3-r5
++	//
++	// Fill in splitmode segment register table. The segment register
++	// containing xNIP is set up for instruction access (if xNIP does
++	// not hold nip, an extra ISI exception will occur). The instruction
++	// segment is protected from data access through the use of a DBAT
++	// register.
++	//
++	// It MUST be safe to call this function even if we are *not* in
++	// splitmode.
++
++prepare_splitmode:
++	bf	FBIT_PrepareSplitmode, ret_from_prep_splitmode
++
++	ZERO_TICK_CNT(splitmode_prep)
++
++	// fill split mode table with data segment registers
++	lwz	r3,K_SR_DATA(r1)		// physical addr
++	addi	r4,r1,K_SPLIT_SR_BASE-4
++	li	r5,16
++	mfctr	r0				// save ctr in r0
++	mtctr	r5
++	addi	r3,r3,-4
++1:	lwzu	r5,4(r3)
++	oris	r5,r5,0x1000			// no-execute segment bit
++	stwu	r5,4(r4)
++	bdnz	1b
++	mtctr	r0				// restore ctr
++
++	// insert instruction mode segment
++	rlwinm	r3,r6,0,0,3
++	stw	r3,K_SPLIT_NIP_SEGMENT(r1)
++	rlwinm	r3,r6,4+2,26,29			// r3 = offset, ((sr & 0xf0000000) >> 28 ) * 4
++	lwz	r5,K_SR_INST(r1)
++	lwzx	r5,r3,r5			// segment register for instructions
++	addi	r4,r1,K_SPLIT_SR_BASE
++	stwx	r5,r3,r4
++
++	// and protect it with DBAT0.
++	//
++	// The supervisor valid bit must be cleared
++	// - we don't want to block get_opcode.
++
++	rlwinm	r3,r6,0,0,3			// segment base
++	ori	r3,r3,0x1ffd			// user valid bit | 256MB mask
++	stw	r3,K_SPLIT_DBAT0U(r1)
++	li	r4,0
++	stw	r4,K_SPLIT_DBAT0L(r1)		// pp=0, wimg=0
++	GET_TICK_CNT(splitmode_prep, "splitmode_prep")
++	b	ret_from_prep_splitmode
++
++
++	///////////////////////////////////////////////////////////////////
++	// split_sr_no_execute
++	//
++	// r6,r7: nip/srr1
++	//
++	// An instruction is to be fetched from one of the no-execute
++	// segments. This function reinitializes the segment registers.
++	//
++	// M: r0, r3-r5
++
++split_sr_no_execute:
++	rlwinm.	r0,r7,0,3,3		// Guarded access or no-execute?
++	beqlr
++
++	rlwinm	r3,r6,0,0,3		// segment
++	lwz	r4,K_SPLIT_NIP_SEGMENT(r1)
++	cmpw	r3,r4
++	beqlr				// guarded PTE/mac-guarded segment
++
++	crset	FBIT_PrepareSplitmode	// force SR table refresh on next entry
++	crset	FBIT_LoadSegreg
++	b	exception_return
++
++
++
++	//////////////////////////////////////////////////////////////////
++	// splitmode_dsi
++	//
++	// r6/r7: nip/srr1
++	//
++	// A DSI exception occurred (DBAT protection violation).
++	// That is, a load/store instruction targeted the segment
++	// instructions were fetched from.
++	//
++	// Safe to modify: r0,r2-r5, (lr)
++
++splitmode_dsi:
++	mfdsisr	r3			// DBAT/page protection violation?
++	rlwinm.	r0,r3,0,4,4
++	beqlr-				// If not, it does not concern us
++
++	mfdar	r2			// Normal page protected exception?
++	lwz	r4,K_SPLIT_NIP_SEGMENT(r1)	// the instruction segment?
++	rlwinm	r5,r2,0,0,3		// data segment
++	cmpw	r4,r5
++	bnelr				// exit - not in the instruction segment
++
++	// splitmode write, r2=dar
++	mfsrin	r0,r2			// r0 = old segment register
++	stw	r6,xNIP(r1)		// need one additional reg
++	rlwinm	r4,r2,4+2,26,29		// sr_offset = sr_num * 4
++	lwz	r5,K_SR_DATA(r1)
++	lwzx	r4,r4,r5
++	stw	r0,K_TMP_SCRATCH0(r1)	// save old segment register
++	oris	r4,r4,0x4000		// set supervisor key bit (Ks)
++	mtsrin	r4,r2
++
++	rlwinm.	r3,r3,0,6,6		// cr[eq] set if this is a read
++	lwz	r3,xINST_OPCODE(r1)
++	LI_PHYS( R4, secint_splitm )	// r4 = secint handler
++
++	// handle simple stores r2=dar, r3=opcode, r4=secint
++	beq	splitm_load
++
++	rlwinm	r5,r3,6+5+3,(0x1f<<3)	// r5 = rS << 3
++	EMU_LOAD_GPR r5, /**/ R6	// r0 = value
++	mtlr	r4			// secint handler
++	SM_SET_MSR_DR /**/ r5
++
++	rlwinm	r6,r3,6,0x3e		// primary opcode & ~1
++	mtcrf	0x40,r3			// cr[5] = update bit (if opcode != 31)
++	cmpwi	r6,30			// 31 & ~1
++	beq-	splitm_store_op31
++	cmpwi	r6,36			// 36, stw/stwu
++	beq	splitm_stw
++	cmpwi	r6,38			// 38, stb/stbu
++	beq	splitm_stb
++	cmpwi	r6,44			// 44, sth/sthu
++	beq	splitm_sth
++	b	splitm_fallback
++splitm_store_op31:
++	rlwinm.	r6,r3,0,(32<<1)		// update form?
++	rlwinm	r6,r3,32-1,22,31	// secondary opcode
++	rlwinm	r6,r6,0,~32		// clear update bit
++	crnot	5,eq
++	cmpwi	r6,151			// stwx/stwux
++	beq+	splitm_stw
++	cmpwi	r6,215			// stbx / stbux
++	beq-	splitm_stb
++	cmpwi	r6,407			// sthx / sthux
++	beq-	splitm_sth
++	rlwinm	r6,r3,32-1,22,31	// secondary opcode
++	cmpwi	r6,150			// stwcx.
++	beq-	splitm_stwcx
++	cmpwi	r6,1014			// 1014, dcbz
++	beq-	splitm_dcbz
++	cmpwi	r6,662			// 662, stwbrx
++	beq-	splitm_stwbrx
++	b	splitm_fallback
++
++splitm_load:
++	mtlr	r4			// secint handler
++	SM_SET_MSR_DR /**/ r5
++	rlwinm	r6,r3,6,0x3e		// primary opcode & ~1
++	mtcrf	0x40,r3			// cr[5] = update bit (if opcode != 31)
++	cmpwi	r6,30			// 31 & ~1
++	beq-	splitm_load_op31
++	cmpwi	r6,32			// 32, lwz/lwzu
++	beq+	splitm_lwz
++	cmpwi	r6,34			// 34, lbz/lbzu
++	beq-	splitm_lbz
++	cmpwi	r6,40			// 40, lhz/lhzu
++	beq-	splitm_lhz
++	b	splitm_fallback
++splitm_load_op31:
++	rlwinm.	r6,r3,0,(32<<1)		// update form?
++	rlwinm	r6,r3,32-1,22,31	// secondary opcode
++	rlwinm	r6,r6,0,~32		// clear update bit
++	crnot	5,eq
++	cmpwi	r6,23			// 23, lwzx/lwzux
++	beq+	splitm_lwz
++	cmpwi	r6,87			// 87, lbzx/lbzux
++	beq-	splitm_lbz
++	cmpwi	r6,279			// 279, lhzx/lhzux
++	beq-	splitm_lhz
++	rlwinm	r6,r3,32-1,22,31	// secondary opcode
++	crclr	5
++	cmpwi	r6,20			// 20, lwarx
++	beq-	splitm_lwarx
++	cmpwi	r6,86			// 86, dcbf
++	beq-	splitm_dcbf
++	cmpwi	r6,982			// 982, icbi
++	beq-	splitm_icbi
++	cmpwi	r6,534			// 534, lwbrx
++	beq-	splitm_lwbrx
++	b	splitm_fallback
++
++
++	// r0=value, r2=ea, r3=opcode
++splitm_stwcx:
++	stwcx.	r0,0,r2
++	SM_CLEAR_MSR_DR /**/ r0
++	lwz	r6,xCR(r1)
++	mfcr	r0
++	rlwimi	r6,r0,0,0,3		// copy cr0 into saved CR
++	stw	r6,xCR(r1)
++	b	splitm_done2
++splitm_sth:
++	sth	r0,0(r2)
++	b	splitm_store_continue
++splitm_stb:
++	stb	r0,0(r2)
++	b	splitm_store_continue
++splitm_stw:
++	stw	r0,0(r2)
++	b	splitm_store_continue
++splitm_lwz:
++	lwz	r0,0(r2)
++	b	splitm_load_continue
++splitm_lhz:
++	lhz	r0,0(r2)
++	b	splitm_load_continue
++splitm_lbz:
++	lbz	r0,0(r2)
++	b	splitm_load_continue
++splitm_lwarx:
++	lwarx	r0,0,r2
++	b	splitm_load_continue
++splitm_lwbrx:
++	lwbrx	r0,0,r2
++	b	splitm_load_continue
++splitm_dcbz:
++	dcbz	0,r2
++	b	splitm_done
++splitm_icbi:
++	icbi	0,r2
++	b	splitm_done
++splitm_dcbf:
++	dcbf	0,r2
++	b	splitm_done
++splitm_stwbrx:
++	stwbrx	r0,0,r2
++	b	splitm_done
++
++splitm_load_continue:
++	SM_CLEAR_MSR_DR /**/ r4
++	BUMP("splitm_load")
++	rlwinm	r4,r3,6+5+3,(0x1f<<3)	// r4 = rD << 3
++	EMU_STORE_GPR r4, /**/ R6	// r0 = value
++	bf+	5,splitm_done2		// update form?
++	b	1f
++
++splitm_store_continue:
++	SM_CLEAR_MSR_DR /**/ r0
++	BUMP("splitm_store")
++	bf+	5,splitm_done2		// update form?
++1:	rlwinm	r3,r3,6+5+5+3,(31<<3)	// r3 = rA << 3 (update reg)
++	mr	r0,r2
++	EMU_STORE_GPR r3, /**/ R6	// r0 = value
++	b	splitm_done2
++splitm_done:
++	SM_CLEAR_MSR_DR /**/ r0
++splitm_done2:
++	lwz	r3,K_TMP_SCRATCH0(r1)
++	lwz	r6,xNIP(r1)		// restore NIP
++	mtsrin	r3,r2
++	b	emulation_done
++
++
++	// fallback, store and execute the instruction, r3=opcode
++splitm_fallback:
++	SM_CLEAR_MSR_DR /**/ r0
++#if 0
++	stw	r3,xDEBUG1(r1)
++	stw	r6,xDEBUG0(r1)
++	lwz	r6,xNIP(r1)
++	DEBUGGER_SAVE(0x1111)
++#endif
++	SPLITMODE_SMP_LOCK /**/ r0,r2
++
++	BUMP("splitm_fallback")
++	bl	secint_splitm_fallback	// set secondary exception handler
++
++	LI_PHYS( R2,split_store_patch )	// r2 = addr of split_store_patch
++	stw	r3,0(r2)		// store instruction
++	dcbst	0,r2
++	sync
++	icbi	0,r2
++	sync				// 74xx needs this
++
++	mtsrr0	r2			// The simplest thing is to do an RFI
++	LOADI	r3,(MSR_EE | MSR_PR | MSR_IR | MSR_SE | MSR_BE)
++	andc	r4,r7,r3		// Clear msr bits (r7=srr1)
++	xGPR_LOAD R6
++	xGPR_LOAD R7
++	mtsrr1	r4
++	xGPR_LOAD_RANGE R2,R5,r1	// Restore registers (except r1)
++	xGPR_LOAD R0
++	xGPR_LOAD R1
++	rfi
++
++split_store_patch:
++	nop
++
++	mtsprg_a0 r1			// save r1, then restore MSR
++	li	r1,MSR_ME
++	mtmsr	r1
++	isync
++	mfsprg_a3 r1			// and stack pointer
++
++	xGPR_SAVE_RANGE R2,R7,r1
++	SPLITMODE_SMP_UNLOCK /**/ r3,r4
++
++	mfsprg_a0 r2			// r1 - to be saved
++	lwz	r6,xNIP(r1)		// restore r6,r7 and segment register
++	lwz	r7,K_MSR(r1)
++	stw	r0,xGPR0(r1)
++	stw	r2,xGPR1(r1)
++	lwz	r2,K_TMP_SCRATCH0(r1)
++	mtsrin	r2,r6
++
++	GET_TICK_CNT( entry, "splitmode_dsi" )
++	b	emulation_done
++
++
++	//////////////////////////////////////////////////////////////////////
++	// secint_splitm / secint_splitm_fallback
++	//	r1: stack (sprg1 = old r1)
++	//	r3: vector index (sprg0 = old r3)
++	//	srr0/srr1: kernel nip/msr
++	//
++	// xGPR(0-5) are valid (unless this is a trace exception)
++
++secint_splitm_fallback:
++	blrl				// invoke handler in lr
++	SPLITMODE_SMP_UNLOCK /**/ R2,R4
++
++secint_splitm:
++	lwz	r6,xNIP(r1)		// Restore nip/msr
++	lwz	r7,K_MSR(r1)
++
++	cmpwi	r3,0x300		// ** DSI **
++	bne-	1f
++	mfsrin	r2,r6			// r6 = NIP
++	rlwinm	r2,r2,0,2,0		// Clear Ks [bit1] (supervisor key bit)
++	mtsrin	r2,r6
++	bl	save_middle_regs	// Note: If dsi_cont ever returns immediately,
++	bl	check_io_page		// we will need to fix the segment registers before
++	b	dsi_cont		// the last dsi_cont branch.
++
++1:	lwz	r2,K_TMP_SCRATCH0(r1)	// We might return immediately...
++	mtsrin	r2,r6
++
++	cmpwi	r3,0x600		// ** Alignment **
++	bne	2f
++	bl	save_middle_regs
++	b	alignment_cont
++
++2:	cmpwi	r3,0x800		// ** FPU Unavailable **
++	beq	fpu_cont
++	cmpwi	r3,0xf20		// ** AltiVec Unavailable **
++	beq	altivec_cont
++
++	DEBUGGER_SAVE( 0x5918 )		// ERROR...
++
++
++	////////////////////////////////////////////////////////////////////////
++	// invalidate_splitmode( kernel_vars_t *kv )
++	//
++	// This function must be called whenever the segment registers are
++	// modified. A flag is set which will force a refresh of the splitmode
++	// segment registers (at mac context switch in). We could rewrite this
++	// in C but it might be better to keep things centralized.
++
++GLOBAL_SYMBOL(r__invalidate_splitmode_sr):
++	// this will have no effect if fb_InSplitmode is not set
++	lwz	r4,xFLAG_BITS(r3)
++	ori	r4,r4,fb_PrepareSplitmode | fb_LoadSegreg
++	stw	r4,xFLAG_BITS(r3)
++	blr
++
++#ifdef CONFIG_SMP
++splitmode_lock:
++	.long	0
++#endif
+--- /dev/null
++++ b/drivers/macintosh/mol/asm-files/traps.S
+@@ -0,0 +1,501 @@
++/*
++ * Creation Date: <2001/01/27 16:25:14 samuel>
++ * Time-stamp: <2004/03/07 21:58:48 samuel>
++ *
++ * <traps.S>
++ *
++ * Exception Vectors
++ *
++ * Copyright (C) 2000, 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#define DBG_TRACE 0 // enable TRACE macro?
++
++#include "archinclude.h"
++#include "processor.h" /* avoid <asm/processor.h> (bogus SPRN_TBWU/L) */
++#include "asm_offsets.h"
++#include "asmdefs.h"
++#include "molasm.h"
++#include "asmdbg.h"
++#include "actions.h"
++#include "vector.h"
++#include "rvec.h"
++#include "constants.h"
++#include "mac_registers.h"
++#include "mmu.h"
++#include "osi.h"
++#include "kernel_vars.h"
++
++
++/************************************************************************/
++/* Exception Vector Definitions */
++/************************************************************************/
++
++ ACTIONS_SECTION
++GLOBAL_SYMBOL(r__actions_section):
++ ACTIONS_OFFS_SECTION
++GLOBAL_SYMBOL(r__actions_offs_section):
++
++ .text
++GLOBAL_SYMBOL(r__reloctable_start):
++
++MACRO_0(EXCEPTION_PREAMBLE, [
++	// SPRG1 = r1, SPRG0 = r3, r3=CR, r1=MOL_STACK
++	__ZERO_TICK_CNT(entry)
++	stw	r4,xGPR4(r1)	// store instructions are store serializing
++	lwz	r4,xFLAG_BITS(r1)
++	stw	r5,xGPR5(r1)	// and mix very badly with execution serializing
++	stw	r6,xGPR6(r1)	// instructions (like mfspr)
++	mfsrr0	r6
++	stw	r7,xGPR7(r1)
++	mfsrr1	r7
++	stw	r0,xGPR0(r1)
++	mtcr	r4
++	stw	r2,xGPR2(r1)
++	mflr	r2
++	stw	r3,xCR(r1)
++	mfsprg_a1 r5		// SPRG1 = r1
++	stw	r2,xLINK(r1)
++	mfsprg_a0 r3		// SPRG0 = r3
++	stw	r5,xGPR1(r1)
++	stw	r3,xGPR3(r1)
++
++	// saved: r0-r7, cr, lr
++	// r1 = stack, cr5-7=flag_bits, r6/r7 = nip/msr
++])
++
++	balign_32
++save_middle_regs:	// save r8-r12 and nip (r13-r31 should always be in regs)
++	stw	r6,xNIP(r1)
++	mfxer	r7
++	mfctr	r6
++	stw	r8,xGPR8(r1)
++	stw	r9,xGPR9(r1)
++	stw	r10,xGPR10(r1)
++	stw	r11,xGPR11(r1)
++	stw	r12,xGPR12(r1)
++	stw	r6,xCTR(r1)
++	stw	r7,xXER(r1)
++	bt-	FBIT_FPUInUse,giveup_fpu	// modifies r7/r8
++	blr
++
++MACRO_0(RESTORE_MIDDLE_REGS, [	// reverse save_middle_regs, restores registers
++	lwz	r11,xCTR(r1)	// r8-r12, ctr, xer, r6/r7 = nip/msr
++	lwz	r12,xXER(r1)
++	lwz	r8,xGPR8(r1)
++	lwz	r9,xGPR9(r1)
++	mtctr	r11
++	lwz	r10,xGPR10(r1)
++	lwz	r11,xGPR11(r1)
++	mtxer	r12
++	lwz	r12,xGPR12(r1)
++	lwz	r6,xNIP(r1)
++	lwz	r7,K_MSR(r1)
++])
++
++#define EXCEPTION_SAVE_ALL \
++	EXCEPTION_PREAMBLE ;\
++	bl save_middle_regs ;
++
++#define VECTOR_KERNEL( v, dummy_name, secint ) \
++	VECTOR( v, dummy_name, secint ) ;\
++	EXCEPTION_SAVE_ALL ;\
++	TAKE_EXCEPTION( v ) ;
++
++#define VECTOR_RESERVED( v, dummy_name, secint ) \
++	VECTOR( v, dummy_name, secint ) ;\
++	DEBUGGER_SAVE( v ) ;
++
++#define MAC_EXIT( rvec_code ) \
++	li	r3,rvec_code ;\
++	b	mac_exit ;
++
++#define MAC_EXIT_SAVE( rvec_code ) \
++	bl	save_middle_regs ;\
++	li	r3,rvec_code ;\
++	b	mac_exit ;
++
++#define MAC_TRAP( trap_num ) \
++	li	r2,trap_num ;\
++	b	mac_trap ;
++
++#define DEBUGGER(n)		li r4,n ; MAC_EXIT( RVEC_DEBUGGER );
++#define DEBUGGER_SAVE(n)	li r4,n ; MAC_EXIT_SAVE( RVEC_DEBUGGER );
++
++
++/************************************************************************/
++/* Misc macros */
++/************************************************************************/
++
++	//////////////////////////////////////////////////////////////
++	// GET_INSTR_OPCODE
++	//	r6	nip
++	// ret:	r4	opcode
++	//
++	// Modifies: r2,r3 (SMP DSI path also r0,r4,r5).
++	// Side-effects: Might return from the exception
++
++MACRO(GET_INSTR_OPCODE, [dummy], [
++	mfmsr	r3			// r3 = exception MSR
++	ori	r2,r3,MSR_DR
++	bl	8f
++
++	// secondary interrupt entry (we _know_ this must be a DSI)
++#ifdef CONFIG_SMP
++	LOADI	r5,EXTERN(compat_hash_table_lock)
++	li	r4,1
++	tophys	r5,r5
++7:	lwarx	r0,0,r5			// lock
++	cmpwi	r0,0
++	bne-	7b
++	stwcx.	r4,0,r5
++	bne-	7b
++	isync
++	tlbie	r6
++	eieio
++	tlbsync
++	sync
++	stw	r0,0(r5)		// release lock
++#else
++	tlbie	r6			// flush PTE from ITLB
++#endif
++	b	exception_return
++8:
++	mtmsr	r2
++	isync
++
++	lwz	r4,0(r6)		// get opcode
++
++	mtmsr	r3			// restore exception MSR
++	isync
++])
++
++/************************************************************************/
++/* Reserved / Kernel Vectors */
++/************************************************************************/
++
++VECTOR_KERNEL( 0x100, "System Reset", secint_bad )
++VECTOR_KERNEL( 0x500, "External Interrupt", secint_bad )
++VECTOR_KERNEL( 0x1400, "System Management Interrupt", secint_bad )
++VECTOR_KERNEL( 0x1700, "Thermal Management Interrupt", secint_bad )
++
++VECTOR_RESERVED( 0x200, "Machine Check", secint_bad )
++VECTOR_RESERVED( 0xa00, "Reserved", secint_bad )
++VECTOR_RESERVED( 0xb00, "Reserved", secint_bad )
++VECTOR_RESERVED( 0xe00, "FPU Assist", secint_bad )
++
++PERFMON_VECTOR_RELOCATION( PERFMON_VECTOR )
++VECTOR_RESERVED( PERFMON_VECTOR, "Performance Monitor Interrupt", secint_bad )
++
++//VECTOR_RESERVED( 0x1000, "InstructionTLBMiss-603", secint_bad )
++//VECTOR_RESERVED( 0x1100, "DataLoadTLBMiss-603", secint_bad )
++//VECTOR_RESERVED( 0x1200, "DataStoreTLBMiss-603", secint_bad )
++
++/************************************************************************/
++/* DSI Exceptions */
++/************************************************************************/
++
++VECTOR( 0x300, "DSI", secint_lr_call )
++	EXCEPTION_PREAMBLE
++	TRACE(0x300, "DSI")
++
++	GET_INSTR_OPCODE		// m: r2-r3, ret: r4=opcode, r6=nip
++	stw	r4,xINST_OPCODE(r1)
++	bl	check_pthash_hit	// m: r0,r2-r5
++	btl-	FBIT_InSplitmode,splitmode_dsi	// may not return here
++	bl	save_middle_regs
++	bl	check_io_page
++
++dsi_cont:
++	LOADI	r3,EXTERN(dsi_exception)
++	mfdar	r4			// We might need to do this earlier
++	mfdsisr	r5			// when the splitmode code is activated...
++	b	call_kernel
++
++
++/************************************************************************/
++/* ISI Exceptions */
++/************************************************************************/
++
++VECTOR( 0x400, "ISI", secint_bad )
++	EXCEPTION_PREAMBLE
++	TRACE(0x400, "ISI")
++
++	// emuaccel engine
++	rlwinm.	r0,r7,0,4,4		// protection violation
++	beq-	1f
++	lwz	r3,K_EMUACCEL_MPHYS(r1)
++	rlwinm	r4,r6,0,~0x0fff		// nip page
++	rlwinm	r2,r6,0,0x0ff8		// nip offset (dword align)
++	lwz	r5,K_EMUACCEL_PAGE_PHYS(r1)
++	cmpw	r3,r4
++	bne-	1f
++	lwzux	r0,r2,r5		// r0 = handler
++	mtlr	r0
++	lwz	r6,4(r2)		// address of next instruction
++	blr				// parameters: r2 == emuaccel_slot
++1:
++	btl-	FBIT_InSplitmode, split_sr_no_execute
++	bl	save_middle_regs
++
++	LOADI	r3,EXTERN(isi_exception)
++	mfsrr0	r4
++	mfsrr1	r5
++	b	call_kernel
++
++
++/************************************************************************/
++/*	Alignment Exception						*/
++/************************************************************************/
++
++VECTOR( 0x600, "Alignment", secint_lr_call )
++	EXCEPTION_SAVE_ALL
++	TRACE(0x600, "Alignment")	// was 0x400 - copy/paste; this is the 0x600 vector
++
++alignment_cont:
++	mfdar	r4			// r4 = faulting address
++	mfdsisr	r5			// r5 = DSISR, passed to the emulator
++	MAC_EXIT( RVEC_ALIGNMENT_TRAP )
++
++
++/************************************************************************/
++/* FPU Unavailable Exception */
++/************************************************************************/
++
++	// xFPU_STATE is only meaningful when FBIT_FPUInUse is not set
++	//
++	//	FPU_STATE_DIRTY		- fr13 & fpscr are not loaded (everything else is).
++	//	FPU_STATE_HALF_SAVED	- fr14-fr31 are loaded.
++	//	FPU_STATE_SAVED		- fr14-fr31 are loaded (but also saved in mregs).
++	//
++	// FPU_STATE_DIRTY in the *emulator* means that all floating point
++	// registers *EXCEPT* fr13 and fpscr are valid.
++	//
++	// Implementation note: When we do not own the fpu, the MSR_FEx bits
++	// must be cleared. Otherwise we might experience bogus FPU exceptions.
++	//
++	// MOL will never throw FPU exceptions when the FP bit is off. This
++	// is a small violation of the standard but the alternative would be
++	// always loading FPSCR (which requires FPU ownership...)
++	//
++
++VECTOR( 0x800, "FPU Unavailable", secint_lr_call )
++	EXCEPTION_PREAMBLE
++	TRACE(0x800, "FPU Unavailable")
++fpu_cont:
++
++	lwz	r2,xMSR(r1)		// r2 = xMSR (used below too)
++	andi.	r4,r2,MSR_FP
++	beq-	mac_fpu_unavailable	// mac trap?
++	bt	FBIT_FPUInUse,2f	// FPU ready for use?
++
++	lwz	r3,K_EMULATOR_MSR(r1)	// FPU owned by our userland process?
++	andi.	r4,r3,MSR_FP
++	bne+	1f
++	MAC_EXIT_SAVE( RVEC_ENABLE_FPU ) // No... grab FPU in userspace
++
++mac_fpu_unavailable:
++	BUMP( "mac-fpu-trap" )
++	MAC_TRAP( 0x800 )
++
++	// userland process owns FPU
++1:	BUMP( "enable-fpu" )
++	ENABLE_MSR_FP /**/ r4		// enable kernel FPU
++	// flag the fpu dirty
++	lwz	r3,xFPU_STATE(r1)
++	lfd	fr13,xFPSCR-4(r1)	// fp13 and fpscr are *ALWAYS* saved
++	crset	FBIT_FPUInUse		// we own the FPU now
++	cmpwi	r3,FPU_STATE_HALF_SAVED
++	mtfsf	0xff,fr13
++	lfd	fr13,xFPR13(r1)
++	bne	2f
++	xLOAD_LOW_FPU r1		// load fr0-fr12
++2:
++	li	r3,MSR_FP| MSR_FE0 | MSR_FE1	// FPU bits
++	lwz	r7,K_MSR(r1)		// enable MSR_FP
++	and	r2,r2,r3		// r2 = (xMSR & MSR_FEx)
++	andc	r7,r7,r3		// K_MSR &= ~MSR_FEx
++	or	r7,r7,r2		// K_MSR |= (xMSR & MSR_FEx)
++	stw	r7,K_MSR(r1)
++	GET_TICK_CNT(entry,"enable_fpu")
++	b	exception_return
++
++
++/************************************************************************/
++/* Decrementer Exception */
++/************************************************************************/
++
++// The 0x900 decrementer vector is in dec.S
++
++/************************************************************************/
++/* System Call Exception */
++/************************************************************************/
++
++VECTOR( 0xc00, "System Call", secint_bad )
++	EXCEPTION_PREAMBLE
++	TRACE( 0xc00, "System Call")
++
++	lwz	r3,xGPR3(r1)
++	LOADI	r5,OSI_SC_MAGIC_R3
++	lwz	r4,xGPR4(r1)
++	LOADI	r2,OSI_SC_MAGIC_R4
++	cmpw	cr1,r3,r5
++	cmpw	cr0,r4,r2
++	crand	eq,eq,cr1_eq		// both magic values must match
++	beq+	2f
++
++	MAC_TRAP(0xc00)			// r7 reason bits used (zero)
++2:
++	MAC_EXIT_SAVE( RVEC_OSI_SYSCALL )
++
++
++/************************************************************************/
++/* Trace Exception */
++/************************************************************************/
++
++VECTOR( 0xd00, "Trace", secint_bad )
++trace_vector:
++	EXCEPTION_PREAMBLE
++	TRACE(0xd00, "Trace")
++
++	MAC_EXIT_SAVE( RVEC_TRACE_TRAP );
++
++
++/************************************************************************/
++/* AltiVec Exception */
++/************************************************************************/
++
++VECTOR( 0xf20, "AltiVec", secint_lr_call )
++	EXCEPTION_PREAMBLE
++	TRACE(0xf20, "AltiVec")
++altivec_cont:
++
++	lwz	r4,xNO_ALTIVEC(r1)	// AltiVec support disabled?
++	cmpwi	r4,0
++	bne-	mac_altivec_unavailable
++
++	lwz	r2,xMSR(r1)
++	rlwinm.	r4,r2,0,6,6		// bit 6 = MSR_VEC
++	beq-	mac_altivec_unavailable
++
++	lwz	r3,K_EMULATOR_MSR(r1)
++	rlwinm.	r4,r3,0,6,6		// bit 6 = MSR_VEC
++	bne+	enable_altivec
++	MAC_EXIT_SAVE( RVEC_ENABLE_ALTIVEC )
++
++mac_altivec_unavailable:
++	MAC_EXIT_SAVE( RVEC_ALTIVEC_UNAVAIL_TRAP )
++
++enable_altivec:
++	// We don't need to load any registers since the emulator
++	// won't touch the altivec unit (at least for now).
++
++	lwz	r7,K_MSR(r1)
++	oris	r7,r7,HI(MSR_VEC)
++	stw	r7,K_MSR(r1)
++	b	exception_return
++
++
++VECTOR( 0x1600, "AltiVec Assist", secint_bad )
++	EXCEPTION_SAVE_ALL
++	TRACE(0x1600, "AltiVec Assist")
++
++	mr	r4,r7
++	MAC_EXIT( RVEC_ALTIVEC_ASSIST )	// r4 = srr1
++
++
++/************************************************************************/
++/* Instruction Breakpoint */
++/************************************************************************/
++
++VECTOR( 0x1300, "Instruction Breakpoint", secint_bad )
++	EXCEPTION_SAVE_ALL
++	TRACE(0x1300, "IABR")
++
++	DEBUGGER(0x1300)
++
++
++/************************************************************************/
++/*	RunMode-601 (trace)						*/
++/************************************************************************/
++
++VECTOR( 0x2000, "RunMode-601", secint_bad )
++	b	trace_vector
++
++
++/************************************************************************/
++/* Secondary Interrupt Handlers */
++/************************************************************************/
++
++ //////////////////////////////////////////////////////////////////////
++ // secint_xxx
++ //
++ // r1: stack (sprg1 = old r1)
++ // r3: vector addr (sprg0 = old r3)
++ // srr0/srr1: kernel nip/msr
++ //
++ // secint_lr_call:
++ // lr secondary interrupt handler
++
++secint_bad:
++ TRACE(0xbad, "secint_bad")
++ mr r4,r3
++ MAC_EXIT( RVEC_INTERNAL_ERROR )
++
++secint_lr_call:
++ blrl
++ li r4,0x6666
++ MAC_EXIT( RVEC_INTERNAL_ERROR )
++
++
++/**************************************************************
++* Includes
++**************************************************************/
++
++// We need to be sure this code is contiguous, the simplest/safest
++// method is using only a single file. This will also effectively
++// reduce the size of the relocation table.
++
++#ifdef __darwin__
++#include "darwin.S"
++#endif
++#include "entry.S"
++#include "dec.S"
++#include "emulation.S"
++#include "emuaccel.S"
++#include "iopage.S"
++#include "splitmode.S"
++#include "ptintercept.S"
++#include "vsid.S"
++#ifdef __MPC107__
++#include "./mpc107/mpc107.S"
++#else
++#ifdef __linux__
++#include "linux.S"
++#include "603.S"
++#endif
++#endif
++
++#ifdef __linux__
++ .text 50
++#endif
++GLOBAL_SYMBOL(r__reloctable_end):
++
++ ACTIONS_OFFS_SECTION
++GLOBAL_SYMBOL(r__actions_offs_section_end):
++
++// The BUMP("counter") macro adds entries to text subsection 90.
++// This adds labels before the counter entries.
++
++#ifdef __linux__
++ .text 89
++GLOBAL_SYMBOL(__start_bumptable):
++ .text 91
++GLOBAL_SYMBOL(__end_bumptable):
++ .text
++#endif
+--- /dev/null
++++ b/drivers/macintosh/mol/asm-files/vsid.S
+@@ -0,0 +1,123 @@
++/*
++ * Creation Date: <2003/03/06 22:03:59 samuel>
++ * Time-stamp: <2004/02/21 16:30:45 samuel>
++ *
++ * <vsid.S>
++ *
++ * VSID lookup (skiplist search)
++ *
++ * Copyright (C) 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "mtable.h"
++
++#if (SKIPLIST_LEVELSIZE == 4)
++#define SLEVEL_SHIFT 2
++#endif
++#if (SKIPLIST_LEVELSIZE == 8)
++#define SLEVEL_SHIFT 3
++#endif
++
++emulate_mtsrin:
++ rlwinm r2,r4,32-8,24,28 // r2 = #B << 8
++ EMU_LOAD_GPR r2, /**/ R3 // r0 = reg B
++ rlwinm r4,r0,0,0,3 // #sr top 4 bits of r4
++ b 1f
++emulate_mtsr:
++ rlwinm r4,r4,12,0,3 // #sr in top 4 bits of r4
++1: EMU_LOAD_GPR r5, /**/ R2 // r0 = new segr value
++
++ stw r8,xGPR8(r1) // save r8
++ rlwinm r8,r4,6,26,29 // r8 = #sr << 2
++ stw r9,xGPR9(r1) // save r9
++ add r8,r8,r1 // r8 = r1 + sr_offset
++ stw r6,xNIP(r1) // save r6 (need more registers)
++ rlwinm r9,r0,0,0,3 // r9 = [T Ks Kp N]
++ stw r0,xSEGR_BASE(r8) // store new value
++ rlwinm r0,r0,0,8,31 // mask VSID
++
++ /******* vsid lookup (skiplist search) *******/
++
++ lwz r2,K_VSID_SL_SLEVEL(r1) // n = search level (0..15)
++ rlwinm r2,r2,SLEVEL_SHIFT,0,29 // n *= 4 (or n *= 8 on darwin)
++
++ addi r3,r1,K_VSID_SL_ROOT_ELEM // p = root element
++1: add r5,r2,r3 //
++2: lwz r3,SKIPLIST_NEXT(r5) // r3 = p->next[n]
++#ifdef __linux__
++ tophys R3,R3
++#endif
++	lwz	r4,SKIPLIST_KEY(r3)		// r4 = p->next[n]->key
++ cmpw cr1,r4,r0
++ bge- cr1,3f
++ b 1b
++3:
++ addi r2,r2,-SKIPLIST_LEVELSIZE // r2 -= 4|8
++ beq- cr1,vsid_found // jump if key matches
++ cmpwi r2,0
++ addi r5,r5,-SKIPLIST_LEVELSIZE // r5 -= 4|8
++ bge+ 2b
++
++ /******* vsid missing *******/
++ BUMP("vsid_missing")
++ // r8 used
++ lwz r2,K_ILLEGAL_SR(r1) // vsid unallocated...
++ li r4,0 // no entry...
++ stw r2,K_SV_SR_BASE(r8)
++ stw r2,K_USER_SR_BASE(r8)
++ stw r2,K_SPLIT_SR_BASE(r8) // this way we can forget the splitmode case
++ stw r4,K_VSID_ENT_BASE(r8)
++ sub r5,r8,r1 // r5 = VSID offset
++ lwz r0,K_CUR_SR_BASE(r1) // r0 = cur_sr_table
++ b 6f
++
++ /******* vsid found *******/
++vsid_found:
++
++ BUMP("vsid_found")
++ // match, r3 points to the skiplist element
++ addi r4,r3,-SIZEOF_VSID_ENT // sizeof(vsid_ent_t)
++ lwz r2,VSID_USER_OFFS(r4) // vsid_user
++ lwz r3,VSID_SV_OFFS(r4) // vsid_sv
++#ifdef __linux__
++ tovirt R4,R4 // r4 = vsid_element
++#else
++ lwz r4,VSID_MYSELF_VIRT(r4)
++#endif
++ rlwinm r6,r9,2,31,31 // r6 = Ks ? 1:0
++ stw r4,K_VSID_ENT_BASE(r8) // store vsid entry (possibly NULL)
++ rlwinm r4,r9,3,31,31 // r4 = Kp ? 1:0
++ rlwimi r2,r9,0,3,3 // copy the N-bit (no-execute)
++ rlwimi r3,r9,0,3,3 // copy the N-bit (no-execute)
++ cror FBIT_LoadSegreg, FBIT_LoadSegreg, FBIT_InSplitmode
++ addi r6,r6,-1 // r6 = Ks-mask
++ addi r4,r4,-1 // r4 = Kp-mask
++ andc r0,r2,r6 // Kp == 1 case
++ and r6,r3,r6 // Kp == 0 case
++ crmove FBIT_PrepareSplitmode, FBIT_InSplitmode
++ andc r9,r2,r4 // Ks == 1 case
++ or r6,r6,r0
++ lwz r0,K_CUR_SR_BASE(r1) // r0 = cur_sr_table
++ and r4,r3,r4 // Ks == 0 case
++ stw r6,K_SV_SR_BASE(r8) // sv_sr set
++ or r4,r4,r9
++ sub r5,r8,r1 // r5 = VSID offset
++ stw r4,K_USER_SR_BASE(r8) // user_sr set
++6:
++ // r0,r5 used
++ rlwinm r2,r5,32-6,0,3 // r2 = sr# in bits 0-3
++ lwzx r3,r5,r0 // r3 = sr to be loaded into sr#
++ mtsrin r3,r2 // update segment register
++
++ lwz r6,xNIP(r1) // restore r6
++ lwz r8,xGPR8(r1) // restore r8
++ lwz r9,xGPR9(r1) // restore r9
++ GET_TICK_CNT(entry, "mtsr")
++ BUMP("mtsr_")
++ b emulation_done
++
+--- /dev/null
++++ b/drivers/macintosh/mol/asm_offsets.c
+@@ -0,0 +1,161 @@
++/*
++ * This program is used to generate definitions needed by
++ * some assembly functions.
++ *
++ * We use the technique used in the OSF Mach kernel code:
++ * generate asm statements containing #defines,
++ * compile this file to assembler, and then extract the
++ * #defines from the assembly-language output.
++ */
++
++#ifdef __KERNEL__
++#include "archinclude.h"
++#include "kernel_vars.h"
++#include "mmu.h"
++#else
++#include "mol_config.h"
++#include <stddef.h>
++#include "mac_registers.h"
++#endif
++
++#include "processor.h"
++
++#define DEFINE(sym, val) \
++ asm volatile("\n#define\t" #sym "\t%0" : : "i" (val))
++
++#define K_DEF(sym, val ) \
++ DEFINE(sym, offsetof(kernel_vars_t, val ))
++
++#define ST_DEF(sym, val ) \
++ DEFINE(sym, offsetof(session_table_t, val ))
++
++#define M_DEF(sym, val ) \
++ DEFINE(sym, XOFFS + offsetof(mac_regs_t, val ))
++
++#define IO_DEF(sym, val) \
++ DEFINE(sym, offsetof(struct io_page, val ))
++
++int main( void )
++{
++#ifdef __KERNEL__
++ #define XOFFS offsetof(kernel_vars_t, mregs)
++#else
++ #define XOFFS 0
++#endif
++ /* --- mac_regs offsets --- */
++
++ M_DEF( xVEC_BASE, vec[0] );
++ M_DEF( xVEC0, vec[0] );
++ M_DEF( xVEC1, vec[1] );
++ M_DEF( xVEC2, vec[2] );
++ M_DEF( xVSCR, vscr );
++ M_DEF( xVRSAVE, spr[S_VRSAVE] );
++
++ M_DEF( xGPR_BASE, gpr[0] );
++ M_DEF( xGPR0, gpr[0] );
++ M_DEF( xGPR1, gpr[1] );
++ M_DEF( xGPR2, gpr[2] );
++ M_DEF( xGPR3, gpr[3] );
++ M_DEF( xGPR4, gpr[4] );
++ M_DEF( xGPR5, gpr[5] );
++ M_DEF( xGPR6, gpr[6] );
++ M_DEF( xGPR7, gpr[7] );
++ M_DEF( xGPR8, gpr[8] );
++ M_DEF( xGPR9, gpr[9] );
++ M_DEF( xGPR10, gpr[10] );
++ M_DEF( xGPR11, gpr[11] );
++ M_DEF( xGPR12, gpr[12] );
++ M_DEF( xGPR13, gpr[13] );
++ M_DEF( xGPR14, gpr[14] );
++ M_DEF( xGPR15, gpr[15] );
++ M_DEF( xGPR16, gpr[16] );
++ M_DEF( xGPR17, gpr[17] );
++ M_DEF( xGPR18, gpr[18] );
++ M_DEF( xGPR19, gpr[19] );
++ M_DEF( xGPR20, gpr[20] );
++ M_DEF( xGPR21, gpr[21] );
++ M_DEF( xGPR22, gpr[22] );
++ M_DEF( xGPR23, gpr[23] );
++ M_DEF( xGPR24, gpr[24] );
++ M_DEF( xGPR25, gpr[25] );
++ M_DEF( xGPR26, gpr[26] );
++ M_DEF( xGPR27, gpr[27] );
++ M_DEF( xGPR28, gpr[28] );
++ M_DEF( xGPR29, gpr[29] );
++ M_DEF( xGPR30, gpr[30] );
++ M_DEF( xGPR31, gpr[31] );
++
++ M_DEF( xNIP, nip);
++ M_DEF( xCR, cr);
++ M_DEF( xFPR_BASE, fpr[0]);
++ M_DEF( xFPR13, fpr[13]);
++ M_DEF( xFPSCR, fpscr );
++ M_DEF( xEMULATOR_FPSCR, emulator_fpscr );
++ M_DEF( xFPU_STATE, fpu_state );
++
++ M_DEF( xLINK, link);
++ M_DEF( xXER, xer);
++ M_DEF( xCTR, ctr);
++ M_DEF( xFLAG_BITS, flag_bits );
++ M_DEF( xDEC, spr[S_DEC]);
++ M_DEF( xDEC_STAMP, dec_stamp);
++ M_DEF( xTIMER_STAMP, timer_stamp);
++ M_DEF( xMSR, msr);
++ M_DEF( xSPR_BASE, spr[0]);
++
++ M_DEF( xHID0, spr[S_HID0]);
++
++ M_DEF( xSRR0, spr[S_SRR0]);
++ M_DEF( xSRR1, spr[S_SRR1]);
++
++ M_DEF( xSPRG0, spr[S_SPRG0]);
++ M_DEF( xSPRG1, spr[S_SPRG1]);
++ M_DEF( xSPRG2, spr[S_SPRG2]);
++ M_DEF( xSPRG3, spr[S_SPRG3]);
++
++ M_DEF( xSEGR_BASE, segr[0]);
++ M_DEF( xIBAT_BASE, spr[S_IBAT0U] );
++ M_DEF( xSDR1, spr[S_SDR1] );
++
++ M_DEF( xINST_OPCODE, inst_opcode );
++ M_DEF( xALTIVEC_USED, altivec_used );
++ M_DEF( xNO_ALTIVEC, no_altivec );
++
++ M_DEF( xINTERRUPT, interrupt );
++ M_DEF( xIN_VIRTUAL_MODE, in_virtual_mode );
++
++ M_DEF( xRVEC_PARAM0, rvec_param[0] );
++ M_DEF( xRVEC_PARAM1, rvec_param[1] );
++ M_DEF( xRVEC_PARAM2, rvec_param[2] );
++
++#ifdef EMULATE_603
++ M_DEF( xGPRSAVE0_603, gprsave_603[0] );
++ M_DEF( xGPRSAVE1_603, gprsave_603[1] );
++ M_DEF( xGPRSAVE2_603, gprsave_603[2] );
++ M_DEF( xGPRSAVE3_603, gprsave_603[3] );
++#endif
++
++ M_DEF( xDEBUG0, debug[0] );
++ M_DEF( xDEBUG1, debug[1] );
++ M_DEF( xDEBUG2, debug[2] );
++ M_DEF( xDEBUG3, debug[3] );
++ M_DEF( xDEBUG4, debug[4] );
++ M_DEF( xDEBUG5, debug[5] );
++ M_DEF( xDEBUG6, debug[6] );
++ M_DEF( xDEBUG7, debug[7] );
++ M_DEF( xDEBUG8, debug[8] );
++ M_DEF( xDEBUG9, debug[9] );
++
++ M_DEF( xDEBUG_SCR1, debug_scr1 );
++ M_DEF( xDEBUG_SCR2, debug_scr2 );
++ M_DEF( xDEBUG_TRACE, debug_trace );
++ M_DEF( xDBG_TRACE_SPACE, dbg_trace_space[0] );
++ M_DEF( xDBG_LAST_RVEC, dbg_last_rvec );
++
++ M_DEF( xKERNEL_DBG_STOP, kernel_dbg_stop );
++
++ M_DEF( xHOSTIRQ_ACTIVE_CNT, hostirq_active_cnt );
++
++ return 0;
++}
++
+--- /dev/null
++++ b/drivers/macintosh/mol/context.c
+@@ -0,0 +1,99 @@
++/*
++ * Creation Date: <1998-11-20 16:18:20 samuel>
++ * Time-stamp: <2004/02/28 19:16:44 samuel>
++ *
++ * <context.c>
++ *
++ * MMU context allocation
++ *
++ * Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include "alloc.h"
++#include "mmu.h"
++#include "mmu_contexts.h"
++#include "misc.h"
++#include "asmfuncs.h"
++#include "emu.h"
++#include "mtable.h"
++#include "performance.h"
++#include "context.h"
++#include "hash.h"
++
++#define MMU (kv->mmu)
++
++
++static int
++flush_all_PTEs( kernel_vars_t *kv )
++{
++ int i, count=0, npte=(ptehash.pte_mask + 8)/8;
++ ulong *pte, ea, v;
++
++ for( pte=ptehash.base, i=0; i<npte; i++, pte+=2 ) {
++ v = *pte;
++ if( !(v & BIT(0)) ) /* test V-bit */
++ continue;
++ v = (v & ~BIT(0)) >> 7;
++ v = (v - ((v & 0xf) * MUNGE_ESID_ADD)) * MUNGE_MUL_INVERSE;
++ v = (v>>4) & CTX_MASK;
++
++ if( v >= MMU.first_mol_context && v <= MMU.last_mol_context ) {
++ *pte = 0;
++ count++;
++ }
++ }
++
++ /* perform a tlbia */
++ for( ea=0; ea <= (0x3f << 12); ea += 0x1000 )
++ __tlbie( ea );
++
++ if( count )
++ printk("%d stale PTEs flushed (something is wrong)\n", count );
++ return count;
++}
++
++int
++init_contexts( kernel_vars_t *kv )
++{
++ MMU.first_mol_context = FIRST_MOL_CONTEXT( kv->session_index );
++ MMU.last_mol_context = LAST_MOL_CONTEXT( kv->session_index );
++ MMU.next_mol_context = MMU.first_mol_context;
++
++ MMU.illegal_sr = alloc_context(kv) | VSID_Kp | VSID_N;
++
++ flush_all_PTEs( kv );
++ return 0;
++}
++
++void
++cleanup_contexts( kernel_vars_t *kv )
++{
++ flush_all_PTEs( kv );
++}
++
++void
++handle_context_wrap( kernel_vars_t *kv, int n )
++{
++ if( MMU.next_mol_context + n > MMU.last_mol_context ) {
++ printk("MOL context wrap\n");
++
++ clear_all_vsids( kv );
++ init_contexts( kv );
++ }
++}
++
++int
++alloc_context( kernel_vars_t *kv )
++{
++ int mol_context = MMU.next_mol_context++;
++ int vsid = MUNGE_CONTEXT(mol_context >> 4);
++
++ vsid += MUNGE_ESID_ADD * (mol_context & 0xf);
++ return (vsid & VSID_MASK);
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/emu.c
+@@ -0,0 +1,228 @@
++/*
++ * Creation Date: <1998-11-21 16:07:47 samuel>
++ * Time-stamp: <2004/03/13 14:08:18 samuel>
++ *
++ * <emu.c>
++ *
++ * Emulation of some assembly instructions
++ *
++ * Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include "alloc.h"
++#include "mmu.h"
++#include "kernel_vars.h"
++#include "emu.h"
++#include "asmfuncs.h"
++#include "rvec.h"
++#include "processor.h"
++#include "mtable.h"
++#include "performance.h"
++#include "emuaccel_sh.h"
++#include "misc.h"
++#include "map.h"
++
++#define BAT_PERFORMANCE_HACK
++// #define DEBUG
++
++/* If BAT_PERFORMANCE_HACK is defined, PTEs corresponding to a mac bat
++ * mapping will not necessarily be flushed when the bat registers are
++ * touched. This gives a huge performance gain in MacOS 9.1 (which
++ * clears the bat registers in the idle loop). Of course, this breaks
++ * compatibility (although most operating systems initialize the
++ * BATs once and for all).
++ */
++
++#ifdef BAT_PERFORMANCE_HACK
++ #define BAT_HACK(kv) (!MREGS.use_bat_hack || kv->mmu.bat_hack_count++ < 0x100)
++#else
++ #define BAT_HACK(kv) 1
++#endif
++
++#define MREGS (kv->mregs)
++#define MMU (kv->mmu)
++
++
++int
++do_mtsdr1( kernel_vars_t *kv, ulong value )
++{
++ ulong mbase, mask;
++ int s;
++
++ MREGS.spr[S_SDR1] = value;
++
++	/* the mask must be a valid one; we had better make sure we are
++	 * not tricked by a bogus sdr1 value
++	 */
++ for( mask=BIT(23); mask && !(mask & value) ; mask=mask>>1 )
++ ;
++ mask = mask? ((mask | (mask-1)) << 16) | 0xffff : 0xffff;
++ mbase = value & ~mask;
++
++ if( mbase + mask >= MMU.ram_size ) {
++ /* S_SDR1 out of range, fallback to a safe setting */
++ printk("WARNING, S_SDR1, %08lX is out of range\n", value);
++ mbase = 0;
++ mask = 0xffff;
++ }
++
++ MMU.hash_mbase = mbase;
++ MMU.hash_mask = mask;
++ MMU.pthash_sr = -1; /* clear old tlbhash matching */
++
++ if( MMU.hash_base )
++ unmap_emulated_hash( kv );
++ MMU.hash_base = map_emulated_hash( kv, MMU.hash_mbase, mask+1 );
++
++ /* try to allocate the PTE bitfield table (16K/128 MB ram). The worst
++ * case is 512K which will fail since the kmalloc limit is 128K.
++ * If the allocation fails, we simply don't use the bitfield table.
++ */
++ s = (mask+1)/8/8;
++ if( MMU.pthash_inuse_bits )
++ kfree_cont_mol( MMU.pthash_inuse_bits );
++ if( !(MMU.pthash_inuse_bits=kmalloc_cont_mol(s)) )
++ MMU.pthash_inuse_bits_ph = 0;
++ else {
++ memset( MMU.pthash_inuse_bits, 0, s );
++ MMU.pthash_inuse_bits_ph = tophys_mol( MMU.pthash_inuse_bits );
++ }
++
++ /* make sure the unmapped ram range is flushed... */
++ flush_lv_range( kv, MMU.userspace_ram_base + mbase, mask+1 );
++
++ /* ...as well as any MMU mappings */
++ clear_pte_hash_table( kv );
++
++ BUMP(do_mtsdr1);
++ return RVEC_NOP;
++}
++
++/* This function is _very_ slow, since it must destroy a lot of PTEs.
++ * Fortunately, BAT-maps are normally static.
++ */
++int
++do_mtbat( kernel_vars_t *kv, int sprnum, ulong value, int force )
++{
++ mac_bat_t *d;
++ int batnum;
++ mBAT *p;
++
++ BUMP(do_mtbat);
++
++ if( !force && MREGS.spr[sprnum] == value )
++ return RVEC_NOP;
++
++ /* printk("do_mtbat %d %08lX\n", sprnum, value); */
++
++ MREGS.spr[sprnum] = value;
++
++	/* upper BAT registers have an even number */
++ batnum = (sprnum - S_IBAT0U) >>1;
++ d = &MMU.bats[batnum];
++
++ /* First we must make sure that all PTEs corresponding to
++ * the old BAT-mapping are purged from the hash table.
++ */
++ if( BAT_HACK(kv) && d->valid )
++ flush_ea_range(kv, d->base & ~0xf0000000, d->size );
++
++ p = (mBAT*)&MREGS.spr[sprnum & ~1];
++ d->valid = p->batu.vs | p->batu.vp;
++ d->vs = p->batu.vs;
++ d->vp = p->batu.vp;
++ d->wimg = (p->batl.w<<3) | (p->batl.i<<2) | (p->batl.m<<1) | p->batl.g;
++ d->ks = d->ku = 1; /* IBAT/DBATs, behaves as if key==1 */
++ d->pp = p->batl.pp;
++ d->size = (p->batu.bl+1)<<17;
++ d->base = (p->batu.bepi & ~p->batu.bl)<<17;
++ d->mbase = (p->batl.brpn & ~p->batu.bl)<<17;
++
++ /* Next, we must make sure that no PTEs refer to the new
++ * BAT-mapped area.
++ */
++
++ if( BAT_HACK(kv) && d->valid )
++ flush_ea_range( kv, d->base & ~0xf0000000, d->size );
++
++ return RVEC_NOP;
++}
++
++
++/************************************************************************/
++/* Emulation acceleration */
++/************************************************************************/
++
++static ulong
++lookup_emuaccel_handler( int emuaccel )
++{
++ extern ulong emuaccel_table[];
++ ulong handler, *p = emuaccel_table;
++
++ for( ; p[0]; p+=3 ) {
++ if( (emuaccel & EMUACCEL_INST_MASK) != p[0] )
++ continue;
++ emuaccel &= p[2]; /* offset mask */
++ handler = p[1] + (ulong)emuaccel_table + emuaccel * 8;
++ return tophys_mol( (ulong*)reloc_ptr(handler) );
++ }
++ return 0;
++}
++
++int
++alloc_emuaccel_slot( kernel_vars_t *kv, int emuaccel, int param, int inst_addr )
++{
++ ulong *p = (ulong*)((char*)kv->emuaccel_page + kv->emuaccel_size);
++ ulong handler = lookup_emuaccel_handler( emuaccel );
++ int size, ret;
++
++ size = (emuaccel & EMUACCEL_HAS_PARAM)? 16 : 8;
++ if( !handler || !p || kv->emuaccel_size + size > 0x1000 )
++ return 0;
++
++ ret = kv->emuaccel_mphys + kv->emuaccel_size;
++ p[0] = handler;
++ p[1] = inst_addr + 4;
++
++ if( emuaccel & EMUACCEL_HAS_PARAM ) {
++ /* p[2] is already EMUACCEL_NOP */
++ p[3] = param;
++ }
++
++ kv->emuaccel_size += size;
++ return ret;
++}
++
++int
++mapin_emuaccel_page( kernel_vars_t *kv, int mphys )
++{
++ int i, handler;
++ ulong *p;
++
++ if( kv->emuaccel_page || (mphys & 0xfff) )
++ return 0;
++
++ if( !(kv->emuaccel_page=alloc_page_mol()) )
++ return 0;
++
++ kv->emuaccel_page_phys = tophys_mol( (char*)kv->emuaccel_page );
++ kv->emuaccel_mphys = mphys;
++ p = (ulong*)kv->emuaccel_page;
++
++ handler = lookup_emuaccel_handler( EMUACCEL_NOP );
++ for( i=0; i<0x1000/sizeof(int); i+=2 ) {
++ p[i] = handler;
++ p[i+1] = 0;
++ }
++
++ /* flush translations - an old translation is overridden */
++ clear_pte_hash_table( kv );
++ /* printk("emuaccel_mapin: %08x\n", mphys ); */
++ return mphys;
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/fault.c
+@@ -0,0 +1,601 @@
++/*
++ * Creation Date: <2002/06/08 20:53:20 samuel>
++ * Time-stamp: <2004/02/22 13:07:50 samuel>
++ *
++ * <fault.c>
++ *
++ * Page fault handler
++ *
++ * Copyright (C) 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include "alloc.h"
++
++#include "kernel_vars.h"
++#include "mmu.h"
++#include "mmu_contexts.h"
++#include "asmfuncs.h"
++#include "emu.h"
++#include "misc.h"
++#include "constants.h"
++#include "rvec.h"
++#include "mtable.h"
++#include "performance.h"
++#include "processor.h"
++#include "hash.h"
++
++/* exception bits (srr1/dsisr and a couple of mol defined bits) */
++#define EBIT_PAGE_FAULT BIT(1) /* I/D, PTE missing */
++#define EBIT_NO_EXEC BIT(3) /* I, no-execute or guarded */
++#define EBIT_PROT_VIOL BIT(4) /* I/D, protection violation */
++#define EBIT_IS_WRITE BIT(6) /* D */
++#define EBIT_IS_DSI 1 /* D, virtual bit */
++#define EBIT_USE_MMU 2 /* I/D, virtual bit */
++
++#define use_mmu(ebits) ((ebits) & EBIT_USE_MMU)
++#define is_write(ebits) ((ebits) & EBIT_IS_WRITE)
++#define is_dsi(ebits) ((ebits) & EBIT_IS_DSI)
++#define is_prot_viol(ebits) ((ebits) & EBIT_PROT_VIOL)
++#define is_page_fault(ebits) ((ebits) & EBIT_PAGE_FAULT)
++
++typedef struct {
++ /* filled in by exception handler */
++ ulong ea;
++ ulong *sr_base;
++ struct vsid_ent **vsid_eptr; /* pointer to MMU.vsid or MMU.unmapped_vsid */
++
++ /* filled in by lookup_mphys */
++ mPTE_t *mpte; /* lvptr to mac-pte (if != NULL) */
++ ulong mphys_page; /* ea of mphys page */
++ int pte1; /* RPN | 000 | R | C | WIMG | 00 | PP */
++ int key; /* pp key bit */
++} fault_param_t;
++
++static const char priv_viol_table[16] = { /* [is_write | key | PP] */
++ 0,0,0,0,1,0,0,0, /* read (1 == violation) */
++ 0,0,0,1,1,1,0,1 /* write */
++};
++
++#define NO_MMU_PTE1 (PTE1_R | PTE1_C /*| PTE1_M*/ | 0x2 /*pp*/ )
++
++#define MREGS (kv->mregs)
++#define MMU (kv->mmu)
++
++#ifdef CONFIG_SMP
++#define SMP_PTE1_M PTE1_M
++#else
++#define SMP_PTE1_M 0
++#endif
++
++
++/************************************************************************/
++/* Debugging */
++/************************************************************************/
++
++static inline void
++DEBUG_print_inserted_pte( ulong *slot, ulong pte0, ulong pte1, ulong ea )
++{
++#if 0
++ mPTE_t pte;
++ ulong *p = (ulong)&pte;
++ p[0] = pte0;
++ p[1] = pte1;
++
++ printk("[%p] ", slot );
++ printk("RPN %08X API %08X EA %08lX ", pte.rpn << 12, pte.api<<12, ea );
++ printk("%c%c %c%c; PP %d\n",
++ pte.h ? 'H' : 'h',
++ pte.v ? 'V' : 'v',
++ pte.r ? 'R' : 'r',
++ pte.c ? 'C' : 'c', pte.pp );
++#endif
++}
++
++
++/************************************************************************/
++/* MMU virtualization and page fault handling */
++/************************************************************************/
++
++#ifdef EMULATE_603
++static inline int
++lookup_603_pte( kernel_vars_t *kv, ulong vsid, ulong ea, int is_dsi, mPTE_t **ret_pte )
++{
++ int ind = (ea >> 12) & 0x1f; /* 32x2 PTEs */
++ ulong mask, phash, cmp, pteg, cmp_ea, *eap;
++ mPTE_t *p;
++
++ // printk("lookup_603_pte %08lX\n", ea);
++
++ if( is_dsi ) {
++ p = &MMU.ptes_d_603[ind];
++ eap = &MMU.ptes_d_ea_603[ind];
++ } else {
++ p = &MMU.ptes_i_603[ind];
++ eap = &MMU.ptes_i_ea_603[ind];
++ }
++ cmp_ea = ea & 0x0ffff000;
++ for( ; ind < 64 ; ind += 32, p += 32, eap += 32 ) {
++ if( *eap == cmp_ea && p->vsid == vsid ) {
++ *ret_pte = p;
++ return 0;
++ }
++ }
++ mask = MMU.hash_mask >> 6;
++
++ /* calculate primary and secondary PTEG */
++ phash = (cmp_ea >> 12) ^ (vsid & 0x7ffff);
++ pteg = ((phash & mask) << 6);
++ MREGS.spr[S_HASH1] = MMU.hash_mbase + pteg;
++ MREGS.spr[S_HASH2] = MMU.hash_mbase + (pteg ^ (mask << 6));
++
++ /* construct compare word */
++ cmp = BIT(0) | (vsid <<7) | (cmp_ea >> 22);
++ if( is_dsi ) {
++ MREGS.spr[S_DCMP] = cmp;
++ MREGS.spr[S_DMISS] = ea;
++ } else {
++ MREGS.spr[S_ICMP] = cmp;
++ MREGS.spr[S_IMISS] = ea;
++ }
++ return 1;
++}
++#endif
++
++static inline mPTE_t *
++lookup_mac_pte( kernel_vars_t *kv, ulong vsid, ulong ea )
++{
++ ulong phash, cmp, pteg, *p;
++ ulong mask;
++ int i;
++
++ /* make sure the hash is mapped... */
++ if( !MMU.hash_base )
++ return NULL;
++
++ /* we are only interested in the page index */
++ ea &= 0x0ffff000;
++ mask = MMU.hash_mask>>6;
++
++ /* calculate primary hash function */
++ phash = (ea >> 12) ^ (vsid & 0x7ffff);
++ pteg = ((phash & mask) << 6);
++
++ /* construct compare word */
++ cmp = BIT(0) | (vsid <<7) | ((ea&0x0fffffff)>>22);
++
++ /* look in primary PTEG */
++ p = (ulong*)((ulong)MMU.hash_base + pteg);
++ for( i=0; i<8; i++, p+=2 )
++ if( cmp == *p )
++ return (mPTE_t*)p;
++
++ /* look in secondary PTEG */
++ p = (ulong*)( (ulong)MMU.hash_base + (pteg ^ (mask << 6)) );
++ cmp |= BIT(25);
++
++ for( i=0; i<8; i++,p+=2 )
++ if( cmp == *p )
++ return (mPTE_t*)p;
++ return NULL;
++}
++
++static int
++lookup_mphys( kernel_vars_t *kv, fault_param_t *pb, const int ebits )
++{
++ ulong ea = (pb->ea & ~0xfff);
++ mSEGREG segr;
++ mac_bat_t *bp;
++ int sv_mode, i, sbits;
++ mPTE_t *mpte;
++
++ pb->mpte = NULL;
++
++ if( !use_mmu(ebits) ) {
++ pb->mphys_page = ea;
++ pb->pte1 = NO_MMU_PTE1;
++ pb->key = 0;
++ return 0;
++ }
++
++ segr = *(mSEGREG*)&MREGS.segr[ea>>28];
++ sv_mode = !(MREGS.msr & MSR_PR);
++
++ /* I/O segment? */
++ if( segr.t ) {
++ /* Memory forced (601/604)? Note that the 601 uses I/O segments
++ * even if translation is off(!). We don't implement this though.
++ */
++ ulong sr = MREGS.segr[ea>>28];
++ BUMP( memory_forced_segment );
++
++ if( ((sr >> 20) & 0x1ff) != 0x7f )
++ return RVEC_MMU_IO_SEG_ACCESS;
++ pb->mphys_page = (ea & 0x0ffff000) | ((sr & 0xf)<<28);
++ pb->pte1 = NO_MMU_PTE1;
++ pb->key = 0;
++ return 0;
++ }
++
++ /* BAT translation? 0-3 = IBATs, 4-7 = DBATs. Separated I/D BATS, hace 3/8/99 */
++ bp = is_dsi(ebits) ? &MMU.bats[4] : &MMU.bats[0];
++ for( i=0; i<4; i++, bp++ ) {
++ if( !bp->valid )
++ continue;
++ if( (sv_mode && !bp->vs) || (!sv_mode && !bp->vp) )
++ continue;
++ if( ea < bp->base || ea > bp->base+bp->size-1 )
++ continue;
++
++ pb->mphys_page = ea - bp->base + bp->mbase;
++ pb->pte1 = bp->pp | (bp->wimg << 3) | PTE1_R | PTE1_C;
++ pb->key = sv_mode ? bp->ks : bp->ku;
++ return 0;
++ }
++
++#ifdef EMULATE_603
++ if( (MREGS.spr[S_PVR] >> 16) == 3 ) {
++ if( lookup_603_pte(kv, segr.vsid, ea, is_dsi(ebits), &mpte) )
++ return is_dsi(ebits) ? (is_write(ebits) ? RVEC_DMISS_STORE_TRAP :
++ RVEC_DMISS_LOAD_TRAP) : RVEC_IMISS_TRAP;
++
++		pb->mpte = NULL;		/* important */
++ pb->mphys_page = (mpte->rpn << 12);
++ pb->pte1 = ((ulong*)mpte)[1] & (PTE1_PP | PTE1_WIMG | PTE1_R | PTE1_C);
++ pb->key = sv_mode ? segr.ks : segr.kp;
++ return 0;
++ }
++#endif
++ /* mac page table lookup */
++ if( (mpte=lookup_mac_pte(kv, segr.vsid, ea)) ) {
++ pb->mpte = mpte;
++ pb->mphys_page = (mpte->rpn << 12);
++ pb->pte1 = ((ulong*)mpte)[1] & (PTE1_PP | PTE1_WIMG | PTE1_R | PTE1_C);
++ pb->key = sv_mode ? segr.ks : segr.kp;
++ return 0;
++ }
++ /* mac page fault */
++ sbits = EBIT_PAGE_FAULT | (ebits & EBIT_IS_WRITE); /* r/w bit + page_fault */
++ RVEC_RETURN_2( &MREGS, is_dsi(ebits) ? RVEC_DSI_TRAP : RVEC_ISI_TRAP, pb->ea, sbits );
++}
++
++
++/* PTE0 must be fully initialized on entry (with V=1 and H=0).
++ * The pte_present flag should be set from srr1/dsisr bit and indicates
++ * that a valid PTE might already be present in the hash table.
++ */
++static inline ulong *
++find_pte_slot( ulong ea, ulong *pte0, int pte_present, int *pte_replaced )
++{
++ static int grab_add=0;
++ ulong phash, pteg, *p, cmp = *pte0;
++ ulong *primary, *secondary;
++ int i;
++
++ /* we are only interested in the page index */
++ ea &= 0x0ffff000;
++
++ /* primary hash function */
++ phash = (ea >> 12) ^ (PTE0_VSID(cmp) & 0x7ffff);
++
++ pteg = (phash << 6) & ptehash.pteg_mask;
++ primary = (ulong*)((ulong)ptehash.base + pteg);
++
++ pteg = pteg ^ ptehash.pteg_mask;
++ secondary = (ulong*)((ulong)ptehash.base + pteg);
++
++ if( pte_present ) {
++ *pte_replaced = 1;
++
++ /* look in primary PTEG */
++ p = primary;
++ for( i=0; i<8; i++, p+=2 )
++ if( cmp == *p )
++ return p;
++
++ /* look in secondary PTEG */
++ p = secondary;
++ cmp |= BIT(25);
++ for( i=0; i<8; i++, p+=2 )
++ if( cmp == *p ) {
++ *pte0 |= PTE0_H;
++ return p;
++ }
++ /* we will actually come here if the previous PTE
++ * was only available in the on-chip cache.
++ */
++ }
++ *pte_replaced = 0;
++
++ /* free slot in primary PTEG? */
++ for( p=primary, i=0; i<8; i++, p+=2 )
++ if( !(*p & BIT(0)) )
++ return p;
++
++ /* free slot in secondary PTEG? */
++ for( p=secondary, i=0; i<8; i++, p+=2 )
++ if( !(*p & BIT(0)) ) {
++ *pte0 |= PTE0_H;
++ return p;
++ }
++
++ /* steal a primary PTEG slot */
++ grab_add = (grab_add+1) & 0x7;
++
++ /* printk("Grabbing slot %d, EA %08X\n",grab_add, ea ); */
++ return (ulong*)((ulong)primary + grab_add * sizeof(ulong[2]));
++}
++
++static inline int
++insert_pte( kernel_vars_t *kv, fault_param_t *pb, const int ebits )
++{
++ ulong ea=pb->ea, mphys=pb->mphys_page;
++ ulong sr=pb->sr_base[ea>>28];
++ int status, pte_replaced;
++ pte_lvrange_t *lvrange;
++ ulong pte0, pte1, *slot;
++ ulong lvptr;
++
++#ifdef CONFIG_AMIGAONE
++ pte1 = PTE1_R | (pb->pte1 & (PTE1_R | PTE1_C | PTE1_WIMG))
++ | (is_write(ebits) ? 2:3);
++#else
++ pte1 = PTE1_M | PTE1_R | (pb->pte1 & (PTE1_R | PTE1_C | PTE1_WIMG))
++ | (is_write(ebits) ? 2:3);
++#endif
++
++	/* PP and WIMG bits must be set before the call to mphys_to_pte */
++ status = mphys_to_pte( kv, mphys, &pte1, is_write(ebits), &lvrange );
++
++ if( !status || (is_write(ebits) && (status & MAPPING_RO)) ) {
++ ulong addr = (mphys | (ea & 0xfff));
++ if( is_dsi(ebits) ) {
++ int rvec = is_write(ebits) ? RVEC_IO_WRITE : RVEC_IO_READ;
++ BUMP( io_read_write );
++ RVEC_RETURN_2( &MREGS, rvec, addr, NULL );
++ } else {
++ RVEC_RETURN_1( &MREGS, RVEC_BAD_NIP, addr );
++ }
++ }
++
++ /* tlbhash table hit? */
++ if( (ulong)(pb->mphys_page - MMU.hash_mbase) < (ulong)MMU.hash_mask ) {
++ /* printk("hash_table_hit at %08lX\n", pb->ea ); */
++ MMU.pthash_sr = sr;
++ MMU.pthash_ea_base = ea & ~MMU.hash_mask;
++
++ /* user read (always), superuser r/w */
++ pte1 &= ~PTE1_PP;
++ pte1 |= is_write(ebits) ? 1:3;
++ /* write accesses of the page table are handled in ptintercept.S */
++ }
++
++ pte0 = PTE0_V | (sr << 7) | ((ea>>22) & PTE0_API);
++ slot = find_pte_slot( ea, &pte0, !is_page_fault(ebits), &pte_replaced );
++
++ lvptr = (status & MAPPING_PHYSICAL) ? 0 : (pte1 & PTE1_RPN);
++
++	/* the RC bits should correspond to the is_write flag; this prevents the
++	 * CPU from stamping RC bits unnecessarily (besides, the kernel seems to
++	 * assume no RC-stamps will ever occur so RC-stamping is unsafe).
++	 */
++ if( is_write(ebits) )
++ pte1 |= PTE1_C;
++ pte1 |= SMP_PTE1_M;
++
++ /* if a page-out occurs between prepare_pte_insert() and the pte_inserted()
++ * call, then the PTE slot is zeroed out.
++ */
++ if( !(status & MAPPING_PHYSICAL) ) {
++#if 0
++ if( is_write(ebits) )
++ lvpage_dirty( kv, lvptr );
++#endif
++ pte1 &= ~PTE1_RPN;
++
++ /* zero pages should work just fine now... */
++ pte1 |= get_phys_page( kv, lvptr, is_write(ebits) );
++ /* pte1 |= get_phys_page( kv, lvptr, !(status & MAPPING_RO) ); */
++ }
++
++ if( status & MAPPING_FB_ACCEL )
++ video_pte_inserted( kv, lvptr, slot, pte0, pte1, ea );
++
++ BUMP( page_fault_ctr );
++ DEBUG_print_inserted_pte( slot, pte0, pte1, ea );
++
++ __store_PTE( ea, slot, pte0, pte1 );
++
++ pte_inserted( kv, ea, (char*)lvptr, lvrange, slot, pb->vsid_eptr[ea>>28], sr );
++
++ /* debugger support */
++ if( (kv->break_flags & BREAK_EA_PAGE) && (ea & ~0xfff) == MREGS.mdbg_ea_break )
++ RVEC_RETURN_1( &MREGS, RVEC_BREAK, BREAK_EA_PAGE );
++
++ return RVEC_NOP;
++}
++
++static int
++page_fault( kernel_vars_t *kv, fault_param_t *pb, const int ebits )
++{
++ int topind = pb->ea >> 28;
++ int ind, ret;
++
++ BUMP( access_exception_ctr );
++
++ if( (ret=lookup_mphys(kv, pb, ebits)) ) {
++ BUMP(mac_page_fault);
++ return ret;
++ }
++
++ /* printk("MPHYS_PAGE: %08lX, pp %d, key %d, wimg %d, mpte %p\n",
++ pb->mphys_page, (pb->pte1 & 3), pb->key, ((pb->pte1 >> 3) & 0xf), pb->mpte ); */
++
++ /* check privileges */
++ ind = (is_write(ebits) ? 8:0) | (pb->pte1 & PTE1_PP) | (pb->key?4:0);
++ if( priv_viol_table[ind] ) {
++ /* r/w bit + priv. violation */
++ int sbits = EBIT_PROT_VIOL | (ebits & EBIT_IS_WRITE);
++ BUMP(mac_priv_violation);
++ RVEC_RETURN_2( &MREGS, is_dsi(ebits) ? RVEC_DSI_TRAP : RVEC_ISI_TRAP, pb->ea, sbits );
++ }
++
++ /* stamp R/C bits (mpte is NULL if this is not a page translation). */
++ if( pb->mpte ) {
++ pb->mpte->r = 1;
++ if( is_write(ebits) )
++ pb->mpte->c = 1;
++
++ /* stamp pthash_inuse_bit */
++ if( MMU.pthash_inuse_bits ) {
++ int nr = ((int)pb->mpte - (int)MMU.hash_base) >> 3;
++ set_bit_mol( nr, MMU.pthash_inuse_bits );
++ }
++ }
++
++ /* perform memory allocations if necessary; we are not allowed to
++ * do this later (the mtable insertion must be atomic)
++ */
++ if( mtable_memory_check(kv) )
++ return RVEC_NOP; /* out of memory */
++
++ /* the vsid entry might have been released */
++ if( !pb->vsid_eptr[topind] )
++ return RVEC_NOP;
++
++ return insert_pte( kv, pb, ebits );
++}
++
++
++/************************************************************************/
++/* VSID allocation (the normal VSID lookup occurs in vsid.S) */
++/************************************************************************/
++
++static void
++fix_sr( kernel_vars_t *kv, int sr, int mapped )
++{
++ int macvsid = mapped ? (MREGS.segr[sr] & VSID_MASK) : VSID_MASK + 1 + sr;
++ ulong user_sr, sv_sr;
++ vsid_ent_t *r = vsid_get_user_sv( kv, macvsid, &user_sr, &sv_sr );
++
++ BUMP(fix_sr);
++ if( !r )
++ return;
++
++ if( mapped ) {
++ int value = MREGS.segr[sr];
++ int nbit = value & VSID_N;
++ MMU.vsid[sr] = r;
++ MMU.user_sr[sr] = ((value & VSID_Kp) ? user_sr : sv_sr) | nbit;
++ MMU.sv_sr[sr] = ((value & VSID_Ks) ? user_sr : sv_sr) | nbit;
++ } else {
++ MMU.unmapped_vsid[sr] = r;
++ MMU.unmapped_sr[sr] = user_sr;
++ }
++ invalidate_splitmode_sr( kv );
++}
++
++
++/************************************************************************/
++/* Exception entrypoints (called from assembly) */
++/************************************************************************/
++
++extern int dsi_exception( kernel_vars_t *kv, ulong dar, ulong dsisr );
++extern int isi_exception( kernel_vars_t *kv, ulong nip, ulong srr1 );
++
++int
++dsi_exception( kernel_vars_t *kv, ulong dar, ulong dsisr )
++{
++ int ebits, topind = dar >> 28;
++ fault_param_t pb;
++
++ /* printk("DSI: EA %08lX, DSISR %08lX\n", dar, dsisr ); */
++ if( dsisr & 0x84500000 ) /* 0,5,9,11 */
++ RVEC_RETURN_2( &MREGS, RVEC_UNUSUAL_DSISR_BITS, dar, dsisr );
++
++ pb.ea = dar;
++ ebits = EBIT_IS_DSI | (dsisr & (EBIT_PAGE_FAULT | EBIT_PROT_VIOL | EBIT_IS_WRITE))
++ | ((MREGS.msr & MSR_DR) ? EBIT_USE_MMU : 0);
++
++ pb.vsid_eptr = (MREGS.msr & MSR_DR) ? MMU.vsid : MMU.unmapped_vsid;
++ pb.sr_base = (ulong*)((ulong)MMU.sr_data - kv->kvars_tophys_offs);
++
++ /* segment register switch-in required? */
++ if( !pb.vsid_eptr[topind] ) {
++ fix_sr( kv, topind, use_mmu(ebits) );
++ return RVEC_NOP;
++ }
++ BUMP(dsi);
++ return page_fault( kv, &pb, ebits );
++}
++
++int
++isi_exception( kernel_vars_t *kv, ulong nip, ulong srr1 )
++{
++ fault_param_t pb;
++ /* printk("ISI: NIP %08lX, SRR1 %08lX\n", nip, srr1 ); */
++
++ pb.vsid_eptr = (MREGS.msr & MSR_IR) ? MMU.vsid : MMU.unmapped_vsid;
++
++ if( srr1 & EBIT_PAGE_FAULT ) {
++ int ebits = EBIT_PAGE_FAULT | ((MREGS.msr & MSR_IR) ? EBIT_USE_MMU : 0);
++ pb.ea = nip;
++ pb.sr_base = (ulong*)((ulong)MMU.sr_inst - kv->kvars_tophys_offs);
++ BUMP(isi_page_fault);
++ return page_fault( kv, &pb, ebits );
++ }
++ if( srr1 & EBIT_NO_EXEC ) {
++ int sr = nip >> 28;
++ if( !pb.vsid_eptr[sr] ) {
++ fix_sr( kv, sr, (MREGS.msr & MSR_IR) );
++ return RVEC_NOP;
++ }
++ /* printk("Guarded memory access at %08lX\n", nip ); */
++ RVEC_RETURN_2( &MREGS, RVEC_ISI_TRAP, nip, EBIT_NO_EXEC );
++ }
++
++ BUMP(isi_prot_violation);
++ /* must be privileges violation */
++ RVEC_RETURN_2( &MREGS, RVEC_ISI_TRAP, nip, EBIT_PROT_VIOL );
++}
++
++
++/************************************************************************/
++/* debugger functions */
++/************************************************************************/
++
++int
++dbg_translate_ea( kernel_vars_t *kv, int context, ulong va, int *ret_mphys, int data_access )
++{
++ int ebits = data_access ? EBIT_IS_DSI : 0;
++ fault_param_t pb;
++
++ memset( &pb, 0, sizeof(pb) );
++ pb.ea = va;
++
++ switch( context ) {
++ case kContextUnmapped:
++ pb.sr_base = MMU.unmapped_sr;
++ break;
++ case kContextMapped_S:
++ pb.sr_base = MMU.sv_sr;
++ ebits |= EBIT_USE_MMU;
++ break;
++ case kContextMapped_U:
++ pb.sr_base = MMU.user_sr;
++ ebits |= EBIT_USE_MMU;
++ break;
++ default:
++ return 1;
++ }
++
++ if( lookup_mphys(kv, &pb, ebits) )
++ return 1;
++ *ret_mphys = pb.mphys_page | (va & 0xfff);
++ return 0;
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/hash.c
+@@ -0,0 +1,126 @@
++/*
++ * Creation Date: <2004/02/14 11:42:19 samuel>
++ * Time-stamp: <2004/03/13 14:25:00 samuel>
++ *
++ * <hash.c>
++ *
++ * CPU PTE hash handling
++ *
++ * Copyright (C) 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include "alloc.h"
++#include "kernel_vars.h"
++#include "mmu.h"
++#include "mmu_contexts.h"
++#include "asmfuncs.h"
++#include "emu.h"
++#include "misc.h"
++#include "mtable.h"
++#include "performance.h"
++#include "context.h"
++#include "hash.h"
++#include "map.h"
++
++/* GLOBALS */
++hash_info_t ptehash;
++
++static struct {
++ int hash_mapped;
++ int sdr1_loaded;
++ char *allocation;
++} hs;
++
++
++static int
++create_pte_hash( void )
++{
++ ulong size = 1024*128; /* 128K is the kmalloc limit */
++ ulong sdr1, mask, base, physbase;
++ char *p;
++
++ if( !(p=kmalloc_cont_mol(size)) )
++ return 1;
++ memset( p, 0, size );
++ base = (ulong)p;
++ physbase = tophys_mol( (char*)base );
++
++ if( (physbase & (size-1)) ) {
++ int offs;
++ printk("Badly aligned SDR1 allocation - 64K wasted\n");
++ size /= 2;
++ offs = ((physbase + size) & ~(size-1)) - physbase;
++ physbase += offs;
++ base += offs;
++ }
++ mask = (size-1) >> 6;
++ sdr1 = mask >> 10;
++ sdr1 |= physbase;
++
++ hs.allocation = p;
++ ptehash.sdr1 = sdr1;
++ ptehash.base = (ulong*)base;
++
++ printk("SDR1 = %08lX\n", sdr1 );
++ return 0;
++}
++
++int
++init_hash( void )
++{
++ ulong sdr1;
++
++ memset( &ptehash, 0, sizeof(ptehash) );
++
++ if( IS_LINUX ) {
++ sdr1 = _get_sdr1();
++
++ /* linux does not use SDR1 on the 603[e] */
++ if( !sdr1 ) {
++ create_pte_hash();
++ sdr1 = ptehash.sdr1;
++ _set_sdr1( sdr1 );
++ hs.sdr1_loaded = 1;
++ }
++ } else {
++ /* sharing the hash under darwin is too complicated */
++ create_pte_hash();
++ sdr1 = ptehash.sdr1;
++ }
++
++ if( !sdr1 )
++ return 1;
++
++ ptehash.sdr1 = sdr1;
++ ptehash.pteg_mask = (((sdr1 & 0x1ff) << 10) | 0x3ff) << 6;
++ ptehash.pte_mask = ptehash.pteg_mask | 0x38;
++ ptehash.physbase = sdr1 & ~0xffff;
++
++ if( !ptehash.base ) {
++ hs.hash_mapped = 1;
++ ptehash.base = map_hw_hash( ptehash.physbase, ptehash.pte_mask + 8 );
++ }
++
++ return !ptehash.base;
++}
++
++void
++cleanup_hash( void )
++{
++ if( hs.hash_mapped )
++ unmap_hw_hash( ptehash.base );
++
++ if( hs.sdr1_loaded )
++ _set_sdr1( 0 );
++ if( hs.allocation )
++ kfree_cont_mol( hs.allocation );
++
++ memset( &ptehash, 0, sizeof(ptehash) );
++ memset( &hs, 0, sizeof(hs) );
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/include/actions.h
+@@ -0,0 +1,177 @@
++/*
++ * Creation Date: <2004/01/31 13:08:42 samuel>
++ * Time-stamp: <2004/03/07 14:25:23 samuel>
++ *
++ * <actions.h>
++ *
++ *
++ *
++ * Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ *
++ */
++
++#ifndef _H_ACTIONS
++#define _H_ACTIONS
++
++/* Certain assembly macros (like LI_PHYS) adds meta information to a special
++ * ELF segment. This information is parsed when the module is loaded/used and
++ * the appropriate action is performed (a few assembly instruction are typically
++ * modified).
++ *
++ * Actions with lower opcodes are performed before actions with higher opcodes.
++ */
++
++#define ACTION_LIS_SPECVAR_H 1 /* dreg, special_var_index */
++#define ACTION_ORI_SPECVAR_L 2 /* dreg*32 + sreg, special_var_index */
++#define ACTION_LI_PHYS 3 /* dreg, addr_offs */
++#define ACTION_LWZ_PHYSADDR_R 4 /* dreg*32 + reg, addr_offs */
++#define ACTION_TOPHYS 5 /* dreg*32 + sreg */
++#define ACTION_TOVIRT 6 /* dreg*32 + sreg */
++#define ACTION_RELOCATE_LOW 7 /* code_size, destvar, code... */
++#define ACTION_VRET 8 /* vector [special, used by RELOC_HOOK] */
++#define ACTION_FIX_SPRG2 9 /* scratch_reg */
++
++#define FLUSH_CACHE_ACTION 9 /* flush the icache at this point */
++
++#define ACTION_HOOK_FUNCTION 10
++#define ACTION_RELOC_HOOK 11 /* trigger, size, vret_action#, vret_offs */
++#define MAX_NUM_ACTIONS 12
++
++/* Special vars (ACTION_*_SPECVAR) */
++#define SPECVAR_SESSION_TABLE 1
++
++/* Function hooks (ACTION_HOOK_FUNCTION) */
++#define FHOOK_FLUSH_HASH_PAGE 1
++
++#ifndef __ASSEMBLY__
++typedef struct {
++ int action; /* ACTION_xxx */
++ int offs; /* target instruction offset (from r__reloctable_start) */
++ int params[1]; /* parameters */
++} action_pb_t;
++#endif /* __ASSEMBLY__ */
++
++
++/************************************************************************/
++/* assembly macros */
++/************************************************************************/
++
++/*
++ * WARNING: These macros uses the 9 label (the OSX assembler
++ * can only use labels (0-9).
++ */
++
++#ifdef __ASSEMBLY__
++
++#ifdef __linux__
++#define ACTIONS_SECTION .text 95
++#define ACTIONS_OFFS_SECTION .text 96
++#endif
++#ifdef __darwin__
++#define ACTIONS_SECTION .section __TEXT,__areloc
++#define ACTIONS_OFFS_SECTION .section __DATA,__areloc_offs
++#endif
++
++mDEFINE(ACTION_PB, [action], [
++ .text
++9:
++ ACTIONS_SECTION
++ .long _action /* action */
++ .long (9b - r__reloctable_start) /* target PC */
++9:
++ ACTIONS_OFFS_SECTION
++ .long (9b - r__actions_section - 8) /* store pointer to PB */
++ ACTIONS_SECTION
++])
++
++mDEFINE(ACTION_1, [action, p1], [
++ ACTION_PB( _action )
++ .long _p1
++ .text
++ nop /* replaced */
++])
++
++mDEFINE(ACTION_21, [action, p1, p2], [
++ ACTION_PB( _action )
++ .long _p1, _p2
++ .text
++ nop /* replaced */
++])
++
++mDEFINE(ACTION_2, [action, p1, p2], [
++ ACTION_PB( _action )
++ .long _p1, _p2
++ .text
++ nop /* replaced */
++ nop /* replaced */
++])
++
++mDEFINE(ACTION_13, [action, p1], [
++ ACTION_PB( _action )
++ .long _p1
++ .text
++ nop /* replaced */
++ nop /* replaced */
++ nop /* replaced */
++])
++
++
++ /* replaced with lis dreg,addr@ha ; addi dreg,dreg,addr@l */
++#define LI_PHYS( dreg, addr ) \
++ ACTION_2( ACTION_LI_PHYS, dreg, (addr - r__reloctable_start) )
++
++ /* replaced with addis dreg,reg,addr@ha ; lwz dreg,addr@lo(dreg). */
++#define LWZ_PHYSADDR_R( dreg, addr, reg ) \
++ ACTION_2( ACTION_LWZ_PHYSADDR_R, (dreg*32 + reg), (addr - r__reloctable_start) )
++
++#define LWZ_PHYS( dreg, addr ) \
++ LWZ_PHYSADDR_R( dreg, addr, 0 );
++
++ /* syntax: tophys rD,rS */
++MACRO(tophys, [dreg, sreg], [
++ ACTION_1( ACTION_TOPHYS, (_dreg * 32 + _sreg) )
++])
++ /* syntax: tovirt rD,rS */
++MACRO(tovirt, [dreg, sreg], [
++ ACTION_1( ACTION_TOVIRT, (_dreg * 32 + _sreg) )
++])
++
++ /* syntax: lis_specvar_ha rD,SPECIAL_VAR */
++MACRO(lis_svh, [dreg, specvariable], [
++ ACTION_21( ACTION_LIS_SPECVAR_H, _dreg, _specvariable )
++])
++
++ /* syntax: addi_specvar_ha rD,rS,SPECIAL_VAR */
++MACRO(ori_svl, [dreg, sreg, specvariable], [
++ ACTION_21( ACTION_ORI_SPECVAR_L, (_dreg * 32)+_sreg, _specvariable )
++])
++
++ /* syntax: FIX_SPRG2 rN */
++MACRO(fix_sprg2, [reg], [
++ /* only darwin needs this (sprg_a0 holds bits describing the CPU) */
++#ifdef __darwin__
++ ACTION_13( ACTION_FIX_SPRG2, _reg )
++#endif
++])
++
++mDEFINE(RELOC_LOW, [destvar], [
++ ACTION_PB( ACTION_RELOCATE_LOW )
++ .long _destvar[]_end - _destvar[]_start
++ .long EXTERN([]_destvar)
++_destvar[]_start:
++])
++
++mDEFINE(RELOC_LOW_END, [destvar], [
++_destvar[]_end:
++ .text
++])
++
++
++#endif /* __ASSEMBLY__ */
++
++
++#endif /* _H_ACTIONS */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/alloc.h
+@@ -0,0 +1,70 @@
++/*
++ * Creation Date: <2002/01/13 16:35:18 samuel>
++ * Time-stamp: <2004/01/25 17:36:49 samuel>
++ *
++ * <alloc.h>
++ *
++ * Memory allocation and mappings
++ *
++ * Copyright (C) 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ *
++ */
++
++#ifndef _H_ALLOC
++#define _H_ALLOC
++
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/mm.h>
++#include <asm/uaccess.h>
++
++#ifdef LINUX_26
++#include <asm/cacheflush.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
++#include <asm/io.h>
++#endif
++#endif
++
++static inline void *kmalloc_mol( int size ) {
++ return kmalloc( size, GFP_KERNEL );
++}
++static inline void kfree_mol( void *p ) {
++ kfree( p );
++}
++static inline void *vmalloc_mol( int size ) {
++ return vmalloc( size );
++}
++static inline void vfree_mol( void *p ) {
++ vfree( p );
++}
++static inline ulong alloc_page_mol( void ) {
++ return get_zeroed_page( GFP_KERNEL );
++}
++static inline void free_page_mol( ulong addr ) {
++ free_page( addr );
++}
++static inline void *kmalloc_cont_mol( int size ) {
++ return kmalloc( size, GFP_KERNEL );
++}
++static inline void kfree_cont_mol( void *addr ) {
++ kfree( addr );
++}
++static inline ulong tophys_mol( void *addr ) {
++ return virt_to_phys(addr);
++}
++static inline void flush_icache_mol( ulong start, ulong stop ) {
++ flush_icache_range( start, stop );
++}
++static inline void *map_phys_range( ulong paddr, ulong size, char **ret_addr ) {
++ /* Warning: This works only for certain addresses... */
++ *ret_addr = phys_to_virt(paddr);
++ return (void*)(-2); /* dummy */
++}
++static inline void unmap_phys_range( void *handle ) {}
++
++
++#endif /* _H_ALLOC */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/archinclude.h
+@@ -0,0 +1,77 @@
++/*
++ * Creation Date: <2002/01/12 22:11:51 samuel>
++ * Time-stamp: <2004/04/10 22:27:41 samuel>
++ *
++ * <archinclude.h>
++ *
++ *
++ *
++ * Copyright (C) 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_ARCHINCLUDE
++#define _H_ARCHINCLUDE
++
++//#define PERF_MONITOR
++//#define PERFORMANCE_INFO /* collect performance statistics */
++//#define PERFORMANCE_INFO_LIGHT /* sample only the most important counters */
++
++#include "mol_config.h"
++#include "kconfig.h"
++
++#include <linux/version.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++#include <linux/utsrelease.h>
++#endif
++
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
++#include <linux/config.h>
++#else
++#include <linux/autoconf.h>
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++#define LINUX_26
++#endif
++
++#ifndef __ASSEMBLY__
++#include <linux/kernel.h>
++#include <asm/atomic.h>
++#include <linux/sched.h> /* needed by <asm/mmu_context.h> */
++#include <asm/mmu_context.h>
++#include <asm/time.h>
++
++#include "dbg.h"
++
++/* these are declared, but we just want to be sure the definition does not change */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++extern int flush_hash_pages( unsigned context, unsigned long va, unsigned long pmdval, int count ); /* 2.6 */
++#else
++extern int flush_hash_page( unsigned context, unsigned long va, pte_t *ptep ); /* 2.5 */
++#endif /* Linux 2.6 */
++
++#endif /* __ASSEMBLY__ */
++
++#ifdef LINUX_26
++#define compat_flush_hash_pages flush_hash_pages
++#define compat_hash_table_lock mmu_hash_lock
++#else
++#define compat_flush_hash_pages flush_hash_page
++#define compat_hash_table_lock hash_table_lock
++#endif
++
++
++#define ENOSYS_MOL ENOSYS
++#define EFAULT_MOL EFAULT
++
++#define IS_LINUX 1
++#define IS_DARWIN 0
++
++
++#endif /* _H_ARCHINCLUDE */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/asm.m4
+@@ -0,0 +1,141 @@
++/* -*- asm -*-
++ * Creation Date: <2001/12/30 20:08:53 samuel>
++ * Time-stamp: <2002/01/14 00:48:09 samuel>
++ *
++ * <asm.m4>
++ *
++ * m4 initialization (m4 is used as an assembly preprocessor)
++ *
++ * Copyright (C) 2001, 2002, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++/* This end-of-quote matches the start-of-quote in mol_config.h */
++]]]]]
++divert(-1)
++changequote([,])
++
++dnl m4 macros to avoid in header files (we can not rename these)
++dnl ==========================================================
++dnl shift, eval, expr, decr, incr, ifelse, popdef, pushdef
++
++
++dnl **************************************************************
++dnl * Rename to reduce namespace conflicts
++dnl **************************************************************
++
++dnl *** Changing the name of built-in macros using defn does not always work ***
++
++undefine([changecom])
++undefine([changequote])
++dnl undefine([decr])
++undefine([defn])
++dnl undefine([divert])
++undefine([divnum])
++undefine([errprint])
++dnl undefine([eval])
++dnl undefine([expr])
++undefine([file])
++undefine([format])
++undefine([len])
++undefine([line])
++dnl undefine([ifelse])
++dnl undefine([incr])
++undefine([indir])
++undefine([include])
++undefine([index])
++undefine([maketemp])
++undefine([paste])
++undefine([patsubst])
++dnl undefine([popdef])
++dnl undefine([pushdef])
++undefine([regexp])
++dnl undefine([shift])
++undefine([sinclude])
++undefine([spaste])
++undefine([substr])
++undefine([syscmd])
++undefine([sysval])
++undefine([translit])
++undefine([traceoff])
++undefine([traceon])
++undefine([undivert])
++undefine([unix])
++dnl undefine([__gnu__])
++dnl undefine([__unix__])
++
++dnl Uncomment to list m4 definitions
++dnl dumpdef m4exit
++
++/************************************************************************/
++/* M4 Macros */
++/************************************************************************/
++
++dnl
++dnl WARNING - M4 BUG IN MacOS X (10.1.2):
++dnl eval() in MacOS X (10.1.2) handles '&' as '&&' and '|' as '||'.
++dnl
++
++/* FORLOOP(var, from, to, [body var...]) */
++define([mFORLOOP], [pushdef([$1], [$2])_mFORLOOP([$1], [$2], [$3], [$4])popdef([$1])])
++define([_mFORLOOP], [$4[]ifelse($1, [$3], ,
++ [define([$1], incr($1))_mFORLOOP([$1], [$2], [$3], [$4])])])
++
++define([mFIRST],[$1])
++define([mCONCAT_C],[ [$@] ])
++
++/* FOREACH(var, [item1, ...], [body var ...]) */
++define([mFOREACH],[pushdef([$1],mFIRST($2))_mFOREACH([$1],[shift($2)],[$3])popdef([$1])])
++define([_mFOREACH],[$3] [ifelse(mFIRST($2),,,[define([$1],mFIRST($2)) _mFOREACH([$1],[shift($2)],[$3])])])
++
++
++/******************** Nice macro definitions **************************/
++
++/* MACRO(name, [param1, ...], [body _param1 ...]) */
++#ifdef __linux__
++define([MACRO], [
++ .macro [$1] $2
++ mFOREACH([i],[$2],[ pushdef(_[]i,\i) ])
++ $3
++ .endm
++ mFOREACH([i],[$2],[ popdef(_[]i) ])
++])
++#else
++define([MACRO], [
++ .macro [$1]
++ pushdef([_n],0)
++ mFOREACH([i],[$2],[ pushdef(_[]i,[$[]]_n) define([_n],incr(_n)) ])
++ $3
++ .endmacro
++ mFOREACH([i],[$2],[ popdef(_[]i) ])
++ popdef([_n])
++])
++#endif
++define([MACRO_0], [MACRO([$1],[_dummy_param_],[$2])])
++
++
++/* mDEFINE(name, [param1, ...], [body _param1 ...]) */
++define([mDEFINE], [
++ pushdef([_n],1)
++ mFOREACH([i],[$2],[ pushdef(_[]i,[$[]]_n) define([_n],incr(_n)) ])
++ define([$1], mCONCAT_C($3) )
++ mFOREACH([i],[$2],[ popdef(_[]i) ])
++ popdef([_n])
++])
++
++
++/* rLABEL(label): b label_b ; b label_f */
++define(rLABEL,[dnl
++ifdef([$1]_curnum,,[$1[]f:])dnl
++ define([_tmp_curnum],ifdef($1[]_curnum, [eval($1_curnum+1)], 1)) dnl
++ define([$1]_curnum,_tmp_curnum)dnl
++ define([$1]f,$1_[]eval($1_curnum[]+1) )dnl
++ define([$1]b,$1_[]$1_curnum[] )
++$1[]_[]$1_curnum[]dnl
++])
++
++divert(0)dnl
+--- /dev/null
++++ b/drivers/macintosh/mol/include/asm_offsets.inc
+@@ -0,0 +1,136 @@
++/* -*-c-*-
++ * Creation Date: <2001/10/20 17:51:10 samuel>
++ * Time-stamp: <2004/02/21 21:38:27 samuel>
++ *
++ * <asm_offsets.inc>
++ *
++ * Extra offsets (included from asm_offsets.c)
++ *
++ * Copyright (C) 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++extern void extra( void );
++
++void
++extra( void )
++{
++ DEFINE(SIZE_OF_KERNEL_VARS, sizeof( kernel_vars_t ));
++
++ ST_DEF( ST_MAGIC, magic );
++ ST_DEF( ST_KVARS_PH, kvars_ph );
++
++ K_DEF( K_EMUACCEL_PAGE, emuaccel_page );
++ K_DEF( K_EMUACCEL_PAGE_PHYS, emuaccel_page_phys );
++ K_DEF( K_EMUACCEL_MPHYS, emuaccel_mphys );
++ K_DEF( K_EMUACCEL_SIZE, emuaccel_size );
++
++ K_DEF( K_KERNEL_VARS, kvars_virt );
++ K_DEF( K_BREAK_FLAGS, break_flags );
++#ifdef PERFORMANCE_INFO
++ K_DEF( K_ASM_BUMP_CNTRS, asm_bump_cntr );
++ K_DEF( K_ASM_TICK_STAMPS, asm_tick_stamp );
++#endif
++ /* MMU */
++ K_DEF( K_ILLEGAL_SR, mmu.illegal_sr );
++ K_DEF( K_CUR_SR_BASE, mmu.cur_sr_base );
++ K_DEF( K_SR_DATA, mmu.sr_data );
++ K_DEF( K_SR_INST, mmu.sr_inst );
++
++ K_DEF( K_UNMAPPED_SR_BASE, mmu.unmapped_sr[0] );
++ K_DEF( K_USER_SR_BASE, mmu.user_sr[0] );
++ K_DEF( K_SV_SR_BASE, mmu.sv_sr[0] );
++ K_DEF( K_SPLIT_SR_BASE, mmu.split_sr[0] );
++ K_DEF( K_VSID_ENT_BASE, mmu.vsid[0] );
++
++#ifdef __darwin__
++ K_DEF( K_OS_SDR1, mmu.os_sdr1 );
++ K_DEF( K_MOL_SDR1, mmu.mol_sdr1 );
++ DEFINE( SKIPLIST_NEXT, offsetof(skiplist_el_t, level[0].next_phys) );
++#endif
++#ifdef __linux__
++ DEFINE( SKIPLIST_NEXT, offsetof(skiplist_el_t, level[0].next) );
++#endif
++ DEFINE( SKIPLIST_LEVELSIZE, sizeof(skiplist_level_t) );
++ DEFINE( SKIPLIST_KEY, offsetof(skiplist_el_t, key) );
++ DEFINE( K_VSID_SL_ROOT_ELEM, offsetof(kernel_vars_t, mmu.vsid_sl.root)
++ - offsetof(skiplist_el_t, level));
++ K_DEF( K_VSID_SL_SLEVEL, mmu.vsid_sl.slevel );
++
++ K_DEF( K_IBAT0U_SAVE, _bp.ibat_save[0].word[0] );
++ K_DEF( K_IBAT0L_SAVE, _bp.ibat_save[0].word[1] );
++ K_DEF( K_IBAT1U_SAVE, _bp.ibat_save[1].word[0] );
++ K_DEF( K_IBAT1L_SAVE, _bp.ibat_save[1].word[1] );
++ K_DEF( K_IBAT2U_SAVE, _bp.ibat_save[2].word[0] );
++ K_DEF( K_IBAT2L_SAVE, _bp.ibat_save[2].word[1] );
++ K_DEF( K_IBAT3U_SAVE, _bp.ibat_save[3].word[0] );
++ K_DEF( K_IBAT3L_SAVE, _bp.ibat_save[3].word[1] );
++
++ K_DEF( K_DBAT0U_SAVE, _bp.dbat_save[0].word[0] );
++ K_DEF( K_DBAT0L_SAVE, _bp.dbat_save[0].word[1] );
++ K_DEF( K_DBAT1U_SAVE, _bp.dbat_save[1].word[0] );
++ K_DEF( K_DBAT1L_SAVE, _bp.dbat_save[1].word[1] );
++ K_DEF( K_DBAT2U_SAVE, _bp.dbat_save[2].word[0] );
++ K_DEF( K_DBAT2L_SAVE, _bp.dbat_save[2].word[1] );
++ K_DEF( K_DBAT3U_SAVE, _bp.dbat_save[3].word[0] );
++ K_DEF( K_DBAT3L_SAVE, _bp.dbat_save[3].word[1] );
++
++ K_DEF( K_SPLIT_DBAT0U, mmu.split_dbat0.word[0] );
++ K_DEF( K_SPLIT_DBAT0L, mmu.split_dbat0.word[1] );
++ K_DEF( K_TRANSL_DBAT0U, mmu.transl_dbat0.word[0] );
++ K_DEF( K_TRANSL_DBAT0L, mmu.transl_dbat0.word[1] );
++
++ K_DEF( K_TLBHASH_SR, mmu.pthash_sr );
++ K_DEF( K_TLBHASH_BASE_EA, mmu.pthash_ea_base );
++ K_DEF( K_HASH_MASK, mmu.hash_mask );
++ K_DEF( K_PTHASH_INUSE_PH, mmu.pthash_inuse_bits_ph );
++
++ /* fields private to the assembly files */
++ K_DEF( K_MSR, _bp._msr );
++
++ K_DEF( K_DEC_STAMP, _bp.dec_stamp );
++ K_DEF( K_INT_STAMP, _bp.int_stamp);
++
++ K_DEF( K_TMP_SCRATCH0, _bp.tmp_scratch[0] );
++ K_DEF( K_TMP_SCRATCH1, _bp.tmp_scratch[1] );
++ K_DEF( K_TMP_SCRATCH2, _bp.tmp_scratch[2] );
++ K_DEF( K_TMP_SCRATCH3, _bp.tmp_scratch[3] );
++
++ K_DEF( K_SPLIT_NIP_SEGMENT, _bp.split_nip_segment );
++
++ K_DEF( K_SPR_HOOKS, _bp.spr_hooks );
++
++ /* private to emulation.S */
++ K_DEF( K_MSR_SR_TABLE, _bp.msr_sr_table[0] );
++
++ /* io_page_t type */
++ IO_DEF( IOP_MAGIC, magic );
++ IO_DEF( IOP_MAGIC2, magic2 );
++ IO_DEF( IOP_MPHYS, mphys );
++ IO_DEF( IOP_ME_PHYS, me_phys );
++ IO_DEF( IOP_NEXT, next );
++ IO_DEF( IOP_USR_DATA, usr_data );
++
++ /****** New Low-level assembly implementation ***********/
++
++ K_DEF( K_EMULATOR_STACK, _bp.emulator_stack );
++ K_DEF( K_EMULATOR_TOC, _bp.emulator_toc );
++ K_DEF( K_EMULATOR_NIP, _bp.emulator_nip );
++ K_DEF( K_EMULATOR_MSR, _bp.emulator_msr );
++ K_DEF( K_EMULATOR_SPRG2, _bp.emulator_sprg2 );
++ K_DEF( K_EMULATOR_SPRG3, _bp.emulator_sprg3 );
++ K_DEF( K_EMULATOR_KCALL_NIP, _bp.emulator_kcall_nip );
++
++ K_DEF( K_EMULATOR_SR, mmu.emulator_sr );
++
++#ifdef __darwin__
++ K_DEF( K_KCALL_ROUTINE, kcall_routine );
++ K_DEF( K_KCALL_ARG0, kcall_args[0] );
++ K_DEF( K_KCALL_ARG1, kcall_args[1] );
++ K_DEF( K_KCALL_ARG2, kcall_args[2] );
++#endif
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/include/asmdbg.h
+@@ -0,0 +1,184 @@
++/*
++ * Creation Date: <2004/01/29 20:12:41 samuel>
++ * Time-stamp: <2004/03/06 13:17:36 samuel>
++ *
++ * <asmdbg.h>
++ *
++ * debug support
++ *
++ * Copyright (C) 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ *
++ */
++
++#ifndef _H_ASMDBG
++#define _H_ASMDBG
++
++
++/************************************************************************/
++/* Performance Statistics */
++/************************************************************************/
++
++#ifdef PERFORMANCE_INFO
++ define([_bump_ind_], 0)
++
++#define __BUMP( str ) \
++ .text 92 ;\
++debug_str_[]_bump_ind_: ;\
++ .if (_bump_ind_ >= NUM_ASM_BUMP_CNTRS) ;\
++ .print "** too many BUMP counters **" ; .fail 1 ;\
++ .endif ;\
++ .ascii str "\0" ;\
++ balign_4 ;\
++ .text 90 ;\
++ .long (debug_str_[]_bump_ind_-__start_bumptable) ;\
++ .text ;\
++ stw r3,xDEBUG_SCR1(r1) ;\
++ lwz r3,(K_ASM_BUMP_CNTRS+4*_bump_ind_)(r1) ;\
++ addi r3,r3,1 ;\
++ stw r3,(K_ASM_BUMP_CNTRS+4*_bump_ind_)(r1) ;\
++ lwz r3,xDEBUG_SCR1(r1) ;\
++ define([_bump_ind_],eval(_bump_ind_+1))
++
++
++ define([_tick_ind_], 0)
++
++#define __ZERO_TICK_CNT(cntr) \
++ ifdef([##cntr##_ind_],[],[ \
++ define([##cntr##_ind_], _tick_ind_) \
++ define([_tick_ind_], eval(_tick_ind_+1)) \
++ ]) \
++ .if (_tick_ind_ > NUM_ASM_TICK_CNTRS) ;\
++ .print "** too many TICK counters **" ; .fail 1 ;\
++ .endif ;\
++ stw r3,xDEBUG_SCR1(r1) ;\
++ mftb r3 ;\
++ stw r3,(K_ASM_TICK_STAMPS + 4*cntr##_ind_)(r1) ;\
++ lwz r3,xDEBUG_SCR1(r1)
++
++#define __GET_TICK_CNT(cntr, name) \
++ .text 92 ;\
++debug_str_[]_bump_ind_: ;\
++ .if (_bump_ind_ >= NUM_ASM_BUMP_CNTRS) ;\
++ .print "** too many BUMP counters **" ; .fail 1 ;\
++ .endif ;\
++ .ascii name "_ticks\0" ;\
++ balign_4 ;\
++ .text 90 ;\
++ .long (debug_str_[]_bump_ind_-__start_bumptable) ;\
++ .text ;\
++ ifdef([##cntr##_ind_],[],[ \
++ define([##cntr##_ind_], _tick_ind_) \
++ define([_tick_ind_], eval(_tick_ind_+1)) \
++ ]) \
++ .if (_tick_ind_ > NUM_ASM_TICK_CNTRS) ;\
++ .print "** too many TICK counters **" ; .fail 1 ;\
++ .endif ;\
++ stw r3,xDEBUG_SCR1(r1) ;\
++ mftb r3 ;\
++ stw r4,xDEBUG_SCR2(r1) ;\
++ lwz r4,(K_ASM_TICK_STAMPS + 4*cntr##_ind_)(r1) ;\
++ sub r3,r3,r4 ;\
++ lwz r4,(K_ASM_BUMP_CNTRS+4*_bump_ind_)(r1) ;\
++ add r4,r4,r3 ;\
++ stw r4,(K_ASM_BUMP_CNTRS+4*_bump_ind_)(r1) ;\
++ lwz r3,xDEBUG_SCR1(r1) ;\
++ mftb r4 ;\
++ stw r4,(K_ASM_TICK_STAMPS + 4*cntr##_ind_)(r1) ;\
++ lwz r4,xDEBUG_SCR2(r1) ;\
++ define([_bump_ind_],eval(_bump_ind_+1))
++
++#endif /* PERFORMANCE_INFO */
++
++#ifndef PERFORMANCE_INFO_LIGHT
++#define BUMP(s) __BUMP(s)
++#define ZERO_TICK_CNT(c) __ZERO_TICK_CNT(c)
++#define GET_TICK_CNT(c, name) __GET_TICK_CNT(c,name)
++#else
++#define BUMP(s)
++#define ZERO_TICK_CNT(c)
++#define GET_TICK_CNT(c, name)
++#endif
++
++#ifndef __BUMP
++#define __BUMP(str)
++#define __ZERO_TICK_CNT(cntr)
++#define __GET_TICK_CNT(cntr, name)
++#endif
++
++#ifdef PERF_MONITOR
++MACRO(PERF_MONITOR_GET, [
++ stw r5,xDEBUG_SCR1(r1)
++ mfspr r5,S_PMC2
++ stw r4,xDEBUG_SCR2(r1)
++ mfmsr r4
++ ori r4,r4,MSR_PE
++ mtmsr r4
++ stw r5,xDEBUG0(r1)
++ li r5,0
++ mtspr S_PMC2,r5
++ lwz r4,xDEBUG_SCR2(r1)
++ lwz r5,xDEBUG_SCR1(r1)
++])
++MACRO(PERF_MONITOR_SETUP, [scr], [
++ LOADI _scr,BIT(2) | BIT(3) | BIT(31) // count in SV-mode if PM is zero.
++ mtspr S_MMCR0,_scr
++ li _scr,0
++ mtspr S_MMCR1,_scr
++ li _scr,0
++ mtspr S_PMC2,_scr
++])
++#else /* PERF_MONITOR */
++#define PERF_MONITOR_GET
++MACRO(PERF_MONITOR_SETUP, [scr], [])
++#endif
++
++
++/************************************************************************/
++/* debug */
++/************************************************************************/
++
++MACRO(STOP_EMULATION, [val], [
++ stw r3,xDEBUG_SCR1(r1)
++ li r3,_val
++ stw r3,xKERNEL_DBG_STOP(r1)
++ li r3,1
++ stw r3,xINTERRUPT(r1)
++ lwz r3,xDEBUG_SCR1(r1)
++])
++
++MACRO(DEBUG_TRACE, [num, dummy], [
++ stw r3,xDEBUG_SCR1(r1)
++ lwz r3,xDEBUG_TRACE(r1)
++ addi r3,r3,1
++ stw r3,xDEBUG_TRACE(r1)
++ stw r3,(xDEBUG0+4*_num)(r1)
++ lwz r3,xDEBUG_SCR1(r1)
++])
++
++MACRO(TRACE_VAL, [val, dummy], [
++#if DBG_TRACE
++ stw r30,xDEBUG_SCR1(r1)
++ stw r29,xDEBUG_SCR2(r1)
++ lwz r30,xDEBUG_TRACE(r1)
++ rlwinm r30,r30,0,24,31 // 256 entries
++ rlwinm r30,r30,2,22,29
++ addi r30,r30,xDBG_TRACE_SPACE
++ lis r29,HA(_val)
++ addi r29,r29,LO(_val)
++ stwx r29,r30,r1
++ lwz r30,xDEBUG_TRACE(r1)
++ addi r30,r30,1
++ rlwinm r30,r30,0,24,31 // 256 entries
++ stw r30,xDEBUG_TRACE(r1)
++ lwz r29,xDEBUG_SCR2(r1)
++ lwz r30,xDEBUG_SCR1(r1)
++#endif
++])
++#define TRACE( a,b ) TRACE_VAL a,b
++
++
++#endif /* _H_ASMDBG */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/asmdefs.h
+@@ -0,0 +1,397 @@
++/* -*- asm -*-
++ *
++ * Creation Date: <2001/02/03 19:38:07 samuel>
++ * Time-stamp: <2004/02/22 15:36:20 samuel>
++ *
++ * <asmdefs.h>
++ *
++ * Common assembly definitions
++ *
++ * Copyright (C) 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_ASMDEFS
++#define _H_ASMDEFS
++
++#include "asm.m4"
++
++#ifndef __ASSEMBLY__
++#error This file is only to be included from assembler code!
++#endif
++
++
++/************************************************************************/
++/* High/low halfword compatibility macros */
++/************************************************************************/
++
++#ifdef __linux__
++#define ha16( v ) (v)##@ha
++#define hi16( v ) (v)##@h
++#define lo16( v ) (v)##@l
++#endif
++#define HA(v) ha16(v)
++#define HI(v) hi16(v)
++#define LO(v) lo16(v)
++
++/************************************************************************/
++/* Stack Frame */
++/************************************************************************/
++
++#ifdef __linux__
++#define STACK_LR_OFFSET 4
++#endif
++
++#ifdef __darwin__
++#define STACK_LR_OFFSET 8 /* 4 is the CR offset */
++#endif
++
++/************************************************************************/
++/* Register name prefix */
++/************************************************************************/
++
++#ifdef __linux__
++define([rPREFIX], [])
++define([fPREFIX], [])
++define([srPREFIX], [])
++#else
++define([rPREFIX], [r])
++define([fPREFIX], [f])
++define([srPREFIX], [sr])
++/* frN -> fN */
++mFORLOOP([i],0,31,[define(fr[]i,f[]i)])
++#endif
++
++/************************************************************************/
++/* Macros and definitions */
++/************************************************************************/
++
++#ifdef __darwin__
++#define balign_4 .align 2,0
++#define balign_8 .align 3,0
++#define balign_16 .align 4,0
++#define balign_32 .align 5,0
++#endif
++
++#ifdef __linux__
++#define balign_4 .balign 4,0
++#define balign_8 .balign 8,0
++#define balign_16 .balign 16,0
++#define balign_32 .balign 32,0
++#endif
++
++MACRO(LOADVAR, [dreg, variable], [
++ lis _dreg,HA(_variable)
++ lwz _dreg,LO(_variable)(_dreg)
++])
++
++MACRO(LOADI, [dreg, addr], [
++ lis _dreg,HA(_addr)
++ addi _dreg,_dreg,LO(_addr)
++])
++
++MACRO(LOAD_GPR_RANGE, [start, endx, offs, base], [
++ mFORLOOP([i],0,31,[ .if (i >= _start) & (i <= _endx)
++ lwz rPREFIX[]i,_offs+i[]*4(_base)
++ .endif
++])])
++
++MACRO(STORE_GPR_RANGE, [start, endx, offs, base], [
++ mFORLOOP([i],0,31,[ .if (i >= _start) & (i <= _endx)
++ stw rPREFIX[]i,_offs+i[]*4(_base)
++ .endif
++])])
++
++MACRO(LOAD_FPR_RANGE, [start, endx, offs, base], [
++ mFORLOOP([i],0,31,[ .if (i >= _start) & (i <= _endx)
++ lfd fPREFIX[]i,_offs+i[]*8(_base)
++ .endif
++])])
++
++MACRO(STORE_FPR_RANGE, [start, endx, offs, base], [
++ mFORLOOP([i],0,31,[ .if (i >= _start) & (i <= _endx)
++ stfd fPREFIX[]i,_offs+i[]*8(_base)
++ .endif
++])])
++
++/************************************************************************/
++/* FPU load/save macros */
++/************************************************************************/
++
++ // The FPU macros are used both in the kernel and in
++ // mainloop_asm.h.
++
++MACRO(xFPR_LOAD_RANGE, [from, to, mbase], [
++ LOAD_FPR_RANGE _from,_to,xFPR_BASE,_mbase
++])
++MACRO(xFPR_SAVE_RANGE, [from, to, mbase], [
++ STORE_FPR_RANGE _from,_to,xFPR_BASE,_mbase
++])
++ // The low half of the fpu is fr0-fr12. I.e. the FPU registers
++ // that might be overwritten when a function call is taken
++ // (fr13 and fpscr are treated specially).
++
++MACRO(xLOAD_LOW_FPU, [mbase], [
++ xFPR_LOAD_RANGE 0,12,_mbase
++])
++
++MACRO(xLOAD_TOPHALF_FPU, [mbase], [
++ xFPR_LOAD_RANGE 14,31,_mbase
++])
++MACRO(xLOAD_FULL_FPU, [mbase], [
++ xLOAD_LOW_FPU _mbase
++ xLOAD_TOPHALF_FPU _mbase
++])
++
++MACRO(xSAVE_LOW_FPU, [mbase], [
++ xFPR_SAVE_RANGE 0,12,_mbase
++])
++MACRO(xSAVE_TOPHALF_FPU, [mbase], [
++ xFPR_SAVE_RANGE 14,31,_mbase
++])
++MACRO(xSAVE_FULL_FPU, [mbase], [
++ xSAVE_LOW_FPU _mbase
++ xSAVE_TOPHALF_FPU _mbase
++])
++
++
++/************************************************************************/
++/* GPR load/save macros */
++/************************************************************************/
++
++MACRO(xGPR_SAVE_RANGE, [from, to, mbase], [
++ STORE_GPR_RANGE _from, _to, xGPR0, _mbase
++])
++
++MACRO(xGPR_LOAD_RANGE, [from, to, mbase], [
++ LOAD_GPR_RANGE _from, _to, xGPR0, _mbase
++])
++
++
++/************************************************************************/
++/* AltiVec */
++/************************************************************************/
++
++#ifdef __linux__
++
++define(vPREFIX,[])
++
++#ifndef HAVE_ALTIVEC
++#define VEC_OPCODE( op1,op2,A,B,C ) \
++ .long (((op1) << (32-6)) | (op2) | ((A) << (32-11)) | ((B) << (32-16)) | ((C) << (32-21))) ;
++
++#define __stvx( vS,rA,rB ) VEC_OPCODE( 31,0x1ce,vS,rA,rB )
++#define __lvx( vD,rA,rB ) VEC_OPCODE( 31,0xce, vD,rA,rB )
++#define __mfvscr( vD ) VEC_OPCODE( 4,1540,vD,0,0 )
++#define __mtvscr( vB ) VEC_OPCODE( 4,1604,0,0,vB )
++#define __stvewx( vS,rA,rB ) VEC_OPCODE( 31,(199<<1), vS,rA,rB )
++
++mFORLOOP([i],0,31,[define(v[]i,[]i)])
++MACRO(stvx, [vS,rA,rB], [ __stvx( _vS,_rA,_rB ) ; ])
++MACRO(lvx, [vD,rA,rB], [ __lvx( _vD,_rA,_rB ) ; ])
++MACRO(mfvscr, [vD], [ __mfvscr( _vD ) ; ])
++MACRO(mtvscr, [vB], [ __mtvscr( _vB ) ; ])
++MACRO(stvewx, [vS,rA,rB], [ __stvewx( _vS,_rA,_rB ) ; ])
++#endif
++#else /* __linux__ */
++
++define(vPREFIX,[v])
++
++#endif /* __linux__ */
++
++
++// NOTE: Writing to VSCR won't cause exceptions (this
++// is different compared to FPSCR).
++
++MACRO(xVEC_SAVE, [mbase, scr], [
++ addi _scr,_mbase,xVEC_BASE
++ mFORLOOP([i],0,31,[
++ stvx vPREFIX[]i,0,_scr
++ addi _scr,_scr,16
++ ])
++ addi _scr,_mbase,xVSCR-12
++ mfvscr v0
++ stvx v0,0,_scr
++ addi _scr,_mbase,xVEC0
++ lvx v0,0,_scr
++ mfspr _scr,S_VRSAVE
++ stw _scr,xVRSAVE(_mbase)
++])
++
++MACRO(xVEC_LOAD, [mbase, scr], [
++ addi _scr,_mbase,xVSCR-12
++ lvx v0,0,_scr
++ mtvscr v0
++ addi _scr,_mbase,xVEC_BASE
++ mFORLOOP([i],0,31,[
++ lvx vPREFIX[]i,0,_scr
++ addi _scr,_scr,16
++ ])
++ lwz _scr,xVRSAVE(_mbase)
++ mtspr S_VRSAVE,_scr
++])
++
++/************************************************************************/
++/* Instructions */
++/************************************************************************/
++
++#ifdef __darwin__
++MACRO(mtsprg0, [reg], [mtspr SPRG0,_reg] )
++MACRO(mtsprg1, [reg], [mtspr SPRG1,_reg] )
++MACRO(mtsprg2, [reg], [mtspr SPRG2,_reg] )
++MACRO(mtsprg3, [reg], [mtspr SPRG3,_reg] )
++MACRO(mfsprg0, [reg], [mfspr _reg,SPRG0] )
++MACRO(mfsprg1, [reg], [mfspr _reg,SPRG1] )
++MACRO(mfsprg2, [reg], [mfspr _reg,SPRG2] )
++MACRO(mfsprg3, [reg], [mfspr _reg,SPRG3] )
++#endif
++
++/************************************************************************/
++/* Register names */
++/************************************************************************/
++
++#define cr0_lt 0
++#define cr0_gt 1
++#define cr0_eq 2
++#define cr0_so 3
++
++#define cr1_lt 4
++#define cr1_gt 5
++#define cr1_eq 6
++#define cr1_so 7
++
++#define cr2_lt 8
++#define cr2_gt 9
++#define cr2_eq 10
++#define cr2_so 11
++
++#define cr3_lt 12
++#define cr3_gt 13
++#define cr3_eq 14
++#define cr3_so 15
++
++#ifdef __darwin__
++/* IMPORTANT: we may *not* define crN on darwin; If we do, the
++ * assembler will generate bogus code. For instance,
++ * bne cr1,1f is not equivalent to bne 1,1f but to
++ * bne 4,1f...
++ */
++#define lt 0 /* Less than */
++#define gt 1 /* Greater than */
++#define eq 2 /* Equal */
++#define so 3 /* Summary Overflow */
++#define un 3 /* Unordered (after floating point) */
++#endif
++
++/* FPU register names (to be used as macro arguments) */
++#define FR0 0
++#define FR1 1
++#define FR2 2
++#define FR3 3
++#define FR4 4
++#define FR5 5
++#define FR6 6
++#define FR7 7
++#define FR8 8
++#define FR9 9
++#define FR10 10
++#define FR11 11
++#define FR12 12
++#define FR13 13
++#define FR14 14
++#define FR15 15
++#define FR16 16
++#define FR17 17
++#define FR18 18
++#define FR19 19
++#define FR20 20
++#define FR21 21
++#define FR22 22
++#define FR23 23
++#define FR24 24
++#define FR25 25
++#define FR26 26
++#define FR27 27
++#define FR28 28
++#define FR29 29
++#define FR30 30
++#define FR31 31
++
++/* GPR register names (to be used as macro arguments) */
++#define R0 0
++#define R1 1
++#define R2 2
++#define R3 3
++#define R4 4
++#define R5 5
++#define R6 6
++#define R7 7
++#define R8 8
++#define R9 9
++#define R10 10
++#define R11 11
++#define R12 12
++#define R13 13
++#define R14 14
++#define R15 15
++#define R16 16
++#define R17 17
++#define R18 18
++#define R19 19
++#define R20 20
++#define R21 21
++#define R22 22
++#define R23 23
++#define R24 24
++#define R25 25
++#define R26 26
++#define R27 27
++#define R28 28
++#define R29 29
++#define R30 30
++#define R31 31
++
++#ifndef __darwin__
++
++/* GPR register names, rN -> N, frN -> N, vN -> N */
++mFORLOOP([i],0,31,[define(r[]i,[]i)])
++mFORLOOP([i],0,31,[define(fr[]i,[]i)])
++mFORLOOP([i],0,31,[define(v[]i,[]i)])
++
++#endif /* __darwin__ */
++
++
++/************************************************************************/
++/* useful macros */
++/************************************************************************/
++
++MACRO(ori_, [reg1, reg2, value], [
++ .if (_value & 0xffff)
++ ori _reg1, _reg2, (_value) & 0xffff
++ .endif
++ .if (_value & ~0xffff)
++ oris _reg1, _reg2, (_value) >> 16
++ .endif
++])
++
++/************************************************************************/
++/* MISC */
++/************************************************************************/
++
++#ifdef __linux__
++#define GLOBL( name ) .globl name ; name
++#define EXTERN( name ) name
++#else
++/* an underscore is needed on Darwin */
++#define GLOBL( name ) .globl _##name ; name: ; _##name
++#define EXTERN( name ) _##name
++#endif
++
++#define BIT(n) (1<<(31-(n)))
++
++#endif /* _H_ASMDEFS */
++
+--- /dev/null
++++ b/drivers/macintosh/mol/include/asmfuncs.h
+@@ -0,0 +1,80 @@
++/*
++ * Creation Date: <1999/09/26 01:02:58 samuel>
++ * Time-stamp: <2003/07/27 19:20:24 samuel>
++ *
++ * <asmfuncs.h>
++ *
++ * Exports from <base.S>
++ *
++ * Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_ASMFUNCS
++#define _H_ASMFUNCS
++
++#include "kernel_vars.h"
++#include "tlbie.h"
++
++
++/* global variable defined in actions.c */
++extern int reloc_virt_offs;
++#define reloc_ptr( v ) ((ulong)(v) + (ulong)reloc_virt_offs)
++
++
++/* The code in base.o (all low-level assembly) is copied to a physically
++ * contiguous memory area. The following inline functions map function calls
++ * to the relocated area.
++ */
++
++static inline void msr_altered( kernel_vars_t *kv ) {
++ typedef void ftype( kernel_vars_t * );
++ extern ftype r__msr_altered;
++ (*(ftype*)reloc_ptr( r__msr_altered ))( kv );
++}
++
++static inline void invalidate_splitmode_sr( kernel_vars_t *kv ) {
++ typedef void ftype( kernel_vars_t *);
++ extern ftype r__invalidate_splitmode_sr;
++ (*(ftype*)reloc_ptr( r__invalidate_splitmode_sr ))( kv );
++}
++
++static inline void initialize_spr_table( kernel_vars_t *kv ) {
++ typedef void ftype( kernel_vars_t *);
++ extern ftype r__initialize_spr_table;
++ (*(ftype*)reloc_ptr( r__initialize_spr_table ))( kv );
++}
++
++
++/************************************************************************/
++/* misc inlines */
++/************************************************************************/
++
++#define _sync() ({ asm volatile("sync ;\n isync" : : ); })
++
++static inline ulong _get_sdr1( void ) {
++ ulong sdr1;
++ asm volatile("mfsdr1 %0" : "=r" (sdr1) : );
++ return sdr1;
++}
++static inline void _set_sdr1( ulong sdr1 ) {
++ asm volatile("mtsdr1 %0" : : "r" (sdr1) );
++}
++
++static inline int cpu_is_601( void ) {
++ ulong pvr;
++ asm volatile("mfpvr %0" : "=r" (pvr) : );
++ return (pvr>>16)==1;
++}
++
++static inline int cpu_is_603( void ) {
++ ulong pvr;
++ asm volatile("mfpvr %0" : "=r" (pvr) : );
++ pvr = pvr >> 16;
++ return pvr==3 || pvr==6 || pvr==7; /* 603, 603e, 603ev */
++}
++#endif /* _H_ASMFUNCS */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/atomic.h
+@@ -0,0 +1,26 @@
++/*
++ * Creation Date: <2004/01/25 17:00:12 samuel>
++ * Time-stamp: <2004/01/29 22:32:30 samuel>
++ *
++ * <atomic.h>
++ *
++ *
++ *
++ * Copyright (C) 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ *
++ */
++
++#ifndef _H_ATOMIC
++#define _H_ATOMIC
++
++#define mol_atomic_t atomic_t
++#define atomic_inc_return_mol(x) atomic_inc_return(x)
++#define atomic_inc_mol(x) atomic_inc(x)
++#define atomic_dec_mol(x) atomic_dec(x)
++#define atomic_read_mol(x) atomic_read(x)
++
++#endif /* _H_ATOMIC */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/config.h
+@@ -0,0 +1,90 @@
++/* config.h. Generated from config.h.in by configure. */
++/* config.h.in. Generated from configure.in by autoheader. */
++
++/* Define if _syscall macro is broken */
++#define BROKEN_SYSCALL 1
++
++/* Defined if ALSA support is available */
++#define HAVE_ALSA 1
++
++/* Define if clearenv is available */
++#define HAVE_CLEARENV 1
++
++/* Define to 1 if you have the <getopt.h> header file. */
++#define HAVE_GETOPT_H 1
++
++/* Define to 1 if you have the <inttypes.h> header file. */
++#define HAVE_INTTYPES_H 1
++
++/* Define to 1 if you have the <memory.h> header file. */
++#define HAVE_MEMORY_H 1
++
++/* Define to 1 if you have the <obstack.h> header file. */
++#define HAVE_OBSTACK_H 1
++
++/* Define if libpng support is to be included */
++#define HAVE_PNG 1
++
++/* Define to 1 if you have the <poll.h> header file. */
++#define HAVE_POLL_H 1
++
++/* Define to 1 if you have the <stdint.h> header file. */
++#define HAVE_STDINT_H 1
++
++/* Define to 1 if you have the <stdlib.h> header file. */
++#define HAVE_STDLIB_H 1
++
++/* Define to 1 if you have the <strings.h> header file. */
++#define HAVE_STRINGS_H 1
++
++/* Define to 1 if you have the <string.h> header file. */
++#define HAVE_STRING_H 1
++
++/* Define to 1 if you have the <sys/stat.h> header file. */
++#define HAVE_SYS_STAT_H 1
++
++/* Define to 1 if you have the <sys/types.h> header file. */
++#define HAVE_SYS_TYPES_H 1
++
++/* Define to 1 if you have the <unistd.h> header file. */
++#define HAVE_UNISTD_H 1
++
++/* Define if X11 is available */
++#define HAVE_X11 1
++
++/* Define if XDGA support is to be included */
++#define HAVE_XDGA 1
++
++/* Defined for the MOL user binary */
++#define MOL_PROPER 1
++
++/* Define if off_t is known to be 64-bit */
++#define OFF_T_IS_64 1
++
++/* Define to the address where bug reports for this package should be sent. */
++#define PACKAGE_BUGREPORT ""
++
++/* Define to the full name of this package. */
++#define PACKAGE_NAME ""
++
++/* Define to the full name and version of this package. */
++#define PACKAGE_STRING ""
++
++/* Define to the one symbol short name of this package. */
++#define PACKAGE_TARNAME ""
++
++/* Define to the version of this package. */
++#define PACKAGE_VERSION ""
++
++/* Define to 1 if you have the ANSI C header files. */
++#define STDC_HEADERS 1
++
++/* Define if uc_context has gregs field */
++/* #undef UCCONTEXT_HAS_GREGS */
++
++/* Define to 1 if the X Window System is missing or not being used. */
++/* #undef X_DISPLAY_MISSING */
++
++/* Define to 1 if `lex' declares `yytext' as a `char *' by default, not a
++ `char[]'. */
++#define YYTEXT_POINTER 1
+--- /dev/null
++++ b/drivers/macintosh/mol/include/constants.h
+@@ -0,0 +1,36 @@
++/*
++ * Creation Date: <2001/02/11 18:19:42 samuel>
++ * Time-stamp: <2003/07/27 18:58:35 samuel>
++ *
++ * <constants.h>
++ *
++ * Constants used both in the kernel module and in the emulator
++ *
++ * Copyright (C) 2001, 2002, 2003 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_CONSTANTS
++#define _H_CONSTANTS
++
++/* flags for _breakpoint_flags() */
++#define BREAK_RFI 1 /* break at next rfi */
++#define BREAK_SINGLE_STEP 2 /* singlestep */
++#define BREAK_EA_PAGE 4 /* break when mdbg_break_ea is mapped */
++#define BREAK_USER 8 /* break when MSR_PR is set */
++#define BREAK_SINGLE_STEP_CONT 16 /* single step (but don't continue running) */
++
++/* action for _tune_spr() */
++#define kTuneSPR_Illegal 1 /* SPR is illegal */
++#define kTuneSPR_Privileged 2 /* SPR is privileged */
++#define kTuneSPR_Unprivileged 3 /* SPR is unprivileged */
++#define kTuneSPR_ReadWrite 4 /* SPR is read-write */
++#define kTuneSPR_ReadOnly 5 /* SPR is read-only */
++
++#endif /* _H_CONSTANTS */
++
++
+--- /dev/null
++++ b/drivers/macintosh/mol/include/context.h
+@@ -0,0 +1,62 @@
++/*
++ * Creation Date: <2004/01/25 17:48:51 samuel>
++ * Time-stamp: <2004/01/25 22:12:13 samuel>
++ *
++ * <context.h>
++ *
++ *
++ *
++ * Copyright (C) 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ *
++ */
++
++#ifndef _H_CONTEXT
++#define _H_CONTEXT
++
++#define CTX_MASK 0xfffff /* VSID_MASK >> 4 */
++
++/*
++ * Three types of contexts are used:
++ *
++ * VSIDs (24 bit, loaded into the CPU register)
++ * mol_contexts (number between FIRST_ and LAST_MOL_CONTEXT)
++ * arch_contexts (context number used by the kernel)
++ *
++ * The relation between them is
++ *
++ * mol_context = (os_context << 4) + segment#
++ * VSID_context = MUNGE_CONTEXT(mol_context>>4) + ESID_ADD * (mol_context & 0xf)
++ */
++
++/*
++ * The new MM implementation (about 2.4.14 (?)) uses the following algorithm
++ *
++ * VSID = (((context * 897) << 4) + ((va>>28) * 0x111)) & 0xffffff
++ *
++ * Only context 0..32767 are used. We can use context 32768..0xfffff.
++ * The old MM implementation used
++ *
++ * VSID = (((context * 897) << 4) + (va>>28)) & 0xffffff
++ */
++
++#define MUNGE_ADD_NEXT 897
++#define MUNGE_MUL_INVERSE 2899073 /* Multiplicative inverse of 897 (modulo VSID_MASK+1) */
++#define MUNGE_ESID_ADD 0x111
++#define MUNGE_CONTEXT(c) (((c) * (MUNGE_ADD_NEXT * 16)) & (CTX_MASK <<4))
++
++/* mol_contexts == linux_context * 16 + esid */
++#define PER_SESSION_CONTEXTS 0x10000 /* more than we will need (10^6) */
++#define FIRST_MOL_CONTEXT(sess) ((CTX_MASK - PER_SESSION_CONTEXTS*((sess)+1)) << 4)
++#define LAST_MOL_CONTEXT(sess) (((CTX_MASK - PER_SESSION_CONTEXTS*(sess)) << 4) | 0xf)
++
++
++#if FIRST_MOL_CONTEXT(MAX_NUM_SESSIONS-1) < (32768 << 4)
++#error "Too many MOL contexts..."
++#endif
++
++
++#endif /* _H_CONTEXT */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/dbg.h
+@@ -0,0 +1,31 @@
++/*
++ * Creation Date: <2004/04/10 22:14:43 samuel>
++ * Time-stamp: <2004/04/10 22:26:24 samuel>
++ *
++ * <dbg.h>
++ *
++ *
++ *
++ * Copyright (C) 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ */
++
++#ifndef _H_DBG
++#define _H_DBG
++
++#ifdef CONFIG_MOL_HOSTED
++
++#ifdef printk
++#undef printk
++#endif
++
++#define printk printm
++extern int printm( const char *fmt, ... );
++extern void debugger( int n );
++
++#endif /* CONFIG_MOL_HOSTED */
++
++#endif /* _H_DBG */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/debugger.h
+@@ -0,0 +1,96 @@
++/*
++ * Creation Date: <1999/02/22 22:46:22 samuel>
++ * Time-stamp: <2003/07/27 14:42:05 samuel>
++ *
++ * <debugger.h>
++ *
++ * World interface of the debugger
++ *
++ * Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_DEBUGGER
++#define _H_DEBUGGER
++
++#ifdef CONFIG_DEBUGGER
++extern void debugger_init( void );
++extern void debugger_cleanup( void );
++extern int debugger_enabled( void );
++extern int debugger_attached( void );
++#else
++static inline int debugger_enabled( void ) { return 0; }
++static inline int debugger_attached( void ) { return 0; }
++static inline void debugger_init( void ) {}
++static inline void debugger_cleanup( void ) {}
++#endif
++
++/*******************************************/
++/* monitor.c / nub.c */
++/*******************************************/
++
++extern void refresh_debugger_window( void );
++extern void refresh_debugger( void );
++extern void redraw_inst_win( void );
++extern int debugger_in_68k_mode( void );
++extern void debugger_nub_poll( void );
++
++/* debug actions */
++enum{
++ kDbgNOP=0, kDbgGo, kDbgGoRFI, kDbgStep, kDbgExit, kDbgStop, kDbgGoUser
++};
++
++
++/*******************************************/
++/* cmdline.c / nub.c */
++/*******************************************/
++
++/* put functions used exclusively in debugger mode in the dbg section */
++#ifdef __linux__
++#ifdef CONFIG_DEBUGGER
++#define __dbg __attribute__ ((__section__ (".moldbg")))
++#define __dcmd __dbg
++#else
++#define __dbg __attribute__ ((__section__ (".moldbg")))
++#define __dcmd inline __attribute__ ((__section__ (".moldbg")))
++#endif
++#else
++#define __dbg
++#define __dcmd inline
++#endif
++
++typedef struct {
++ const char *name;
++ int (*func)( int argc, char **argv );
++ const char *help;
++} dbg_cmd_t;
++
++typedef int (*dbg_cmd_fp)( int argc, char **argv );
++
++#ifdef CONFIG_DEBUGGER
++extern void add_cmd( const char *cmdname, const char *help,
++ int dummy, dbg_cmd_fp func );
++extern void add_dbg_cmds( dbg_cmd_t *table, int tablesize );
++#else
++#define add_cmd( a,b,c,d ) do {} while(0)
++static inline void add_dbg_cmds( dbg_cmd_t *table, int tablesize ) {}
++#endif
++
++/* for debugging */
++#define HARD_BREAKPOINT \
++ ({ printm("Hardcoded breakpoint in '"__FILE__"'\n"); stop_emulation(); })
++
++
++/*******************************************/
++/* mmu_cmds.c */
++/*******************************************/
++
++extern int __dbg get_inst_context( void );
++extern int __dbg get_data_context( void );
++extern int __dbg ea_to_lvptr( ulong ea, int context, char **lvptr, int data_access );
++
++#endif /* _H_DEBUGGER */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/emu.h
+@@ -0,0 +1,29 @@
++/*
++ * Creation Date: <2003/01/26 00:45:55 samuel>
++ * Time-stamp: <2003/01/27 01:26:25 samuel>
++ *
++ * <emu.h>
++ *
++ * Emulation of some assembly functions
++ *
++ * Copyright (C) 1998, 2000, 2001, 2002, 2003 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_EMU
++#define _H_EMU
++
++#include "mmu.h"
++
++extern int do_mtsdr1( kernel_vars_t *kv, ulong value );
++extern int do_mtbat( kernel_vars_t *kv, int sprnum, ulong value, int force );
++
++extern int alloc_emuaccel_slot( kernel_vars_t *kv, int inst_flags, int param, int inst_addr );
++extern int mapin_emuaccel_page( kernel_vars_t *kv, int mphys );
++
++
++#endif /* _H_EMU */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/emuaccel_sh.h
+@@ -0,0 +1,41 @@
++/*
++ * Creation Date: <2003/01/25 14:57:36 samuel>
++ * Time-stamp: <2003/01/27 23:11:29 samuel>
++ *
++ * <emuaccel.h>
++ *
++ * Acceleration of the emulation of certain privileged instructions
++ *
++ * Copyright (C) 2003 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_EMUACCEL
++#define _H_EMUACCEL
++
++/* OSI_AllocInstAccelSlot( EMULATE_xxx + source_reg, NIP ) */
++
++#define EMUACCEL_MAPIN_PAGE 0
++#define EMUACCEL_MTSRR0 (1 << 5)
++#define EMUACCEL_MTSRR1 (2 << 5)
++#define EMUACCEL_MTSPRG0 (3 << 5)
++#define EMUACCEL_MTSPRG1 (4 << 5)
++#define EMUACCEL_MTSPRG2 (5 << 5)
++#define EMUACCEL_MTSPRG3 (6 << 5)
++#define EMUACCEL_MTMSR (7 << 5)
++#define EMUACCEL_RFI (8 << 5)
++#define EMUACCEL_UPDATE_DEC (9 << 5) /* update xDEC */
++#define EMUACCEL_MTSR ((10 << 5) | EMUACCEL_HAS_PARAM)
++#define EMUACCEL_NOP (11 << 5)
++#define EMUACCEL_MTHID0 (12 << 5)
++
++#define EMUACCEL_HAS_PARAM (1 << 10)
++#define EMUACCEL_INST_MASK 0xffe0
++#define EMUACCEL_DESTREG_MASK 0x1f
++
++
++#endif /* _H_EMUACCEL */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/extralib.h
+@@ -0,0 +1,70 @@
++/*
++ * Creation Date: <1997/07/02 19:52:18 samuel>
++ * Time-stamp: <2004/04/03 18:29:26 samuel>
++ *
++ * <extralib.h>
++ *
++ *
++ *
++ * Copyright (C) 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_EXTRALIB
++#define _H_EXTRALIB
++
++#define CLEAR( x ) memset( &x, 0, sizeof(x))
++
++/* in extralib.h */
++extern int ilog2( int val );
++
++extern char *num_to_string( ulong num );
++extern int is_number_str( char *str );
++extern ulong string_to_ulong( char * );
++extern ulong hexbin( int num );
++
++extern char *strncpy0( char *dest, const char *str, size_t n );
++extern char *strncat0( char *dest, const char *str, size_t n );
++extern char *strncat3( char *dest, const char *s1, const char *s2, size_t n );
++extern char *strnpad( char *dest, const char *s1, size_t n );
++
++struct iovec;
++extern int memcpy_tovec( const struct iovec *vec, size_t nvec, const char *src, unsigned int len );
++extern int memcpy_fromvec( char *dst, const struct iovec *vec, size_t nvec, unsigned int len );
++extern int iovec_getbyte( int offs, const struct iovec *vec, size_t nvec );
++extern int iovec_skip( int skip, struct iovec *vec, size_t nvec );
++
++extern void open_logfile( const char *filename );
++extern void close_logfile( void );
++
++#define __printf_format __attribute__ ((format (printf, 1, 2)))
++extern int printm( const char *fmt,...) __printf_format;
++extern int aprint( const char *fmt,... ) __printf_format;
++extern void perrorm(const char *fmt,... ) __printf_format;
++extern void fatal(const char *fmt,... ) __printf_format __attribute__((noreturn));
++#define fatal_err(fmt, args...) \
++ do { printm("Fatal error: "); perrorm(fmt, ## args ); exit(1); } while(0)
++extern void fail_nil( void *p );
++extern void set_print_hook( int (*hook)(char *buf) );
++extern void set_print_guard( void (*hook)(void) );
++
++extern int script_exec( char *name, char *arg1, char *arg2 );
++
++/* in unicode.c */
++extern int asc2uni( unsigned char *ustr, const char *astr, int maxlen );
++extern int uni2asc( char *astr, const unsigned char *ustr, int ustrlen, int maxlen );
++
++/* in backtrace.c */
++extern void print_btrace_sym( ulong addr, const char *sym_filename );
++
++/* color output support */
++#define C_GREEN "\33[1;32m"
++#define C_YELLOW "\33[1;33m"
++#define C_NORMAL "\33[0;39m"
++#define C_RED "\33[1;31m"
++
++#endif /* _H_EXTRALIB */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/hash.h
+@@ -0,0 +1,36 @@
++/*
++ * Creation Date: <2004/02/14 11:45:23 samuel>
++ * Time-stamp: <2004/02/21 21:24:46 samuel>
++ *
++ * <hash.h>
++ *
++ *
++ *
++ * Copyright (C) 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_HASH
++#define _H_HASH
++
++typedef struct {
++ ulong sdr1; /* sdr1 used by MOL */
++
++ ulong pteg_mask; /* pteg offset mask (e.g. 0xffc0) */
++ ulong pte_mask; /* pte offset mask (e.g. 0xfff8) */
++
++ ulong *base; /* kernel mapping of hash */
++ ulong physbase; /* physical address of hash */
++} hash_info_t;
++
++extern hash_info_t ptehash;
++
++extern int init_hash( void );
++extern void cleanup_hash( void );
++
++
++#endif /* _H_HASH */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/kernel_vars.h
+@@ -0,0 +1,225 @@
++/*
++ * Creation Date: <97/07/14 15:53:06 samuel>
++ * Time-stamp: <2004/02/21 21:37:37 samuel>
++ *
++ * <kernel_vars.h>
++ *
++ * Variables used by the kernel
++ *
++ * Copyright (C) 1997-2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef H_KERNEL_VARS
++#define H_KERNEL_VARS
++
++#define MAX_NUM_SESSIONS 8
++
++#include "mac_registers.h"
++
++#ifdef PERFORMANCE_INFO
++#define NUM_ASM_BUMP_CNTRS 64
++#define NUM_ASM_TICK_CNTRS 6
++#endif
++
++#ifndef __ASSEMBLY__
++#include "mmu_mappings.h"
++#include "skiplist.h"
++
++#ifndef DUMPVARS
++#include "alloc.h"
++#include "locks.h"
++#include "atomic.h"
++#else
++typedef int mol_mutex_t;
++typedef int mol_atomic_t;
++#endif
++
++typedef struct {
++ ulong word[2]; /* upper, lower */
++} ppc_bat_t;
++
++typedef struct mac_bat {
++ int valid; /* record in use */
++ ulong base;
++ ulong mbase;
++ ulong size;
++
++ ulong wimg:4; /* WIMG-bits */
++ ulong vs:1; /* valid supervisor mode */
++ ulong vp:1; /* valid user mode */
++ ulong ks:1; /* key superuser */
++ ulong ku:1; /* key user */
++ ulong pp:2; /* page protection */
++
++ /* possibly we should track inserted PTEs here... */
++} mac_bat_t;
++
++typedef struct {
++ struct vsid_ent *vsid[16]; /* entries might be NULL */
++ struct vsid_ent *unmapped_vsid[16]; /* entries might be NULL, linux_vsid_sv used */
++
++ ulong emulator_sr[16]; /* segment registers used by the userspace process */
++
++ ulong user_sr[16]; /* segment registers for MSR=user */
++ ulong sv_sr[16]; /* segment registers for MSR=sv */
++ ulong unmapped_sr[16]; /* segment registers for unmapped mode */
++ ulong split_sr[16]; /* segment registers used in split mode */
++
++ ulong cur_sr_base; /* (physical) pointer to user_sr or sv_sr */
++ ulong sr_inst; /* (physical) pointer to us user_sr or sv_sr */
++ ulong sr_data; /* (physical) pointer to us user_sr or sv_sr */
++
++ ulong illegal_sr; /* used for the lazy segment register impl. */
++
++ ppc_bat_t split_dbat0; /* loaded to DBAT0 (used in splitmode) */
++ ppc_bat_t transl_dbat0; /* DBAT0 mapping the framebuffer */
++
++ ulong emulator_context; /* context of emulator (equals VSID >> 4) in Linux */
++
++ ulong userspace_ram_base; /* userspace RAM base */
++ size_t ram_size;
++
++ ulong bat_hack_count; /* HACK to speed up MacOS 9.1 */
++ mac_bat_t bats[8]; /* 4 IBAT + 4 DBAT */
++
++#ifdef EMULATE_603
++ ulong ptes_d_ea_603[64]; /* EA4-EA19 of dPTE */
++ mPTE_t ptes_d_603[64]; /* Data on-chip PTEs (603-emulation) */
++ ulong ptes_i_ea_603[64]; /* EA4-EA19 of iPTE */
++ mPTE_t ptes_i_603[64]; /* Instruction on-chip PTEs (603-emulation) */
++#endif
++ /* emulated PTE hash */
++ ulong hash_mbase; /* mac physical hash base */
++ ulong *hash_base; /* kernel pointer to mac hash */
++ ulong hash_mask; /* hash mask (0x000fffff etc) */
++
++ /* context number allocation */
++ int next_mol_context; /* in the range FIRST .. LAST_MOL_CONTEXT(n) */
++ int first_mol_context; /* first context number this session may use */
++ int last_mol_context; /* last context number this session may use */
++
++ ulong pthash_sr; /* segment register corresponding to */
++ ulong pthash_ea_base; /* pthash_ea_base */
++ void *pthash_inuse_bits; /* bitvector (one bit per PTE) */
++ ulong pthash_inuse_bits_ph; /* physical base address */
++
++ /* various tables */
++ struct io_data *io_data; /* translation info */
++ struct fb_data *fb_data; /* ea -> PTE table */
++ struct tracker_data *tracker_data; /* Keeps track of modified pages */
++
++ /* mtable stuff */
++ skiplist_t vsid_sl; /* skiplist (with vsid_ent_t objects) */
++ struct vsid_info *vsid_info; /* mtable data */
++
++ char *lvptr_reservation; /* lvptr associated with PTE to be inserted */
++ int lvptr_reservation_lost; /* set if reservation is lost (page out) */
++
++#ifdef __darwin__
++ ulong os_sdr1; /* SDR1 used by the host OS */
++ ulong mol_sdr1; /* SDR1 used by MOL */
++#endif
++} mmu_vars_t;
++
++
++/* variables which are private to the low level assembly code */
++typedef struct {
++ ulong spr_hooks[NUM_SPRS]; /* hooks */
++
++ ppc_bat_t ibat_save[4]; /* kernel values of the BAT-registers */
++ ppc_bat_t dbat_save[4];
++
++ ulong _msr; /* MSR used in mac-mode (_not_ the emulated msr) */
++
++ /* saved kernel/emulator registers */
++ ulong emulator_nip;
++ ulong emulator_msr;
++ ulong emulator_sprg2;
++ ulong emulator_sprg3;
++ ulong emulator_kcall_nip;
++ ulong emulator_stack;
++ ulong emulator_toc; /* == r2 on certain systems */
++
++ /* DEC and timebase */
++ ulong dec_stamp; /* linux DEC = dec_stamp - tbl */
++ ulong int_stamp; /* next DEC event = int_stamp - tbl */
++
++ /* splitmode */
++ int split_nip_segment; /* segment (top 4) used for inst. fetches */
++
++ /* segment register offset table */
++ ulong msr_sr_table[ 4*8 ]; /* see emulation.S */
++
++ ulong tmp_scratch[4]; /* temporary storage */
++} base_private_t;
++
++
++#ifdef PERFORMANCE_INFO
++#define MAX_ACC_CNTR_DEPTH 8
++typedef struct acc_counter {
++ ulong stamp;
++ ulong subticks;
++} acc_counter_t;
++#endif
++
++typedef struct kernel_vars {
++ struct mac_regs mregs; /* must go first */
++ char page_filler[0x1000 - (sizeof(mac_regs_t)&0xfff) ];
++
++ base_private_t _bp;
++ char aligner[32 - (sizeof(base_private_t)&0x1f) ];
++ mmu_vars_t mmu;
++ char aligner2[16 - (sizeof(mmu_vars_t)&0xf) ];
++
++ ulong emuaccel_mphys; /* mphys address of emuaccel_page */
++ int emuaccel_size; /* size used */
++ ulong emuaccel_page_phys; /* phys address of page */
++ ulong emuaccel_page; /* page used for instruction acceleration */
++
++ int break_flags;
++ ulong kvars_tophys_offs; /* physical - virtual address of kvars */
++ struct kernel_vars *kvars_virt; /* me */
++ int session_index;
++
++ mol_mutex_t ioctl_sem; /* ioctl lock */
++#ifdef __darwin__
++ void *kcall_routine;
++ int kcall_args[3];
++ char *mregs_virtual; /* mregs address used by client */
++
++#endif
++
++#ifdef PERFORMANCE_INFO
++ ulong asm_bump_cntr[NUM_ASM_BUMP_CNTRS];
++ ulong asm_tick_stamp[NUM_ASM_TICK_CNTRS];
++ int num_acntrs;
++ acc_counter_t acntrs[MAX_ACC_CNTR_DEPTH];
++#endif
++
++ void *main_thread; /* pointer to the main thread task_struct */
++
++} kernel_vars_t;
++
++#define NUM_KVARS_PAGES ((sizeof(kernel_vars_t)+0xfff)/0x1000)
++
++typedef struct {
++ kernel_vars_t *kvars[MAX_NUM_SESSIONS];
++ int magic;
++ ulong kvars_ph[MAX_NUM_SESSIONS];
++ mol_mutex_t lock;
++ mol_atomic_t external_thread_cnt;
++} session_table_t;
++
++#define SESSION_LOCK down_mol( &g_sesstab->lock )
++#define SESSION_UNLOCK up_mol( &g_sesstab->lock )
++
++extern session_table_t *g_sesstab;
++
++
++#endif /* __ASSEMBLY__ */
++#endif
+--- /dev/null
++++ b/drivers/macintosh/mol/include/locks.h
+@@ -0,0 +1,39 @@
++/*
++ * Creation Date: <2004/01/25 16:31:13 samuel>
++ * Time-stamp: <2004/01/29 22:33:29 samuel>
++ *
++ * <locks.h>
++ *
++ *
++ *
++ * Copyright (C) 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ *
++ */
++
++#ifndef _H_LOCKS
++#define _H_LOCKS
++
++/* mutex locks */
++
++typedef struct semaphore mol_mutex_t;
++#define init_MUTEX_mol(mu) init_MUTEX( mu )
++#define free_MUTEX_mol(mu) do {} while(0)
++#define down_mol(x) down(x)
++#define up_mol(x) up(x)
++
++
++/* spinlocks */
++
++typedef spinlock_t mol_spinlock_t;
++#define spin_lock_mol(x) spin_lock(x)
++#define spin_unlock_mol(x) spin_unlock(x)
++//#define spin_lock_irqsave_mol(x, flags) spin_lock_irqsave(x, flags)
++//#define spin_unlock_irqrestore_mol(x,flags) spin_unlock_irqrestore(x, flags)
++#define spin_lock_init_mol(x) spin_lock_init(x)
++
++
++#endif /* _H_LOCKS */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/mac_registers.h
+@@ -0,0 +1,168 @@
++/*
++ * Creation Date: <97/06/24 22:25:04 samuel>
++ * Time-stamp: <2004/02/08 20:32:59 samuel>
++ *
++ * <mac_registers.h>
++ *
++ *
++ *
++ * Copyright (C) 1997-2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _MAC_REGISTERS_H
++#define _MAC_REGISTERS_H
++
++#ifndef __ASSEMBLY__
++
++#include "mmutypes.h"
++#include "processor.h"
++
++typedef struct {
++ unsigned long h,l;
++} fpu_register;
++
++#define NUM_DEBUG_REGS 10
++
++typedef struct {
++ unsigned long words[4];
++} altivec_reg_t;
++
++#define NR_HOST_IRQS 64
++
++typedef struct irq_bitfield {
++ unsigned long irqs[(NR_HOST_IRQS - 1) / sizeof(unsigned long) + 1];
++} irq_bitfield_t;
++
++typedef struct mac_regs { /* this struct is page aligned */
++ /* the sprs should be page aligned (occupies one page) */
++ unsigned long spr[NUM_SPRS]; /* special purpose registers */
++
++ unsigned long segr[16]; /* segment registers */
++ altivec_reg_t vec[32]; /* AltiVec vector registers */
++ fpu_register fpr[32]; /* FPU registers (64 bits) */
++
++ unsigned long vscr_pad[3], vscr; /* AltiVec status control register */
++ unsigned long pad_fpscr, fpscr; /* fp. status and control register */
++ unsigned long pad_ef, emulator_fpscr; /* emulator fp. status and control reg */
++
++ /* Keep this cache-block aligned (typcically 8 words) */
++ unsigned long cr; /* Condition register */
++ unsigned long link; /* Link register */
++ unsigned long flag_bits; /* Various flags (fbXXXXX) */
++ unsigned long inst_opcode; /* opcode of instruction */
++ unsigned long gpr[32]; /* gprs */
++
++ unsigned long ctr; /* Count register */
++ unsigned long xer; /* Integer exception register */
++ unsigned long nip; /* Instruction pointer */
++ unsigned long msr; /* Machine state register (virtual) */
++
++ /* interrupts and timers */
++ int interrupt; /* set if the kernel should return to the emulator */
++ int in_virtual_mode; /* set while MOL is in virtualization mode */
++ ulong dec_stamp; /* xDEC = dec_stamp - tbl */
++ ulong timer_stamp; /* TIMER = dec_stamp - tbl */
++ int obsolete_irq; /* unused */
++
++ /* RVEC parameters */
++#ifdef __darwin__
++ ulong rvec_vector; /* Used in kernel C-mode */
++#endif
++ ulong rvec_param[3]; /* Used in kernel C-mode */
++
++ /* misc */
++ int fpu_state; /* FPU_STATE_xxx (see below) */
++ int processor; /* processor to emulate, 1=601, 4=604 */
++ int altivec_used; /* useful for altivec detection */
++ int no_altivec; /* Don't use altivec (e.g. no kernel support) */
++
++ int use_bat_hack; /* Newworld BAT optimization (HACK) */
++
++#ifdef EMULATE_603
++ unsigned long gprsave_603[4]; /* GPR0-3 (for 603 DMISS/IMISS) */
++#endif
++ /* moldeb support */
++ unsigned long mdbg_ea_break; /* used together with BREAK_EA_PAGE */
++
++ /* DEBUG */
++ unsigned long debug[NUM_DEBUG_REGS];
++ unsigned long debug_scr1; /* dbg scratch register */
++ unsigned long debug_scr2; /* dbg scratch register */
++ unsigned long debug_trace; /* dbg trace register */
++ unsigned long dbg_trace_space[256];
++ unsigned long dbg_last_rvec; /* useful for tracing segfaults etc. */
++ unsigned long dbg_last_osi;
++
++ unsigned long kernel_dbg_stop; /* stop emulation flag */
++
++ /* host irq mapping data */
++ irq_bitfield_t mapped_irqs; /* keeps track of used host irqs */
++ irq_bitfield_t active_irqs; /* irqs that are up are marked here */
++ int hostirq_update; /* whether userspace should update the pic */
++ /* should be mol_atomic_t but causes trouble... */
++ int hostirq_active_cnt; /* number of active host irq lines */
++
++} mac_regs_t;
++
++#define NUM_MREGS_PAGES ((sizeof(mac_regs_t)+0xfff)/0x1000)
++
++#define BIT(n) (1U<<(31-(n))) /* bit 0 is MSB */
++
++#ifndef __KERNEL__
++extern mac_regs_t *mregs;
++#endif
++#endif /* __ASSEMBLY__ */
++
++/* mregs->fpu_state (only valid when FBIT_FPUInUse is false) */
++#define FPU_STATE_HALF_SAVED 0 /* fpscr & fr0-fr13 saved */
++#define FPU_STATE_DIRTY 1 /* fpscr & fr13 saved */
++#define FPU_STATE_SAVED 3 /* everything is saved to mregs */
++
++/* flag_bits (loaded into cr4-7). TOUCH THESE *ONLY* FROM THE MAIN THREAD! */
++#ifdef __KERNEL__
++/* these must be in cr7 (set through a mtcrf) */
++#define MMU_CR_FIELD 0x01
++#define FBIT_InSplitmode 31 /* (S) */
++#define FBIT_PrepareSplitmode 30 /* (S) */
++#define FBIT_LoadSegreg 29 /* (S) */
++
++/* must be in cr6; (set through a mtcrf) */
++#define TRACE_CR_FIELD 0x02
++#define FBIT_DbgTrace 27 /* (S) equals BREAK_SINGLE_STEP */
++#define FBIT_Trace 26 /* (S) */
++
++#define FBIT_MolDecLoaded 23 /* (S) */
++#define FBIT_DecSeenZero 22 /* (S) */
++#define FBIT_DecINT 21 /* (S) */
++#define FBIT_FPUInUse 20 /* (S) Set when fpu is mac-owned (only valid in the kernel) */
++
++#endif
++
++#define FBIT_MsrModified 19 /* (U) */
++#define FBIT_RecalcDecInt 18 /* (U) */
++#define FBIT_IRQPending 17 /* (U) IRQ is pending */
++#ifdef EMULATE_603
++#define FBIT_603_AltGPR 16 /* (U) Alternate GPR0-3 in use */
++#endif
++
++
++#ifdef __KERNEL__
++#define fb_DbgTrace BIT( FBIT_DbgTrace )
++#define fb_Trace BIT( FBIT_Trace )
++#define fb_PrepareSplitmode BIT( FBIT_PrepareSplitmode )
++#define fb_InSplitmode BIT( FBIT_InSplitmode )
++#define fb_LoadSegreg BIT( FBIT_LoadSegreg )
++#endif
++#define fb_MsrModified BIT( FBIT_MsrModified )
++#define fb_RecalcDecInt BIT( FBIT_RecalcDecInt )
++#define fb_IRQPending BIT( FBIT_IRQPending )
++#ifdef EMULATE_603
++#define fb_603_AltGPR BIT( FBIT_603_AltGPR )
++#endif
++
++#endif /* _MAC_REGISTERS_H */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/map.h
+@@ -0,0 +1,43 @@
++/*
++ * Creation Date: <2004/03/13 13:25:42 samuel>
++ * Time-stamp: <2004/03/13 14:07:11 samuel>
++ *
++ * <map.h>
++ *
++ *
++ *
++ * Copyright (C) 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ */
++
++#ifndef _H_MAP
++#define _H_MAP
++
++/* map a userspace address into the kernel address space */
++extern void *map_phys_range( ulong paddr, ulong size, char **ret_addr );
++extern void unmap_phys_range( void *handle );
++
++/* map a userspace address into the kernel address space */
++extern void *map_virt_range( ulong va, ulong size, char **ret_addr );
++extern void unmap_virt_range( void *handle );
++
++/* map the virtualized PTE hash into the kernel address space */
++extern ulong *map_emulated_hash( kernel_vars_t *kv, ulong mbase, ulong size );
++extern void unmap_emulated_hash( kernel_vars_t *kv );
++
++#ifdef __linux__
++static inline ulong* map_hw_hash( ulong physbase, int size ) {
++ return phys_to_virt( physbase );
++}
++static inline void unmap_hw_hash( ulong *base ) {}
++#endif
++
++#ifdef __darwin__
++static inline void *map_hw_hash( ulong physbase, int size ) { return NULL; }
++static inline void unmap_hw_hash( ulong *base ) {}
++#endif
++
++#endif /* _H_MAP */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/misc.h
+@@ -0,0 +1,105 @@
++/*
++ * Creation Date: <97/06/16 18:02:12 samuel>
++ * Time-stamp: <2004/03/13 14:03:30 samuel>
++ *
++ * <misc.h>
++ *
++ *
++ *
++ * Copyright (C) 1997-2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_MOD
++#define _H_MOD
++
++#ifdef __linux__
++
++#include <linux/version.h>
++#include <asm/uaccess.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++#define compat_verify_area(a,b,c) ( ! access_ok(a,b,c) )
++#else
++#define compat_verify_area(a,b,c) verify_area(a,b,c)
++#endif
++
++#endif /* __linux__ */
++
++extern int g_num_sessions; /* number of active sessions */
++
++struct kernel_vars;
++
++/* init.c */
++extern int common_init( void );
++extern void common_cleanup( void );
++extern int initialize_session( unsigned int sess_index );
++extern void destroy_session( unsigned int sess_index );
++extern uint get_session_magic( uint random_magic );
++
++/* arch specific functions */
++extern int arch_common_init( void );
++extern void arch_common_cleanup( void );
++extern struct kernel_vars *alloc_kvar_pages( void );
++extern void free_kvar_pages( struct kernel_vars *kv );
++extern void prevent_mod_unload( void );
++
++/* misc.c */
++struct dbg_op_params;
++struct perf_ctr;
++extern int do_debugger_op( kernel_vars_t *kv, struct dbg_op_params *pb );
++extern int handle_ioctl( kernel_vars_t *kv, int what, int arg1, int arg2, int arg3 );
++
++/* hostirq.c */
++extern int grab_host_irq(kernel_vars_t *kv, int irq);
++extern int release_host_irq(kernel_vars_t *kv, int irq);
++extern void init_host_irqs(kernel_vars_t *kv);
++extern void cleanup_host_irqs(kernel_vars_t *kv);
++
++/* actions.c */
++extern int perform_actions( void );
++extern void cleanup_actions( void );
++
++/* bit table manipulation */
++static inline void
++set_bit_mol( int nr, char *addr )
++{
++ ulong mask = 1 << (nr & 0x1f);
++ ulong *p = ((ulong*)addr) + (nr >> 5);
++ *p |= mask;
++}
++
++static inline void
++clear_bit_mol( int nr, char *addr )
++{
++ ulong mask = 1 << (nr & 0x1f);
++ ulong *p = ((ulong*)addr) + (nr >> 5);
++ *p &= ~mask;
++}
++
++static inline int
++check_bit_mol( int nr, char *addr )
++{
++ ulong mask = 1 << (nr & 0x1f);
++ ulong *p = ((ulong*)addr) + (nr >> 5);
++ return (*p & mask) != 0;
++}
++
++/* typesafe min/max (stolen from kernel.h) */
++#define min_mol(x,y) ({ \
++ const typeof(x) _x = (x); \
++ const typeof(y) _y = (y); \
++ (void) (&_x == &_y); \
++ _x < _y ? _x : _y; })
++
++#define max_mol(x,y) ({ \
++ const typeof(x) _x = (x); \
++ const typeof(y) _y = (y); \
++ (void) (&_x == &_y); \
++ _x > _y ? _x : _y; })
++
++#endif
+--- /dev/null
++++ b/drivers/macintosh/mol/include/mmu.h
+@@ -0,0 +1,102 @@
++/*
++ * Creation Date: <1998-11-11 13:55:49 samuel>
++ * Time-stamp: <2004/02/28 19:20:23 samuel>
++ *
++ * <mmu.h>
++ *
++ * MMU related things
++ *
++ * Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_MMU
++#define _H_MMU
++
++#ifndef __ASSEMBLY__
++#include "kernel_vars.h"
++#include "mac_registers.h"
++#include "mmu_mappings.h"
++#endif
++
++#ifdef CONFIG_MOL_HOSTED
++#define IO_PAGE_MAGIC_1 0x10BADA57
++#define IO_PAGE_MAGIC_2 0x136AB779
++#else
++#define IO_PAGE_MAGIC_1 0x10BACA57
++#define IO_PAGE_MAGIC_2 0x135AB779
++#endif
++
++#ifndef __ASSEMBLY__
++typedef struct io_page { /* Must fit within a single 4K page */
++ ulong magic; /* identifier 1 */
++ ulong magic2; /* identifier 2 */
++
++ ulong me_phys; /* physical address of this block */
++ ulong mphys; /* mac-physical address of block */
++ struct io_page *next; /* next io_page */
++
++ void *usr_data[512]; /* usr data (grain=double word) */
++} io_page_t;
++
++/* from mmu.c */
++extern int init_mmu( kernel_vars_t *kv );
++extern void cleanup_mmu( kernel_vars_t *kv );
++extern void do_flush( ulong vsid, ulong va, ulong *dummy, int num );
++extern void mmu_altered( kernel_vars_t *kv );
++extern void clear_vsid_refs( kernel_vars_t *kv );
++
++/* arch/mmu.c */
++extern int arch_mmu_init( kernel_vars_t *kv );
++
++/* from mmu_io.c */
++struct pte_lvrange;
++extern int init_mmu_io( kernel_vars_t *kv );
++extern void cleanup_mmu_io( kernel_vars_t *kv );
++extern int add_io_trans( kernel_vars_t *kv, ulong mbase, int size, void *usr_data );
++extern int remove_io_trans( kernel_vars_t *kv, ulong mbase, int size );
++extern int mphys_to_pte( kernel_vars_t *kv, ulong mphys, ulong *pte1, int is_write, struct pte_lvrange **lvrange );
++extern void mmu_add_map( kernel_vars_t *kv, struct mmu_mapping *m );
++extern void mmu_remove_map( kernel_vars_t *kv, struct mmu_mapping *m );
++
++/* from context.c */
++extern int init_contexts( kernel_vars_t *kv );
++extern void cleanup_contexts( kernel_vars_t *kv );
++extern int alloc_context( kernel_vars_t *kv );
++extern void handle_context_wrap( kernel_vars_t *kv, int nvsid_needed );
++
++/* from mmu_fb.c */
++extern int init_mmu_fb( kernel_vars_t *kv );
++extern void cleanup_mmu_fb( kernel_vars_t *kv );
++extern void video_pte_inserted( kernel_vars_t *kv, ulong lvptr, ulong *slot,
++ ulong pte0, ulong pte1, ulong ea );
++extern void setup_fb_acceleration( kernel_vars_t *kv, char *lvbase, int bytes_per_row, int height );
++extern int get_dirty_fb_lines( kernel_vars_t *kv, short *retbuf, int num_bytes );
++
++/* from mmu_tracker.c */
++extern int init_mmu_tracker( kernel_vars_t *kv );
++extern void cleanup_mmu_tracker( kernel_vars_t *kv );
++extern int track_lvrange( kernel_vars_t *kv );
++extern size_t get_track_buffer( kernel_vars_t *kv, char *retbuf );
++extern void set_track_buffer( kernel_vars_t *kv, char *buf );
++extern void lvpage_dirty( kernel_vars_t *kv, ulong lvpage );
++
++/* These functions should be used by the debugger only */
++struct dbg_page_info;
++extern int dbg_get_PTE( kernel_vars_t *kv, int context, ulong va, mPTE_t *ret );
++extern int dbg_get_linux_page( ulong va, struct dbg_page_info *r );
++extern int dbg_translate_ea( kernel_vars_t *kv, int context, ulong va, int *ret_mphys, int data_access );
++
++/* arch functions */
++extern ulong get_phys_page( kernel_vars_t *kv, ulong lvptr, int request_rw );
++
++/* VSID stuff */
++#define VSID_MASK 0xffffff
++
++
++#endif /* __ASSEMBLY__ */
++#endif /* _H_MMU */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/mmu_contexts.h
+@@ -0,0 +1,55 @@
++/*
++ * Creation Date: <97/07/17 14:26:14 samuel>
++ * Time-stamp: <2003/06/06 19:17:26 samuel>
++ *
++ * <mmu_contexts.h>
++ *
++ *
++ *
++ * Copyright (C) 1997, 2001, 2002, 2003 Samuel Rydh
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ *
++ */
++
++#ifndef _H_MMU_CONTEXTS
++#define _H_MMU_CONTEXTS
++
++/**********************************************************
++ * EVERYTHING IN THIS FILE IS UESED FOR DEBUGGING ONLY
++ *********************************************************/
++
++/* MMU context identifiers */
++#define kContextUnmapped 1
++#define kContextMapped_S 2
++#define kContextMapped_U 3
++
++#define kContextEmulator 20 /* for debugging purposes ONLY !!!! */
++#define kContextKernel 21 /* for debugging purposes ONLY !!!! */
++
++
++/* Flags returned by _get_physical_page(). The first flags should
++ * equal the _PAGE_XXX of the old MM implementation (<2.4.6).
++ */
++#define M_PAGE_PRESENT 0x001 /* software: pte contains a translation */
++#define M_PAGE_USER 0x002 /* usermode access allowed */
++#define M_PAGE_RW 0x004 /* usermode access allowed */
++#define M_PAGE_GUARDED 0x008 /* G: prohibit speculative access */
++#define M_PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
++#define M_PAGE_NO_CACHE 0x020 /* I: cache inhibit */
++#define M_PAGE_WRITETHRU 0x040 /* W: cache write-through */
++#define M_PAGE_DIRTY 0x080 /* C: page changed */
++#define M_PAGE_ACCESSED 0x100 /* R: page referenced */
++/* new linux-MM implementation */
++#define M_PAGE_HASHPTE 0x1000 /* hash_page has made an HPTE for this pte */
++#define M_PAGE_EXEC 0x2000 /* software: i-cache coherency required */
++
++#ifdef __KERNEL__
++#define DBG_TRANSL_PAGE_FLAG( val, flagname ) \
++ (((val) & flagname )? M##flagname : 0)
++#endif
++
++
++#endif /* _H_MMU_CONTEXTS */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/mmu_mappings.h
+@@ -0,0 +1,48 @@
++/*
++ * Creation Date: <1998-10-31 03:11:06 samuel>
++ * Time-stamp: <2004/03/13 16:44:58 samuel>
++ *
++ * <mmu_mappings.h>
++ *
++ * Mappings mac physical <-> linux virtual
++ *
++ * Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_MMU_MAPPINGS
++#define _H_MMU_MAPPINGS
++
++typedef struct mmu_mapping {
++ ulong mbase; /* mac physical base */
++ char *lvbase; /* linux virtual base */
++ size_t size; /* size (in bytes) */
++ int flags; /* MAPPING_xxx */
++
++ int id; /* set to zero, returned by the kerrnel module */
++} mmu_mapping_t;
++
++/* mmu_mapping flags */
++
++#define MAPPING_RO 1 /* read only memory (ROM etc) */
++#define MAPPING_PHYSICAL 2 /* physical (ROM etc) */
++#define MAPPING_SCRATCH 4 /* (block transl) scratch area */
++#define MAPPING_FORCE_CACHE 8 /* force write-through caching */
++#define MAPPING_FB_ACCEL 16 /* offscreen framebuffer, track changes */
++#define MAPPING_FB 32 /* framebuffer (ea assumed to be constant) */
++#define MAPPING_DBAT 64 /* allow use of a DBAT register */
++#define MAPPING_MACOS_CONTROLS_CACHE 128 /* do not force WIM bits to 001 */
++#define MAPPING_PUT_FIRST 256 /* take precedence over other translations */
++#define MAPPING_MREGS 512 /* map mregs into emulated process */
++#define MAPPING_FORCE_WRITABLE 4096 /* guess what... */
++
++#ifdef __KERNEL__
++#define MAPPING_IO 1024 /* I/O translation */
++#define MAPPING_VALID 2048 /* valid bit */
++#endif
++
++#endif /* _H_MMU_MAPPINGS */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/mmutypes.h
+@@ -0,0 +1,76 @@
++/*
++ * Creation Date: <2002/01/13 13:53:14 samuel>
++ * Time-stamp: <2002/01/27 19:56:11 samuel>
++ *
++ * <mmutypes.h>
++ *
++ * MMU definitions
++ *
++ * Most of these declarations originate from the Linux Kernel
++ *
++ * Copyright (C) 2002 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_MMUTYPES
++#define _H_MMUTYPES
++
++/* Hardware Page Table Entry */
++typedef struct mPTE {
++ unsigned long v:1; /* Entry is valid */
++ unsigned long vsid:24; /* Virtual segment identifier */
++ unsigned long h:1; /* Hash algorithm indicator */
++ unsigned long api:6; /* Abbreviated page index */
++
++ unsigned long rpn:20; /* Real (physical) page number */
++ unsigned long :3; /* Unused */
++ unsigned long r:1; /* Referenced */
++ unsigned long c:1; /* Changed */
++ unsigned long w:1; /* Write-thru cache mode */
++ unsigned long i:1; /* Cache inhibited */
++ unsigned long m:1; /* Memory coherence */
++ unsigned long g:1; /* Guarded */
++ unsigned long :1; /* Unused */
++ unsigned long pp:2; /* Page protection */
++} mPTE_t;
++
++
++typedef struct _mBATU { /* Upper part of BAT (all except 601) */
++ unsigned long bepi:15; /* Effective page index (virtual address) */
++ unsigned long :4; /* Unused */
++ unsigned long bl:11; /* Block size mask */
++ unsigned long vs:1; /* Supervisor valid */
++ unsigned long vp:1; /* User valid */
++} mBATU;
++
++typedef struct _mBATL { /* Lower part of BAT (all except 601) */
++ unsigned long brpn:15; /* Real page index (physical address) */
++ unsigned long :10; /* Unused */
++ unsigned long w:1; /* Write-thru cache */
++ unsigned long i:1; /* Cache inhibit */
++ unsigned long m:1; /* Memory coherence */
++ unsigned long g:1; /* Guarded (MBZ in IBAT) */
++ unsigned long :1; /* Unused */
++ unsigned long pp:2; /* Page access protections */
++} mBATL;
++
++typedef struct _mBAT {
++ mBATU batu; /* Upper register */
++ mBATL batl; /* Lower register */
++} mBAT;
++
++typedef struct _mSEGREG {
++ unsigned long t:1; /* Normal or I/O type */
++ unsigned long ks:1; /* Supervisor 'key' (normally 0) */
++ unsigned long kp:1; /* User 'key' (normally 1) */
++ unsigned long n:1; /* No-execute */
++ unsigned long :4; /* Unused */
++ unsigned long vsid:24; /* Virtual Segment Identifier */
++} mSEGREG;
++
++
++#endif /* _H_MMUTYPES */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/mol-ioctl.h
+@@ -0,0 +1,121 @@
++/*
++ * Creation Date: <2003/08/26 10:53:07 samuel>
++ * Time-stamp: <2004/02/08 20:17:58 samuel>
++ *
++ * <mol-ioctl.h>
++ *
++ *
++ *
++ * Copyright (C) 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ *
++ */
++
++#ifndef _H_MOL_IOCTL
++#define _H_MOL_IOCTL
++
++#ifndef __ASSEMBLY__
++#include "mmutypes.h"
++
++typedef struct {
++ int version; /* MOL version */
++ int smp_kernel; /* compiled with CONFIG_SMP */
++ int pvr; /* cpu version/revision (PVR) */
++ int rombase;
++ int romsize;
++ unsigned int tb_freq;
++} mol_kmod_info_t;
++
++typedef struct perf_ctr {
++ unsigned int ctr;
++ char name[40];
++} perf_ctr_t;
++
++typedef struct dbg_page_info {
++ int phys;
++ int mflags; /* M_PAGE_XXX */
++} dbg_page_info_t;
++
++typedef struct dbg_op_params {
++ /* input */
++ int operation; /* DBG_OP_xxx */
++ int ea;
++ int context;
++ int param;
++
++ /* output */
++ union {
++ int phys;
++ dbg_page_info_t page;
++ mPTE_t pte;
++ } ret;
++} dbg_op_params_t;
++
++typedef struct mol_ioctl_pb {
++ int arg1, arg2, arg3;
++#ifdef __darwin__
++ int ret;
++#endif
++} mol_ioctl_pb_t;
++
++#endif /* __ASSEMBLY__ */
++
++/* ioctls that do not use the mol_ioctl_pb arg */
++#define MOL_IOCTL_SMP_SEND_IPI _IO('M', 1) /* void ( void ) */
++#ifdef __darwin__
++#define MOL_IOCTL_CALL_KERNEL _IO('M', 2) /* void ( void ) */
++#endif
++
++/* debugger ioctls */
++#define MOL_IOCTL_DEBUGGER_OP _IOWR('M', 10, mol_ioctl_pb_t) /* int ( dbg_op_params *p ) */
++#define DBG_OP_EMULATE_TLBIE 1 /* void ( ulong pageindex ) */
++#define DBG_OP_EMULATE_TLBIA 2 /* void ( void ) */
++#define DBG_OP_GET_PTE 3 /* lvptr, context, int ( ulong vsid, ulong va, PTE *retpte ) */
++#define DBG_OP_GET_PHYS_PAGE 4 /* int ( ulong lvptr, ulong *retptr ) */
++#define DBG_OP_BREAKPOINT_FLAGS 5 /* void ( ulong flags ) */
++#define DBG_OP_TRANSLATE_EA 6 /* ea, context, is_data -- mphys */
++#define MOL_IOCTL_CLEAR_PERF_INFO _IOWR('M', 11, mol_ioctl_pb_t) /* void ( void ) */
++#define MOL_IOCTL_GET_PERF_INFO _IOWR('M', 12, mol_ioctl_pb_t) /* int ( int index, perf_ctr_t *ctr ) */
++
++/* config selectors */
++#define MOL_IOCTL_CREATE_SESSION _IOWR('M', 30, mol_ioctl_pb_t) /* int ( int session_index ) */
++#define MOL_IOCTL_GET_INFO _IOWR('M', 31, mol_ioctl_pb_t) /* int ( mol_kmod_info_t *retinfo, int size ) */
++#define MOL_IOCTL_SET_RAM _IOWR('M', 33, mol_ioctl_pb_t) /* void ( ulong ram_start, ulong ram_end ) */
++#define MOL_IOCTL_COPY_LAST_ROMPAGE _IOWR('M', 34, mol_ioctl_pb_t) /* void ( char *destpage ) */
++#define MOL_IOCTL_SPR_CHANGED _IOWR('M', 35, mol_ioctl_pb_t) /* void ( void ) */
++#define MOL_IOCTL_IDLE_RECLAIM_MEMORY _IOWR('M', 36, mol_ioctl_pb_t) /* void ( void ) */
++#define MOL_IOCTL_MMU_MAP _IOWR('M', 37, mol_ioctl_pb_t) /* void ( struct mmu_mapping *m, int add_map ) */
++#define MOL_IOCTL_ADD_IORANGE _IOWR('M', 39, mol_ioctl_pb_t) /* void ( ulong mbase, size_t size, io_ops_t *) */
++#define MOL_IOCTL_REMOVE_IORANGE _IOWR('M', 40, mol_ioctl_pb_t) /* void ( ulong mbase, size_t size ) */
++#define MOL_IOCTL_SETUP_FBACCEL _IOWR('M', 41, mol_ioctl_pb_t) /* void * ( char *lvbase, int bytes_per_row, int height ) */
++#define MOL_IOCTL_GET_DIRTY_FBLINES _IOWR('M', 42, mol_ioctl_pb_t) /* int ( short *rettable, int table_size_in_bytes ) */
++#define MOL_IOCTL_TRACK_DIRTY_RAM _IOWR('M', 43, mol_ioctl_pb_t) /* int ( char *lvbase, size_t size ) */
++#define MOL_IOCTL_GET_DIRTY_RAM _IOWR('M', 44, mol_ioctl_pb_t) /* size_t ( char *retbuf ) */
++#define MOL_IOCTL_SET_DIRTY_RAM _IOWR('M', 45, mol_ioctl_pb_t) /* void ( char *dirtybuf ) */
++#define MOL_IOCTL_GET_MREGS_PHYS _IOWR('M', 46, mol_ioctl_pb_t) /* ulong ( void ) */
++#define MOL_IOCTL_ALLOC_EMUACCEL_SLOT _IOWR('M', 47, mol_ioctl_pb_t) /* int ( int inst_flags, int param, int inst_addr ) */
++#define MOL_IOCTL_MAPIN_EMUACCEL_PAGE _IOWR('M', 48, mol_ioctl_pb_t) /* int ( int mphys ) */
++#define MOL_IOCTL_TUNE_SPR _IOWR('M', 49, mol_ioctl_pb_t) /* int ( int spr, int action ) */
++#define MOL_IOCTL_GET_SESSION_MAGIC _IOWR('M', 50, mol_ioctl_pb_t) /* uint ( uint new_random_magic ) */
++
++#define MOL_IOCTL_DBG_COPY_KVARS _IOWR('M', 51, mol_ioctl_pb_t) /* int ( int session, kernel_vars_t *dest ) */
++
++#ifdef __darwin__
++#define MOL_IOCTL_GET_MREGS_VIRT _IOWR('M', 52, mol_ioctl_pb_t) /* int ( mac_regs_t **ret ) */
++#endif
++
++#define MOL_IOCTL_GRAB_IRQ _IOWR('M', 53, mol_ioctl_pb_t) /* int ( int irq ) */
++#define MOL_IOCTL_RELEASE_IRQ _IOWR('M', 54, mol_ioctl_pb_t) /* int ( int irq ) */
++#define MOL_IOCTL_GET_IRQS _IOWR('M', 55, mol_ioctl_pb_t) /* int ( irq_bitfield_t * ) */
++
++
++/* MOL error codes */
++#define EMOLGENERAL 100
++#define EMOLINUSE 101
++#define EMOLINVAL 102
++#define EMOLSECURITY 103
++
++#endif /* _H_MOL_IOCTL */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/mol_config.h
+@@ -0,0 +1,76 @@
++/*
++ * Creation Date: <1999/05/30 15:30:25 samuel>
++ * Time-stamp: <2004/06/05 19:47:33 samuel>
++ *
++ * <mol_config.h>
++ *
++ * Header to be included first...
++ *
++ * Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_MOL_CONFIG
++#define _H_MOL_CONFIG
++
++/* Some debugging flags */
++#define COLLECT_RVEC_STATISTICS
++//#define EMULATE_603
++//#define ENABLE_ASSERT
++
++#define _GNU_SOURCE
++#define _REENTRANT
++#define _LARGEFILE64_SOURCE
++
++#if defined(__powerpc__) && !defined(__ppc__)
++#define __ppc__
++#endif
++#if defined(__ppc__) && !defined(__powerpc__)
++#define __powerpc__
++#endif
++
++#if !defined(__linux__) && !defined(__ASSEMBLY__)
++typedef unsigned long ulong;
++#endif
++
++#if !defined(__ASSEMBLY__) && !defined(__KERNEL__)
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <unistd.h>
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <fcntl.h>
++#include <errno.h>
++#include <ctype.h>
++
++#include "config.h"
++#include "autoconf.h"
++#include "unconfig.h"
++
++#ifdef CONFIG_OLDWORLD
++#define OLDWORLD_SUPPORT
++#endif
++
++#include "platform.h"
++
++/* from emulaiton/main.c */
++extern int in_security_mode;
++
++/* common MOL header fiels */
++
++#include "debugger.h" /* for printm */
++#include "extralib.h"
++
++#endif /* __ASSEMBLY__ && __KERNEL__ */
++
++#ifdef __ASSEMBLY__
++changequote([[[[[,]]]]])
++[[[[[ /* shield includes from m4-expansion */
++#endif
++
++#endif /* _H_MOL_CONFIG */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/molasm.h
+@@ -0,0 +1,138 @@
++/* -*- asm -*-
++ *
++ * Creation Date: <2001/01/28 20:33:22 samuel>
++ * Time-stamp: <2004/01/29 19:29:10 samuel>
++ *
++ * <molasm.h>
++ *
++ * Utility assembly macros
++ *
++ * Copyright (C) 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_MOLASM
++#define _H_MOLASM
++
++#define GLOBAL_SYMBOL( sym_name ) \
++GLOBL(sym_name)
++
++
++/************************************************************************/
++/* SPRG usage */
++/************************************************************************/
++
++/* Darwin and Linux uses the sprg's differently. Linux uses sprg0/1 in
++ * the exception vectors while Darwin uses sprg2/3.
++ */
++#ifdef __linux__
++define([mfsprg_a0], [mfsprg0])
++define([mfsprg_a1], [mfsprg1])
++define([mfsprg_a2], [mfsprg2])
++define([mfsprg_a3], [mfsprg3])
++define([mtsprg_a0], [mtsprg0])
++define([mtsprg_a1], [mtsprg1])
++define([mtsprg_a2], [mtsprg2])
++define([mtsprg_a3], [mtsprg3])
++#else
++define([mfsprg_a0], [mfsprg2])
++define([mfsprg_a1], [mfsprg3])
++define([mfsprg_a2], [mfsprg0])
++define([mfsprg_a3], [mfsprg1])
++define([mtsprg_a0], [mtsprg2])
++define([mtsprg_a1], [mtsprg3])
++define([mtsprg_a2], [mtsprg0])
++define([mtsprg_a3], [mtsprg1])
++#endif
++
++
++/************************************************************************/
++/* Utility */
++/************************************************************************/
++
++MACRO(LOAD_VARIABLE, [reg, offs], [
++ lis _reg,HA(k_mol_stack + _offs)
++ lwz _reg,LO(k_mol_stack + _offs)(_reg)
++])
++
++MACRO(SET_SESSION_TABLE, [reg], [
++ lis _reg,HA(EXTERN(Symbol_SESSION_TABLE))
++ addi _reg,_reg,LO(EXTERN(Symbol_SESSION_TABLE))
++])
++
++
++/************************************************************************/
++/* GPR save / restore */
++/************************************************************************/
++
++MACRO(xGPR_SAVE, [reg], [
++ stw rPREFIX[]_reg,(xGPR0 + _reg*4)(r1)
++])
++
++MACRO(xGPR_LOAD, [reg], [
++ lwz rPREFIX[]_reg,(xGPR0 + _reg*4)(r1)
++])
++
++
++/************************************************************************/
++/* FPU misc */
++/************************************************************************/
++
++MACRO(ENABLE_MSR_FP, [scr], [
++ mfmsr _scr
++ ori _scr,_scr,MSR_FP
++ mtmsr _scr
++ isync
++])
++
++/************************************************************************/
++/* Segment registers */
++/************************************************************************/
++
++MACRO(LOAD_SEGMENT_REGS, [base, scr, scr2], [
++ mFORLOOP([i],0,7,[
++ lwz _scr,eval(i * 8)(_base)
++ lwz _scr2,eval((i * 8)+4)(_base)
++ mtsr srPREFIX[]eval(i*2),_scr
++ mtsr srPREFIX[]eval(i*2+1),_scr2
++ ])
++])
++
++MACRO(SAVE_SEGMENT_REGS, [base, scr, scr2], [
++ mFORLOOP([i],0,7,[
++ mfsr _scr,srPREFIX[]eval(i*2)
++ mfsr _scr2,srPREFIX[]eval(i*2+1)
++ stw _scr,eval(i * 8)(_base)
++ stw _scr2,eval((i * 8) + 4)(_base)
++ ])
++])
++
++/************************************************************************/
++/* BAT register */
++/************************************************************************/
++
++MACRO(SAVE_DBATS, [varoffs, scr1], [
++ mfpvr _scr1
++ srwi _scr1,_scr1,16
++ cmpwi r3,1
++ beq 9f
++ mFORLOOP([nn],0,7,[
++ mfspr _scr1, S_DBAT0U + nn
++ stw _scr1,(_varoffs + (4 * nn))(r1)
++ ])
++9:
++])
++
++MACRO(SAVE_IBATS, [varoffs, scr1], [
++ mFORLOOP([nn],0,7,[
++ mfspr _scr1, S_IBAT0U + nn
++ stw _scr1,(_varoffs + (4 * nn))(r1)
++ ])
++])
++
++
++#endif /* _H_MOLASM */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/molversion.h
+@@ -0,0 +1,6 @@
++#define MOL_BUILD_DATE "Okt 13 2007 11:49"
++#define MOL_VERSION_STR "0.9.73"
++#define MOL_RELEASE "0.9.73-SVN"
++#define MOL_MAJOR_VERSION 0
++#define MOL_MINOR_VERSION 9
++#define MOL_PATCHLEVEL 73
+--- /dev/null
++++ b/drivers/macintosh/mol/include/mtable.h
+@@ -0,0 +1,71 @@
++/*
++ * Creation Date: <2002/05/26 15:52:50 samuel>
++ * Time-stamp: <2004/02/21 16:18:11 samuel>
++ *
++ * <mtable.h>
++ *
++ * Keeps track of the PTEs
++ *
++ * Copyright (C) 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_MTABLE
++#define _H_MTABLE
++
++#ifndef __ASSEMBLY__
++typedef struct pte_lvrange pte_lvrange_t;
++typedef struct vsid_info vsid_info_t;
++typedef struct vsid_ent vsid_ent_t;
++
++extern int init_mtable( kernel_vars_t *kv );
++extern void cleanup_mtable( kernel_vars_t *kv );
++
++extern pte_lvrange_t *register_lvrange( kernel_vars_t *kv, char *lvbase, int size );
++extern void free_lvrange( kernel_vars_t *kv, pte_lvrange_t *pte_range );
++
++extern vsid_ent_t *vsid_get_user_sv( kernel_vars_t *kv, int mac_vsid, ulong *user_sr, ulong *sv_sr );
++
++extern int mtable_memory_check( kernel_vars_t *kv );
++extern void pte_inserted( kernel_vars_t *kv, ulong ea, char *lvptr,
++ pte_lvrange_t *lvrange, ulong *pte, vsid_ent_t *r,
++ int segreg );
++
++extern void flush_vsid_ea( kernel_vars_t *kv, int vsid, ulong ea );
++extern void flush_ea_range( kernel_vars_t *kv, ulong ea, int size );
++extern void flush_lvptr( kernel_vars_t *kv, ulong lvptr );
++extern void flush_lv_range( kernel_vars_t *kv, ulong lvbase, int size );
++
++extern void clear_all_vsids( kernel_vars_t *kv );
++extern void clear_pte_hash_table( kernel_vars_t *kv );
++
++extern void mtable_reclaim( kernel_vars_t *kv );
++extern void mtable_tune_alloc_limit( kernel_vars_t *kv, int ramsize_mb );
++
++static inline void
++make_lvptr_reservation( kernel_vars_t *kv, char *lvptr ) {
++ kv->mmu.lvptr_reservation = lvptr;
++ kv->mmu.lvptr_reservation_lost = 0;
++}
++
++
++#endif /* __ASSEMBLY__ */
++
++/* offsets to linux_vsid and linux_vsid_sv (used from assembly) */
++#define VSID_MYSELF_VIRT 0
++#define VSID_USER_OFFS 4
++#define VSID_SV_OFFS 8
++#define SIZEOF_VSID_ENT (64*4 + 12)
++
++#define VSID_OFFSETS_OK \
++ ((offsetof(vsid_ent_t, linux_vsid) == VSID_USER_OFFS ) || \
++ (offsetof(vsid_ent_t, linux_vsid_sv) == VSID_SV_OFFS ) || \
++ (sizeof(vsid_ent_t) == SIZEOF_VSID_ENT))
++
++#endif /* _H_MTABLE */
++
++
+--- /dev/null
++++ b/drivers/macintosh/mol/include/osi.h
+@@ -0,0 +1,170 @@
++/*
++ * Creation Date: <1999/03/18 03:19:43 samuel>
++ * Time-stamp: <2003/12/26 16:58:19 samuel>
++ *
++ * <os_interface.h>
++ *
++ * This file includes definitions for drivers
++ * running in the "emulated" OS. (Mainly the 'sc'
++ * mechanism of communicating)
++ *
++ * Copyright (C) 1999, 2000, 2001, 2002, 2003 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_OSI
++#define _H_OSI
++
++/* Magic register values loaded into r3 and r4 before the 'sc' assembly instruction */
++#define OSI_SC_MAGIC_R3 0x113724FA
++#define OSI_SC_MAGIC_R4 0x77810F9B
++
++
++/************************************************************************/
++/* Selectors (passed in r5) */
++/************************************************************************/
++
++#define OSI_CALL_AVAILABLE 0
++#define OSI_DEBUGGER 1 /* enter debugger */
++/* obsolete OSI_LOG_STR 3 */
++#define OSI_CMOUNT_DRV_VOL 4 /* conditionally mount driver volume */
++/* obsolete OSI_SCSI_xxx 5-6 */
++#define OSI_GET_GMT_TIME 7
++#define OSI_MOUSE_CNTRL 8
++#define OSI_GET_LOCALTIME 9 /* return time in secs from 01/01/04 */
++
++#define OSI_ENET_OPEN 10
++#define OSI_ENET_CLOSE 11
++#define OSI_ENET_GET_ADDR 12
++#define OSI_ENET_GET_STATUS 13
++#define OSI_ENET_CONTROL 14
++#define OSI_ENET_ADD_MULTI 16
++#define OSI_ENET_DEL_MULTI 17
++#define OSI_ENET_GET_PACKET 18
++#define OSI_ENET_SEND_PACKET 19
++
++#define OSI_OF_INTERFACE 20
++#define OSI_OF_TRAP 21
++#define OSI_OF_RTAS 22
++
++#define OSI_SCSI_CNTRL 23
++#define OSI_SCSI_SUBMIT 24
++#define OSI_SCSI_ACK 25
++
++#define OSI_GET_MOUSE 26 /* -- r3 status, r4-r8 mouse data */
++#define OSI_ACK_MOUSE_IRQ 27 /* -- int */
++
++#define OSI_SET_VMODE 28 /* modeID, depth -- error */
++#define OSI_GET_VMODE_INFO 29 /* mode, depth -- r3 status, r4-r9 pb */
++#define OSI_GET_MOUSE_DPI 30 /* -- mouse_dpi */
++
++#define OSI_SET_VIDEO_POWER 31
++#define OSI_GET_FB_INFO 32 /* void -- r3 status, r4-r8 video data */
++
++#define OSI_SOUND_WRITE 33
++/* #define OSI_SOUND_FORMAT 34 */
++#define OSI_SOUND_SET_VOLUME 35
++#define OSI_SOUND_CNTL 36
++/* obsolete OSI_SOUND call 37 */
++
++#define OSI_VIDEO_ACK_IRQ 38
++#define OSI_VIDEO_CNTRL 39
++
++#define OSI_SOUND_IRQ_ACK 40
++#define OSI_SOUND_START_STOP 41
++
++#define OSI_REGISTER_IRQ 42 /* reg_property[0] appl_int -- irq_cookie */
++/* obsolete OSI_IRQ 43-46 */
++
++#define OSI_LOG_PUTC 47 /* char -- */
++
++#define OSI_KBD_CNTRL 50
++#define OSI_GET_ADB_KEY 51 /* -- adb_keycode (keycode | keycode_id in r4) */
++
++#define OSI_WRITE_NVRAM_BYTE 52 /* offs, byte -- */
++#define OSI_READ_NVRAM_BYTE 53 /* offs -- byte */
++
++#define OSI_EXIT 54
++
++#define OSI_KEYCODE_TO_ADB 55 /* (keycode | keycode_id) -- adb_keycode */
++#define OSI_MAP_ADB_KEY 56 /* keycode, adbcode -- */
++#define OSI_SAVE_KEYMAPPING 57 /* -- */
++#define OSI_USLEEP 58 /* usecs -- */
++#define OSI_SET_COLOR 59 /* index value -- */
++
++#define OSI_PIC_MASK_IRQ 60 /* irq -- */
++#define OSI_PIC_UNMASK_IRQ 61 /* irq -- */
++#define OSI_PIC_ACK_IRQ 62 /* irq mask_flag -- */
++#define OSI_PIC_GET_ACTIVE_IRQ 63
++
++#define OSI_GET_COLOR 64 /* index -- value */
++
++/* 65-67 old ablk implementation */
++#define OSI_IRQTEST 65
++
++#define OSI_ENET2_OPEN 68
++#define OSI_ENET2_CLOSE 69
++#define OSI_ENET2_CNTRL 70
++#define OSI_ENET2_RING_SETUP 71
++#define OSI_ENET2_KICK 72
++#define OSI_ENET2_GET_HWADDR 73
++#define OSI_ENET2_IRQ_ACK 74
++
++#define OSI_PROM_IFACE 76
++#define kPromClose 0
++#define kPromPeer 1
++#define kPromChild 2
++#define kPromParent 3
++#define kPromPackageToPath 4
++#define kPromGetPropLen 5
++#define kPromGetProp 6
++#define kPromNextProp 7
++#define kPromSetProp 8
++#define kPromChangePHandle 9
++
++#define OSI_PROM_PATH_IFACE 77
++#define kPromCreateNode 16
++#define kPromFindDevice 17
++
++#define OSI_BOOT_HELPER 78
++#define kBootHAscii2Unicode 32
++#define kBootHUnicode2Ascii 33
++#define kBootHGetStrResInd 34 /* key, buf, len -- buf */
++#define kBootHGetRAMSize 35 /* -- ramsize */
++
++#define OSI_ABLK_RING_SETUP 79
++#define OSI_ABLK_CNTRL 80
++#define OSI_ABLK_DISK_INFO 81
++#define OSI_ABLK_KICK 82
++#define OSI_ABLK_IRQ_ACK 83
++#define OSI_ABLK_SYNC_READ 84
++#define OSI_ABLK_SYNC_WRITE 85
++#define OSI_ABLK_BLESS_DISK 86
++
++#define OSI_EMUACCEL 89 /* EMULATE_xxx, nip -- index */
++#define OSI_MAPIN_MREGS 90 /* mphys */
++#define OSI_NVRAM_SIZE 91
++
++#define OSI_MTICKS_TO_USECS 92
++#define OSI_USECS_TO_MTICKS 93
++
++/* obsolete OSI_BLK 94-95 */
++
++#define OSI_PSEUDO_FS 96
++#define kPseudoFSOpen 1
++#define kPseudoFSClose 2
++#define kPseudoFSGetSize 3
++#define kPseudoFSRead 4
++#define kPseudoFSIndex2Name 5
++
++#define OSI_TTY_PUTC 97
++#define OSI_TTY_GETC 98
++#define OSI_TTY_IRQ_ACK 99
++
++#define NUM_OSI_SELECTORS 100 /* remember to increase this... */
++
++#endif /* _H_OSI */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/osi_calls.h
+@@ -0,0 +1,475 @@
++/*
++ * Creation Date: <2002/06/16 01:40:57 samuel>
++ * Time-stamp: <2004/02/23 01:04:31 samuel>
++ *
++ * <osi_calls.h>
++ *
++ * OSI call inlines
++ *
++ * Copyright (C) 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_OSI_CALLS
++#define _H_OSI_CALLS
++
++#include "osi.h"
++
++/* Old gcc versions have a limit on the number of registers used.
++ * Newer gcc versions (gcc 3.3) require that the clobber list does
++ * not overlap declared registers.
++ */
++#if __GNUC__ == 2 || ( __GNUC__ == 3 && __GNUC_MINOR__ < 3 )
++#define SHORT_REGLIST
++#endif
++
++
++/************************************************************************/
++/* OSI call instantiation macros */
++/************************************************************************/
++
++#define dreg(n) __oc_##n __asm__ (#n)
++#define ir(n) "r" (__oc_##n)
++#define rr(n) "=r" (__oc_##n)
++
++#define _oc_head( input_regs... ) \
++{ \
++ int _ret=0; \
++ { \
++ register unsigned long dreg(r3); \
++ register unsigned long dreg(r4); \
++ register unsigned long dreg(r5) \
++ ,##input_regs ;
++
++#define _oc_syscall( number, extra_ret_regs... ) \
++ __oc_r3 = OSI_SC_MAGIC_R3; \
++ __oc_r4 = OSI_SC_MAGIC_R4; \
++ __oc_r5 = number; \
++ __asm__ __volatile__ ( \
++ "sc " : rr(r3) ,## extra_ret_regs
++
++#define _oc_input( regs... ) \
++ : ir(r3), ir(r4), ir(r5) \
++ , ## regs \
++ : "memory" );
++
++/* the tail memory clobber is necessary since we violate the strict
++ * aliasing rules when we return structs through the registers.
++ */
++#define _oc_tail \
++ asm volatile ( "" : : : "memory" ); \
++ _ret = __oc_r3; \
++ } \
++ return _ret; \
++}
++
++
++/************************************************************************/
++/* Alternatives */
++/************************************************************************/
++
++#ifdef SHORT_REGLIST
++#ifdef __linux__
++#define _oc_syscall_r10w6( number, inputregs... ) \
++ __oc_r3 = OSI_SC_MAGIC_R3; \
++ __oc_r4 = OSI_SC_MAGIC_R4; \
++ __oc_r5 = number; \
++ __asm__ __volatile__ ( \
++ "sc \n" \
++ "stw 4,0(10) \n" \
++ "stw 5,4(10) \n" \
++ "stw 6,8(10) \n" \
++ "stw 7,12(10) \n" \
++ "stw 8,16(10) \n" \
++ "stw 9,20(10) \n" \
++ : rr(r3) \
++ : ir(r3), ir(r4), ir(r5), ir(r10) \
++ ,## inputregs \
++ : "memory", \
++ "r4", "r5", "r6", "r7", "r8", "r9" );
++#endif
++#ifdef __darwin__
++#define _oc_syscall_r10w6( number, inputregs... ) \
++ __oc_r3 = OSI_SC_MAGIC_R3; \
++ __oc_r4 = OSI_SC_MAGIC_R4; \
++ __oc_r5 = number; \
++ __asm__ __volatile__ ( \
++ "sc \n" \
++ "stw r4,0(r10) \n" \
++ "stw r5,4(r10) \n" \
++ "stw r6,8(r10) \n" \
++ "stw r7,12(r10) \n" \
++ "stw r8,16(r10) \n" \
++ "stw r9,20(r10) \n" \
++ : rr(r3) \
++ : ir(r3), ir(r4), ir(r5), ir(r10) \
++ ,## inputregs \
++ : "memory", \
++ "r4", "r5", "r6", "r7", "r8", "r9" );
++#endif
++#endif
++
++
++/************************************************************************/
++/* Common helper functions */
++/************************************************************************/
++
++#define _osi_call0( type, name, number ) \
++type name( void ) \
++ _oc_head() \
++ _oc_syscall( number ) \
++ _oc_input() \
++ _oc_tail
++
++#define _osi_call1( type, name, number, type1, arg1 ) \
++type name( type1 arg1 ) \
++ _oc_head( dreg(r6) ) \
++ __oc_r6 = (ulong)arg1; \
++ _oc_syscall( number ) \
++ _oc_input( ir(r6) ) \
++ _oc_tail
++
++#define _osi_call2( type, name, number, t1, a1, t2, a2 ) \
++type name( t1 a1, t2 a2 ) \
++ _oc_head( dreg(r6), dreg(r7) ) \
++ __oc_r6 = (ulong)a1; \
++ __oc_r7 = (ulong)a2; \
++ _oc_syscall( number ) \
++ _oc_input( ir(r6), ir(r7) ) \
++ _oc_tail
++
++#define _osi_call3( type, name, number, t1, a1, t2, a2, t3, a3 ) \
++type name( t1 a1, t2 a2, t3 a3 ) \
++ _oc_head( dreg(r6), dreg(r7), dreg(r8) ) \
++ __oc_r6 = (ulong)a1; \
++ __oc_r7 = (ulong)a2; \
++ __oc_r8 = (ulong)a3; \
++ _oc_syscall( number ) \
++ _oc_input( ir(r6), ir(r7), ir(r8) ) \
++ _oc_tail
++
++#define _osi_call4( type, name, number, t1, a1, t2, a2, t3, a3, t4, a4 ) \
++type name( t1 a1, t2 a2, t3 a3, t4 a4 ) \
++ _oc_head( dreg(r6), dreg(r7), dreg(r8), dreg(r9) ) \
++ __oc_r6 = (ulong)a1; \
++ __oc_r7 = (ulong)a2; \
++ __oc_r8 = (ulong)a3; \
++ __oc_r9 = (ulong)a4; \
++ _oc_syscall( number ) \
++ _oc_input( ir(r6), ir(r7), ir(r8), ir(r9) ) \
++ _oc_tail
++
++#define _osi_call5( type, name, number, t1, a1, t2, a2, t3, a3, t4, a4, t5, a5 ) \
++type name( t1 a1, t2 a2, t3 a3, t4 a4, t5 a5 ) \
++ _oc_head( dreg(r6), dreg(r7), dreg(r8), dreg(r9), dreg(r10) ) \
++ __oc_r6 = (ulong)a1; \
++ __oc_r7 = (ulong)a2; \
++ __oc_r8 = (ulong)a3; \
++ __oc_r9 = (ulong)a4; \
++ __oc_r10 = (ulong)a5; \
++ _oc_syscall( number ) \
++ _oc_input( ir(r6), ir(r7), ir(r8), ir(r9), ir(r10) ) \
++ _oc_tail
++
++#define _osi_call6( type, name, number, t1, a1, t2, a2, t3, a3, t4, a4, t5, a5, t6, a6 ) \
++type name( t1 a1, t2 a2, t3 a3, t4 a4, t5 a5, t6 a6 ) \
++ _oc_head( dreg(r6), dreg(r7), dreg(r8), dreg(r9), dreg(r10), dreg(r11) )\
++ __oc_r6 = (ulong)a1; \
++ __oc_r7 = (ulong)a2; \
++ __oc_r8 = (ulong)a3; \
++ __oc_r9 = (ulong)a4; \
++ __oc_r10 = (ulong)a5; \
++ __oc_r11 = (ulong)a6; \
++ _oc_syscall( number ) \
++ _oc_input( ir(r6), ir(r7), ir(r8), ir(r9), ir(r10), ir(r11) ) \
++ _oc_tail
++
++
++/************************************************************************/
++/* Special */
++/************************************************************************/
++
++/* r4 returned in retarg1 pointer */
++#define _osi_call0_w1( type, name, number, type1, retarg1 ) \
++type name( type1 retarg1 ) \
++ _oc_head() \
++ _oc_syscall( number, rr(r4) ) \
++ _oc_input() \
++ *retarg1 = __oc_r4; \
++ _oc_tail
++
++#define _osi_call0_w2( type, name, number, type1, retarg1 ) \
++type name( type1 retarg1 ) \
++ _oc_head() \
++ _oc_syscall( number, rr(r4), rr(r5) ) \
++ _oc_input() \
++ ((ulong*)retarg1)[0] = __oc_r4; \
++ ((ulong*)retarg1)[1] = __oc_r5; \
++ _oc_tail
++
++/* r4-r8 returned in retarg1 pointer */
++#define _osi_call0_w5( type, name, number, type1, retarg1 ) \
++type name( type1 retarg1 ) \
++ _oc_head( dreg(r6), dreg(r7), dreg(r8) ) \
++ _oc_syscall( number, \
++ rr(r4), rr(r5), rr(r6), rr(r7), rr(r8) ) \
++ _oc_input() \
++ ((ulong*)retarg1)[0] = __oc_r4; \
++ ((ulong*)retarg1)[1] = __oc_r5; \
++ ((ulong*)retarg1)[2] = __oc_r6; \
++ ((ulong*)retarg1)[3] = __oc_r7; \
++ ((ulong*)retarg1)[4] = __oc_r8; \
++ _oc_tail
++
++/* r4 returned in retarg pointer */
++#define _osi_call1_w1( type, name, number, t1, a1, t2, retarg ) \
++type name( t1 a1, t2 retarg ) \
++ _oc_head( dreg(r6) ) \
++ __oc_r6 = (ulong)a1; \
++ _oc_syscall( number, rr(r4) ) \
++ _oc_input( ir(r6) ) \
++ ((ulong*)retarg)[0] = __oc_r4; \
++ _oc_tail
++
++/* r4,r5 returned in retarg1, retarg2 */
++#define _osi_call1_w1w1( type, name, number, t1, a1, t2, retarg1, t3, retarg2 ) \
++type name( t1 a1, t2 retarg1, t3 retarg2 ) \
++ _oc_head( dreg(r6) ) \
++ __oc_r6 = (ulong)a1; \
++ _oc_syscall( number, rr(r4), rr(r5) ) \
++ _oc_input( ir(r6) ) \
++ ((ulong*)retarg1)[0] = __oc_r4; \
++ ((ulong*)retarg2)[0] = __oc_r5; \
++ _oc_tail
++
++/* r4,r5 returned in retarg1, retarg2, retarg3 */
++#define _osi_call1_w1w1w1( type, name, number, t1, a1, t2, retarg1, t3, retarg2, t4, retarg3 ) \
++type name( t1 a1, t2 retarg1, t3 retarg2, t4 retarg3 ) \
++ _oc_head( dreg(r6) ) \
++ __oc_r6 = (ulong)a1; \
++ _oc_syscall( number, rr(r4), rr(r5), rr(r6) ) \
++ _oc_input( ir(r6) ) \
++ ((ulong*)retarg1)[0] = __oc_r4; \
++ ((ulong*)retarg2)[0] = __oc_r5; \
++ ((ulong*)retarg3)[0] = __oc_r6; \
++ _oc_tail
++
++/* r4,r5 returned in retarg pointer */
++#define _osi_call1_w2( type, name, number, t1, a1, t2, retarg ) \
++type name( t1 a1, t2 retarg ) \
++ _oc_head( dreg(r6) ) \
++ __oc_r6 = (ulong)a1; \
++ _oc_syscall( number, rr(r4), rr(r5) ) \
++ _oc_input( ir(r6) ) \
++ ((ulong*)retarg)[0] = __oc_r4; \
++ ((ulong*)retarg)[1] = __oc_r5; \
++ _oc_tail
++
++/* r4-r7 returned in retarg pointer */
++#define _osi_call1_w4( type, name, number, t1, a1, t2, retarg ) \
++type name( t1 a1, t2 retarg ) \
++ _oc_head( dreg(r6), dreg(r7) ) \
++ __oc_r6 = (ulong)a1; \
++ _oc_syscall( number, rr(r4), rr(r5), rr(r6), rr(r7) ) \
++ _oc_input( ir(r6) ) \
++ ((ulong*)retarg)[0] = __oc_r4; \
++ ((ulong*)retarg)[1] = __oc_r5; \
++ ((ulong*)retarg)[2] = __oc_r6; \
++ ((ulong*)retarg)[3] = __oc_r7; \
++ _oc_tail
++
++
++/* r4-r5 returned in retarg pointer */
++#define _osi_call2_w2( type, name, number, t1, a1, t2, a2, t3, retarg ) \
++type name( t1 a1, t2 a2, t3 retarg ) \
++ _oc_head( dreg(r6), dreg(r7) ) \
++ __oc_r6 = (ulong)a1; \
++ __oc_r7 = (ulong)a2; \
++ _oc_syscall( number, rr(r4), rr(r5) ) \
++ _oc_input( ir(r6), ir(r7) ) \
++ ((ulong*)retarg)[0] = __oc_r4; \
++ ((ulong*)retarg)[1] = __oc_r5; \
++ _oc_tail
++
++/* r4-r7 returned in retarg pointer */
++#define _osi_call2_w4( type, name, number, t1, a1, t2, a2, t3, retarg ) \
++type name( t1 a1, t2 a2, t3 retarg ) \
++ _oc_head( dreg(r6), dreg(r7) ) \
++ __oc_r6 = (ulong)a1; \
++ __oc_r7 = (ulong)a2; \
++ _oc_syscall( number, rr(r4), rr(r5), rr(r6), rr(r7) ) \
++ _oc_input( ir(r6), ir(r7) ) \
++ ((ulong*)retarg)[0] = __oc_r4; \
++ ((ulong*)retarg)[1] = __oc_r5; \
++ ((ulong*)retarg)[2] = __oc_r6; \
++ ((ulong*)retarg)[3] = __oc_r7; \
++ _oc_tail
++
++#ifdef SHORT_REGLIST
++/* r4-r9 returned in retarg pointer */
++#define _osi_call2_w6( type, name, number, t1, a1, t2, a2, t3, retarg ) \
++type name( t1 a1, t2 a2, t3 retarg ) \
++ _oc_head( dreg(r6), dreg(r7), dreg(r10) ) \
++ __oc_r6 = (ulong)a1; \
++ __oc_r7 = (ulong)a2; \
++ __oc_r10 = (ulong)retarg; \
++ _oc_syscall_r10w6( number, ir(r6), ir(r7) ) \
++ _oc_tail
++
++#else /* SHORT_REGLIST */
++
++/* r4-r9 returned in retarg pointer */
++#define _osi_call2_w6( type, name, number, t1, a1, t2, a2, t3, retarg ) \
++type name( t1 a1, t2 a2, t3 retarg ) \
++ _oc_head( dreg(r6), dreg(r7), dreg(r8), dreg(r9) ) \
++ __oc_r6 = (ulong)a1; \
++ __oc_r7 = (ulong)a2; \
++ _oc_syscall( number, rr(r4), rr(r5), rr(r6), rr(r7), rr(r8), rr(r9) ) \
++ _oc_input( ir(r6), ir(r7) ) \
++ ((ulong*)retarg)[0] = __oc_r4; \
++ ((ulong*)retarg)[1] = __oc_r5; \
++ ((ulong*)retarg)[2] = __oc_r6; \
++ ((ulong*)retarg)[3] = __oc_r7; \
++ ((ulong*)retarg)[4] = __oc_r8; \
++ ((ulong*)retarg)[5] = __oc_r9; \
++ _oc_tail
++
++#endif /* SHORT_REGLIST */
++
++
++/************************************************************************/
++/* OSI call inlines */
++/************************************************************************/
++
++static inline _osi_call1( int, OSI_CallAvailable, OSI_CALL_AVAILABLE, int, osi_num );
++
++static inline _osi_call1( int, OSI_PutC, OSI_LOG_PUTC, int, ch );
++
++static inline _osi_call1( int, OSI_Debugger, OSI_DEBUGGER, int, num );
++static inline _osi_call0( int, OSI_Exit, OSI_EXIT );
++
++/* misc */
++static inline _osi_call0( ulong, OSI_GetLocalTime, OSI_GET_LOCALTIME );
++static inline _osi_call0( ulong, OSI_GetGMTTime, OSI_GET_GMT_TIME );
++static inline _osi_call1( int, OSI_USleep, OSI_USLEEP, int, usecs );
++
++/* NVRAM */
++static inline _osi_call0( int, OSI_NVRamSize, OSI_NVRAM_SIZE );
++static inline _osi_call1( int, OSI_ReadNVRamByte, OSI_READ_NVRAM_BYTE, int, offs );
++static inline _osi_call2( int, OSI_WriteNVRamByte, OSI_WRITE_NVRAM_BYTE, int, offs,
++ unsigned char, ch );
++
++/* keyboard stuff */
++static inline _osi_call0_w1( int, OSI_GetAdbKey2, OSI_GET_ADB_KEY, int *, raw_key );
++static inline _osi_call1( int, OSI_KbdCntrl, OSI_KBD_CNTRL, int, cmd );
++
++static inline int OSI_GetAdbKey( void )
++ { int dummy_raw_key; return OSI_GetAdbKey2( &dummy_raw_key ); }
++static inline _osi_call2( int, OSI_MapAdbKey, OSI_MAP_ADB_KEY, int, keycode, int, adbkey )
++static inline _osi_call1( int, OSI_KeycodeToAdb, OSI_KEYCODE_TO_ADB, int, keycode );
++static inline _osi_call0( int, OSI_SaveKeymapping, OSI_SAVE_KEYMAPPING );
++
++/* mouse support */
++struct osi_mouse;
++static inline _osi_call0_w5( int, OSI_GetMouse, OSI_GET_MOUSE, struct osi_mouse *, ret );
++static inline _osi_call0( int, OSI_GetMouseDPI, OSI_GET_MOUSE_DPI );
++
++/* video */
++static inline _osi_call2( int, OSI_SetVMode_, OSI_SET_VMODE, int, mode, int, depth_mode );
++struct osi_get_vmode_info;
++static inline _osi_call2_w6( int, OSI_GetVModeInfo_, OSI_GET_VMODE_INFO, int, mode, int, depth_mode,
++ struct osi_get_vmode_info *, ret );
++static inline _osi_call1( int, OSI_SetVPowerState, OSI_SET_VIDEO_POWER, int, power_state );
++static inline _osi_call2( int, OSI_SetColor, OSI_SET_COLOR, int, index, int, rgb );
++static inline _osi_call0_w1( int, OSI_VideoAckIRQ, OSI_VIDEO_ACK_IRQ, int *, events );
++
++static inline void OSI_RefreshPalette( void ) { OSI_SetColor(-1,0); }
++
++/* PIC (mac-io replacement) */
++static inline _osi_call1( int, OSI_PICMaskIRQ, OSI_PIC_MASK_IRQ, int, irq );
++static inline _osi_call1( int, OSI_PICUnmaskIRQ, OSI_PIC_UNMASK_IRQ, int, irq );
++static inline _osi_call2( int, OSI_PICAckIRQ, OSI_PIC_ACK_IRQ, int, irq, int, mask_it );
++static inline _osi_call0( int, OSI_PICGetActiveIRQ, OSI_PIC_GET_ACTIVE_IRQ );
++
++/* sound */
++static inline _osi_call1( int, OSI_SoundCntl, OSI_SOUND_CNTL, int, cmd );
++static inline _osi_call2( int, OSI_SoundCntl1, OSI_SOUND_CNTL, int, cmd, int, p1 );
++static inline _osi_call3( int, OSI_SoundCntl2, OSI_SOUND_CNTL, int, cmd, int, p1, int, p2 );
++static inline _osi_call0_w2( int, OSI_SoundIRQAck, OSI_SOUND_IRQ_ACK, ulong *, timestamp );
++static inline _osi_call3( int, OSI_SoundWrite, OSI_SOUND_WRITE, int, physbuf, int, len, int, restart );
++static inline _osi_call3( int, OSI_SoundSetVolume, OSI_SOUND_SET_VOLUME, int, hwvol, int, speakervol, int, mute );
++
++/* async block driver */
++struct ablk_disk_info;
++static inline _osi_call2_w4( int, OSI_ABlkDiskInfo, OSI_ABLK_DISK_INFO, int, channel, int, unit,
++ struct ablk_disk_info *, retinfo );
++static inline _osi_call1( int, OSI_ABlkKick, OSI_ABLK_KICK, int, channel );
++static inline _osi_call1_w1w1w1( int, OSI_ABlkIRQAck, OSI_ABLK_IRQ_ACK, int, channel, int *, req_count,
++ int *, active, int *, events );
++static inline _osi_call3( int, OSI_ABlkRingSetup, OSI_ABLK_RING_SETUP, int, channel, int, mphys, int, n_el );
++static inline _osi_call2( int, OSI_ABlkCntrl, OSI_ABLK_CNTRL, int, channel, int, cmd );
++static inline _osi_call3( int, OSI_ABlkCntrl1, OSI_ABLK_CNTRL, int, channel, int, cmd, int, param );
++static inline _osi_call5( int, OSI_ABlkSyncRead, OSI_ABLK_SYNC_READ, int, channel, int, unit,
++ int, blk, ulong, mphys, int, size );
++static inline _osi_call5( int, OSI_ABlkSyncWrite, OSI_ABLK_SYNC_WRITE, int, channel, int, unit,
++ int, blk, ulong, mphys, int, size );
++static inline _osi_call2( int, OSI_ABlkBlessDisk, OSI_ABLK_BLESS_DISK, int, channel, int, unit );
++
++static inline _osi_call0( int, OSI_CMountDrvVol, OSI_CMOUNT_DRV_VOL );
++
++/* enet2 */
++static inline _osi_call0( int, OSI_Enet2Open, OSI_ENET2_OPEN );
++static inline _osi_call0( int, OSI_Enet2Close, OSI_ENET2_CLOSE );
++static inline _osi_call3( int, OSI_Enet2RingSetup, OSI_ENET2_RING_SETUP, int, which_ring,
++ int, ring_mphys, int, n_el );
++static inline _osi_call2( int, OSI_Enet2Cntrl1, OSI_ENET2_CNTRL, int, cmd, int, param );
++static inline _osi_call1( int, OSI_Enet2Cntrl, OSI_ENET2_CNTRL, int, cmd );
++static inline _osi_call0( int, OSI_Enet2Kick, OSI_ENET2_KICK );
++
++static inline _osi_call0_w2( int, OSI_Enet2GetHWAddr__, OSI_ENET2_GET_HWADDR, ulong *, retbuf );
++static inline int OSI_Enet2GetHWAddr( unsigned char *addr ) {
++ int ret;
++ ulong buf[2];
++
++ ret = OSI_Enet2GetHWAddr__( buf );
++
++ ((ulong*)addr)[0] = buf[0];
++ ((ushort*)addr)[2] = (buf[1] >> 16);
++ return ret;
++}
++static inline _osi_call2( int, OSI_Enet2IRQAck, OSI_ENET2_IRQ_ACK, int, irq_enable, int, rx_head );
++
++/* PROM (device-tree) */
++static inline _osi_call2( int, OSI_PromIface, OSI_PROM_IFACE, int, what, int, ph );
++static inline _osi_call3( int, OSI_PromIface1, OSI_PROM_IFACE, int, what, int, ph, int, p1 );
++static inline _osi_call4( int, OSI_PromIface2, OSI_PROM_IFACE, int, what, int, ph, int, p1, int, p2 );
++static inline _osi_call5( int, OSI_PromIface3, OSI_PROM_IFACE, int, what, int, ph, int, p1, int, p2, int, p3 );
++static inline _osi_call2( int, OSI_PromPathIface, OSI_PROM_PATH_IFACE, int, what, const char *, p );
++
++/* emulation acceleration */
++static inline _osi_call1( int, OSI_MapinMregs, OSI_MAPIN_MREGS, ulong, mphys );
++static inline _osi_call3( int, OSI_EmuAccel, OSI_EMUACCEL, int, emuaccel_flags, int, param, int, inst_addr );
++
++/* timer frequency */
++static inline _osi_call1( int, OSI_MticksToUsecs, OSI_MTICKS_TO_USECS, ulong, mticks );
++static inline _osi_call1( int, OSI_UsecsToMticks, OSI_USECS_TO_MTICKS, ulong, usecs );
++
++/* fb info */
++struct osi_fb_info;
++static inline _osi_call0_w5( int, OSI_GetFBInfo, OSI_GET_FB_INFO, struct osi_fb_info *, retinfo );
++
++/* SCSI */
++static inline _osi_call0( int, OSI_SCSIAck, OSI_SCSI_ACK );
++static inline _osi_call1( int, OSI_SCSISubmit, OSI_SCSI_SUBMIT, int, req_mphys );
++static inline _osi_call2( int, OSI_SCSIControl, OSI_SCSI_CNTRL, int, sel, int, param );
++
++/* TTY */
++static inline _osi_call0( int, OSI_TTYGetc, OSI_TTY_GETC );
++static inline _osi_call1( int, OSI_TTYPutc, OSI_TTY_PUTC, int, ch );
++static inline _osi_call0( int, OSI_TTYIRQAck, OSI_TTY_IRQ_ACK );
++
++#endif /* _H_OSI_CALLS */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/performance.h
+@@ -0,0 +1,71 @@
++/*
++ * Creation Date: <2001/04/01 00:44:40 samuel>
++ * Time-stamp: <2003/01/27 02:42:03 samuel>
++ *
++ * <performance.h>
++ *
++ * performance counters
++ *
++ * Copyright (C) 2001, 2002 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_PERFORMANCE
++#define _H_PERFORMANCE
++
++typedef struct {
++ char *name;
++ unsigned long *ctrptr;
++} perf_info_t;
++
++extern perf_info_t g_perf_info_table[];
++
++#if defined(PERFORMANCE_INFO) && !defined(PERFORMANCE_INFO_LIGHT)
++#define BUMP(x) do { extern int gPerf__##x; gPerf__##x++; } while(0)
++#define BUMP_N(x,n) do { extern int gPerf__##x; gPerf__##x+=(n); } while(0)
++#else
++#define BUMP(x) do {} while(0)
++#define BUMP_N(x,n) do {} while(0)
++#endif
++
++
++/************************************************************************/
++/* tick counters */
++/************************************************************************/
++
++#ifdef PERFORMANCE_INFO
++
++#define TICK_CNTR_PUSH( kv ) do { \
++ int ind = (kv)->num_acntrs; \
++ acc_counter_t *c = &(kv)->acntrs[ind]; \
++ if( ind < MAX_ACC_CNTR_DEPTH ) { \
++ c->subticks=0; \
++ (kv)->num_acntrs++; \
++ asm volatile( "mftb %0" : "=r" (c->stamp) : ); \
++ } \
++} while(0)
++
++#define TICK_CNTR_POP( kv, name ) do { \
++ int ind = (kv)->num_acntrs; \
++ ulong now, ticks; \
++ asm volatile( "mftb %0" : "=r" (now) : ); \
++ if( --ind >= 0 ) { \
++ acc_counter_t *c = &(kv)->acntrs[ind]; \
++ (kv)->num_acntrs = ind; \
++ ticks = now - c->stamp - c->subticks; \
++ BUMP_N( name##_ticks, ticks ); \
++ if( ind ) \
++ (kv)->acntrs[ind-1].subticks += ticks; \
++ } \
++} while(0)
++
++#else
++#define TICK_CNTR_PUSH( kv ) do {} while(0)
++#define TICK_CNTR_POP( kv, name ) do {} while(0)
++#endif
++
++#endif /* _H_PERFORMANCE */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/platform.h
+@@ -0,0 +1,73 @@
++/*
++ * Creation Date: <2001/12/29 19:46:46 samuel>
++ * Time-stamp: <2004/02/07 16:51:57 samuel>
++ *
++ * <platform.h>
++ *
++ * Misc definitions needed on certain platforms
++ *
++ * Copyright (C) 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_PLATFORM
++#define _H_PLATFORM
++
++#ifndef NULL
++#define NULL 0
++#endif /* NULL */
++
++typedef unsigned long long ullong;
++typedef long long llong;
++
++typedef signed char s8;
++typedef unsigned char u8;
++typedef signed short s16;
++typedef unsigned short u16;
++typedef signed int s32;
++typedef unsigned int u32;
++typedef signed long long s64;
++typedef unsigned long long u64;
++
++#define TO_ULLONG( hi, lo ) (((ullong)(hi)<< 32 ) | (lo) )
++#define TO_LLONG( hi, lo ) (((llong)(hi)<< 32 ) | (lo) )
++
++#ifndef TEMP_FAILURE_RETRY
++# define TEMP_FAILURE_RETRY(expression) \
++ (__extension__ \
++ ({ long int __result; \
++ do __result = (long int) (expression); \
++ while (__result == -1L && errno == EINTR); \
++ __result; }))
++#endif
++
++/*
++ * Allow us to mark functions as 'deprecated' and have gcc emit a nice
++ * warning for each use, in hopes of speeding the functions removal.
++ * Usage is:
++ * int __deprecated foo(void)
++ */
++#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
++#define __builtin_expect(x, expected_value) (x)
++#endif
++
++#define likely(x) __builtin_expect((x),1)
++#define unlikely(x) __builtin_expect((x),0)
++
++#ifndef HAVE_CLEARENV
++static inline int clearenv( void ) { extern char **environ; environ=NULL; return 0; }
++#endif
++
++#ifdef __darwin__
++#define ARCH_STR "osx"
++#endif
++
++#ifdef __linux__
++#define ARCH_STR "linux"
++#endif
++
++#endif /* _H_PLATFORM */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/processor.h
+@@ -0,0 +1,409 @@
++/*
++ * Creation Date: <2000/10/29 01:43:29 samuel>
++ * Time-stamp: <2003/07/27 22:37:49 samuel>
++ *
++ * <processor.h>
++ *
++ * Extract from <asm/processor.h>
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_PROCESSOR
++#define _H_PROCESSOR
++
++
++#define PTE0_VSID(s) (((s)>>7) & 0xffffff)
++#define PTE0_V BIT(0)
++#define PTE0_H BIT(25)
++#define PTE0_API 0x3f
++
++#define PTE1_R BIT(23)
++#define PTE1_C BIT(24)
++#define PTE1_W BIT(25)
++#define PTE1_I BIT(26)
++#define PTE1_M BIT(27)
++#define PTE1_G BIT(28)
++#ifdef CONFIG_AMIGAONE
++/* Memory coherence locks up A1 compatible systems. */
++#define PTE1_WIMG (PTE1_W | PTE1_I | PTE1_G)
++#else
++#define PTE1_WIMG (PTE1_W | PTE1_I | PTE1_M | PTE1_G)
++#endif
++#define PTE1_PP 0x3
++#define PTE1_RPN (~0xfffUL)
++
++#define VSID_Ks BIT(1)
++#define VSID_Kp BIT(2)
++#define VSID_N BIT(3)
++
++
++
++#ifndef MSR_VEC
++
++#define MSR_VEC (1<<25) /* 6: Enable AltiVec */
++#define MSR_POW (1<<18) /* 13: Enable Power Management */
++#define MSR_TGPR (1<<17) /* 14: TLB Update registers in use */
++#define MSR_ILE (1<<16) /* 15: Interrupt Little Endian */
++#define MSR_EE (1<<15) /* 16: External Interrupt Enable */
++#define MSR_PR (1<<14) /* 17: Privilege Level */
++#define MSR_FP (1<<13) /* 18: Floating Point enable */
++#define MSR_ME (1<<12) /* 19: Machine Check Enable */
++#define MSR_FE0 (1<<11) /* 20: Floating Exception mode 0 */
++#define MSR_SE (1<<10) /* 21: Single Step */
++#define MSR_BE (1<<9) /* 22: Branch Trace */
++#define MSR_FE1 (1<<8) /* 23: Floating Exception mode 1 */
++#define MSR_IP (1<<6) /* 25: Exception prefix 0x000/0xFFF */
++#define MSR_IR (1<<5) /* 26: Instruction Relocate */
++#define MSR_DR (1<<4) /* 27: Data Relocate */
++#define MSR_PE (1<<2) /* 29: Performance Monitor Flag */
++#define MSR_RI (1<<1) /* 30: Recoverable Exception */
++#define MSR_LE (1<<0) /* 31: Little Endian */
++
++#endif /* MSR_VEC */
++
++#ifndef S_SPRG0
++
++#define NUM_SPRS 1024
++//#define S_XER 1
++#define S_RTCU_R 4 /* 601 RTC Upper/Lower (Reading) */
++#define S_RTCL_R 5
++//#define S_LR 8
++//#define S_CTR 9
++#define S_DSISR 18 /* Source Instruction Service Register */
++#define S_DAR 19 /* Data Address Register */
++#define S_RTCU_W 20 /* 601 RTC Upper/Lower (Writing) */
++#define S_RTCL_W 21
++#define S_DEC 22 /* Decrementer Register */
++#define S_SDR1 25 /* Table Search Description Register */
++#define S_SRR0 26 /* Save and Restore Register 0 */
++#define S_SRR1 27 /* Save and Restore Register 1 */
++#define S_VRSAVE 256 /* (AltiVec) Vector Register Save Register */
++#define S_TBRL 268 /* Time base Upper/Lower (Reading) */
++#define S_TBRU 269
++#define S_SPRG0 272 /* SPR General 0-3 */
++#define S_SPRG1 273
++#define S_SPRG2 274
++#define S_SPRG3 275
++#define S_SPRG4 276 /* SPR General 4-7 (7445/7455) */
++#define S_SPRG5 277
++#define S_SPRG6 278
++#define S_SPRG7 279
++#define S_EAR 282 /* External Access Register */
++#define S_TBWL 284 /* Time base Upper/Lower (Writing) */
++#define S_TBWU 285
++#define S_PVR 287 /* Processor Version Register */
++#define S_IBAT0U 528
++#define S_IBAT0L 529
++#define S_IBAT1U 530
++#define S_IBAT1L 531
++#define S_IBAT2U 532
++#define S_IBAT2L 533
++#define S_IBAT3U 534
++#define S_IBAT3L 535
++#define S_DBAT0U 536
++#define S_DBAT0L 537
++#define S_DBAT1U 538
++#define S_DBAT1L 539
++#define S_DBAT2U 540
++#define S_DBAT2L 541
++#define S_DBAT3U 542
++#define S_DBAT3L 543
++#define S_UMMCR2 928
++#define S_UPMC5 929 /* User Performance Monitor Counter Register */
++#define S_UPMC6 930
++#define S_UBAMR 935
++#define S_UMMCR0 936 /* User Monitor Mode Control Register */
++#define S_UPMC1 937
++#define S_UPMC2 938
++#define S_USIAR 939 /* User Sampled Instruction Address Register */
++#define S_UMMCR1 940
++#define S_UPMC3 941
++#define S_UPMC4 942 /* User Performance Monitor Counter Register 4 */
++#define S_USDAR 943 /* User Sampled Data Address Register */
++#define S_MMCR2 944 /* Monitor Mode Control Register */
++#define S_PMC5 945
++#define S_PMC6 946
++#define S_BAMR 951 /* Breakpoint Address Mask Register (74xx) */
++#define S_MMCR0 952 /* Monitor Mode Control Register 0 */
++#define S_PMC1 953 /* Performance Counter Register */
++#define S_PMC2 954
++#define S_SIAR 955 /* Sampled Instruction Address Register */
++#define S_MMCR1 956
++#define S_PMC3 957
++#define S_PMC4 958
++#define S_SDAR 959 /* Sampled Data Address Register */
++#define S_DMISS 976 /* 603 */
++#define S_DCMP 977 /* 603 */
++#define S_HASH1 978 /* 603 */
++#define S_HASH2 979 /* 603 */
++#define S_IMISS 980 /* 603 */
++#define S_TLBMISS 980 /* 7445/7455 */
++#define S_ICMP 981 /* 603 */
++#define S_PTEHI 981 /* 7445/7455 */
++#define S_RPA 982 /* 603 */
++#define S_PTELO 982 /* 7445/7455 */
++#define S_L3PM 983 /* L3 Private Memory Address Control Register */
++#define S_L3ITCR0 984 /* ??? */
++#define S_L3OHCR 1000 /* ??? */
++#define S_L3ITCR1 1001 /* ??? */
++#define S_L3ITCR2 1002 /* ??? */
++#define S_L3ITCR3 1003 /* ??? */
++#define S_HID0 1008 /* Hardware Implementation Registers */
++#define S_HID1 1009
++#define S_HID2 1010
++#define S_IABR S_HID2 /* HID2 - Instruction Address Breakpoint Register */
++#define S_ICTRL 1011 /* HID3 - Instruction Cache & Interrupt control reg */
++#define S_HID4 1012 /* HID4 - Instruction Address Compare 1 (?) */
++#define S_HID5 1013
++#define S_DABR S_HID5 /* HID5 - Data Address Breakpoint */
++#define S_MSSCR0 1014 /* HID6 - Memory Subsystem Control Register 0 */
++#define S_MSSCR1 1015 /* HID7 - Memory Subsystem Control Register 1 */
++#define S_LDSTCR 1016 /* HID8 - Load/Store Control Register */
++#define S_L2CR		1017	/* HID9 - Level 2 Cache Control Register */
++#define S_L3CR		1018	/* HID10 - Level 3 Cache Control Register (7450) */
++#define S_HID11 1019
++#define S_ICTC S_HID11 /* HID11 - Instruction Cache Throttling Control Reg */
++#define S_ICCR		S_HID11 /* Instruction Cache Cacheability Register */
++#define S_THRM1 1020 /* HID12 - Thermal Management Register 1 */
++#define S_THRM2 1021 /* HID13 - Thermal Management Register 2 */
++#define S_THRM3 1022 /* HID14 - Thermal Management Register 3 */
++#define S_HID15 1023
++#define S_PIR S_HID15 /* HID15 - Processor Identification Register */
++
++#endif /* S_SPRG0 */
++
++/* the kernel might define these too... */
++#if !defined(__KERNEL__) || defined(__ASSEMBLY__)
++
++/* Floating Point Status and Control Register (FPSCR) Fields */
++#define FPSCR_FX 0x80000000 /* FPU exception summary */
++#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
++#define FPSCR_VX 0x20000000 /* Invalid operation summary */
++#define FPSCR_OX 0x10000000 /* Overflow exception summary */
++#define FPSCR_UX 0x08000000 /* Underflow exception summary */
++#define FPSCR_ZX	0x04000000	/* Zero-divide exception summary */
++#define FPSCR_XX 0x02000000 /* Inexact exception summary */
++#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
++#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
++#define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */
++#define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */
++#define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */
++#define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */
++#define FPSCR_FR 0x00040000 /* Fraction rounded */
++#define FPSCR_FI 0x00020000 /* Fraction inexact */
++#define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */
++#define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */
++#define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */
++#define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */
++#define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */
++#define FPSCR_VE 0x00000080 /* Invalid op exception enable */
++#define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */
++#define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */
++#define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */
++#define FPSCR_XE 0x00000008 /* FP inexact exception enable */
++#define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */
++#define FPSCR_RN 0x00000003 /* FPU rounding control */
++
++/* SPR_HID0 */
++#define HID0_EMCP (1<<31) /* Enable Machine Check pin */
++#define HID0_EBA (1<<29) /* Enable Bus Address Parity */
++#define HID0_EBD (1<<28) /* Enable Bus Data Parity */
++#define HID0_SBCLK (1<<27)
++#define HID0_EICE (1<<26)
++#define HID0_ECLK (1<<25)
++#define HID0_PAR (1<<24)
++#define HID0_DOZE (1<<23)
++#define HID0_NAP (1<<22)
++#define HID0_SLEEP (1<<21)
++#define HID0_DPM (1<<20)
++#define HID0_NHR (1<<16) /* Not Hard Reset */
++#define HID0_ICE (1<<15) /* Instruction Cache Enable */
++#define HID0_DCE (1<<14) /* Data Cache Enable */
++#define HID0_ILOCK (1<<13) /* Instruction Cache Lock */
++#define HID0_DLOCK (1<<12) /* Data Cache Lock */
++#define HID0_ICFI (1<<11) /* Instr. Cache Flash Invalidate */
++#define HID0_DCFI (1<<10) /* Data Cache Flash Invalidate */
++#define HID0_SPD (1<<9) /* Speculative disable */
++#define HID0_SGE (1<<7) /* Store Gathering Enable */
++#define HID0_SIED (1<<7) /* Serial Instr. Execution [Disable] */
++#define HID0_BTIC (1<<5) /* Branch Target Instruction Cache Enable */
++#define HID0_ABE (1<<3) /* Address Broadcast Enable */
++#define HID0_BHT (1<<2) /* Branch History Table Enable */
++#define HID0_BTCD (1<<1) /* Branch target cache disable */
++
++#define L2CR_L2E BIT(0) /* L2 enable */
++#define L2CR_L2PE BIT(1) /* L2 data parity generation and checking */
++#define L2CR_L2SIZ_512K BIT(2)
++#define L2CR_L2SIZ_256K BIT(3)
++#define L2CR_L2SIZ_1MB (BIT(2)|BIT(3))
++#define L2CR_L2CLK_1	BIT(6)		/* L2 clock ratio */
++#define L2CR_L2CLK_15 (BIT(6)*2)
++#define L2CR_L2CLK_2 (BIT(6)*4)
++#define L2CR_L2CLK_25 (BIT(6)*5)
++#define L2CR_L2CLK_3 (BIT(6)*6)
++#define L2CR_L2RAM_FT 0 /* flow-through (reg-buf) synchronous SRAM */
++#define L2CR_L2RAM_PB	BIT(7)		/* Pipelined (reg-reg) synchronous burst SRAM */
++#define L2CR_L2RAM_PLW	(BIT(7)|BIT(8))	/* Pipelined (reg-reg) synchronous late-write */
++#define L2CR_L2DO BIT(9) /* L2 data-only */
++#define L2CR_L2I BIT(10) /* L2 global invalidate */
++#define L2CR_L2CTL BIT(11) /* L2 RAM control (ZZ enable, low-power mode) */
++#define L2CR_L2WT BIT(12) /* L2 write-through */
++#define L2CR_L2TS BIT(13) /* L2 test support */
++#define L2CR_L2OH_05 0 /* L2 output hold 0.5 nS */
++#define L2CR_L2OH_10 BIT(15) /* L2 output hold 1.0 nS */
++#define L2CR_L2SL BIT(16) /* L2 DLL slow (use if bus freq < 150 MHz) */
++#define L2CR_L2DF BIT(17) /* L2 differential clock */
++#define L2CR_L2BYP BIT(18) /* L2 DLL bypass */
++#define L2CR_L2IP BIT(31) /* L2 global invalidate in progress */
++
++/* SPR_THRM1 */
++#define THRM1_TIN (1 << 31)
++#define THRM1_TIV (1 << 30)
++#define THRM1_THRES(x) ((x&0x7f)<<23)
++#define THRM3_SITV(x) ((x&0x3fff)<<1)
++#define THRM1_TID (1<<2)
++#define THRM1_TIE (1<<1)
++#define THRM1_V (1<<0)
++
++/* SPR_THRM3 */
++#define THRM3_E (1<<0)
++
++/* Processor Version Numbers */
++
++#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
++#define PVR_REV(pvr)	(((pvr) >>  0) & 0xFFFF)	/* Revision field */
++
++#define PVR_403GA 0x00200000
++#define PVR_403GB 0x00200100
++#define PVR_403GC 0x00200200
++#define PVR_403GCX 0x00201400
++#define PVR_405GP 0x40110000
++#define PVR_601 0x00010000
++#define PVR_602 0x00050000
++#define PVR_603 0x00030000
++#define PVR_603e 0x00060000
++#define PVR_603ev 0x00070000
++#define PVR_603r 0x00071000
++#define PVR_604 0x00040000
++#define PVR_604e 0x00090000
++#define PVR_604r 0x000A0000
++#define PVR_620 0x00140000
++#define PVR_740 0x00080000
++#define PVR_750 PVR_740
++#define PVR_740P 0x10080000
++#define PVR_750P PVR_740P
++#define PVR_821 0x00500000
++#define PVR_823 PVR_821
++#define PVR_850 PVR_821
++#define PVR_860 PVR_821
++#define PVR_7400 0x000C0000
++#define PVR_8240 0x00810100
++#define PVR_8260 PVR_8240
++
++/* Vector VSCR register */
++#define VSCR_NJ 0x10000
++#define VSCR_SAT 0x1
++
++#endif /* !__KERNEL__ || __ASSEMBLY__ */
++
++
++#ifdef __ASSEMBLY__
++
++#define CTR S_CTR /* Counter Register */
++#define DAR S_DAR /* Data Address Register */
++#define DABR S_DABR /* Data Address Breakpoint Register */
++#define DBAT0L S_DBAT0L /* Data BAT 0 Lower Register */
++#define DBAT0U S_DBAT0U /* Data BAT 0 Upper Register */
++#define DBAT1L S_DBAT1L /* Data BAT 1 Lower Register */
++#define DBAT1U S_DBAT1U /* Data BAT 1 Upper Register */
++#define DBAT2L S_DBAT2L /* Data BAT 2 Lower Register */
++#define DBAT2U S_DBAT2U /* Data BAT 2 Upper Register */
++#define DBAT3L S_DBAT3L /* Data BAT 3 Lower Register */
++#define DBAT3U S_DBAT3U /* Data BAT 3 Upper Register */
++#define DCMP S_DCMP /* Data TLB Compare Register */
++#define DEC S_DEC /* Decrement Register */
++#define DMISS S_DMISS /* Data TLB Miss Register */
++#define DSISR S_DSISR /* Data Storage Interrupt Status Register */
++#define EAR S_EAR /* External Address Register */
++#define HASH1 S_HASH1 /* Primary Hash Address Register */
++#define HASH2 S_HASH2 /* Secondary Hash Address Register */
++#define HID0 S_HID0 /* Hardware Implementation Register 0 */
++#define HID1 S_HID1 /* Hardware Implementation Register 1 */
++#define IABR S_IABR /* Instruction Address Breakpoint Register */
++#define IBAT0L S_IBAT0L /* Instruction BAT 0 Lower Register */
++#define IBAT0U S_IBAT0U /* Instruction BAT 0 Upper Register */
++#define IBAT1L S_IBAT1L /* Instruction BAT 1 Lower Register */
++#define IBAT1U S_IBAT1U /* Instruction BAT 1 Upper Register */
++#define IBAT2L S_IBAT2L /* Instruction BAT 2 Lower Register */
++#define IBAT2U S_IBAT2U /* Instruction BAT 2 Upper Register */
++#define IBAT3L S_IBAT3L /* Instruction BAT 3 Lower Register */
++#define IBAT3U S_IBAT3U /* Instruction BAT 3 Upper Register */
++#define ICMP S_ICMP /* Instruction TLB Compare Register */
++#define IMISS S_IMISS /* Instruction TLB Miss Register */
++#define IMMR S_IMMR /* PPC 860/821 Internal Memory Map Register */
++#define L2CR S_L2CR /* PPC 750 L2 control register */
++#define PVR S_PVR /* Processor Version */
++#define RPA S_RPA /* Required Physical Address Register */
++#define SDR1 S_SDR1 /* MMU hash base register */
++#define SPR0 S_SPRG0 /* Supervisor Private Registers */
++#define SPR1 S_SPRG1
++#define SPR2 S_SPRG2
++#define SPR3 S_SPRG3
++#define SPRG0 S_SPRG0
++#define SPRG1 S_SPRG1
++#define SPRG2 S_SPRG2
++#define SPRG3 S_SPRG3
++#define SRR0 S_SRR0 /* Save and Restore Register 0 */
++#define SRR1 S_SRR1 /* Save and Restore Register 1 */
++#define	TBRL	S_TBRL	/* Time Base Read Lower Register */
++#define TBRU S_TBRU /* Time Base Read Upper Register */
++#define TBWL S_TBWL /* Time Base Write Lower Register */
++#define TBWU S_TBWU /* Time Base Write Upper Register */
++#define ICTC S_ICTC
++#define THRM1 S_THRM1 /* Thermal Management Register 1 */
++#define THRM2 S_THRM2 /* Thermal Management Register 2 */
++#define THRM3 S_THRM3 /* Thermal Management Register 3 */
++#define SIAR S_SIAR
++#define SDAR S_SDAR
++#define XER 1
++
++#define SR0 0 /* Segment registers */
++#define SR1 1
++#define SR2 2
++#define SR3 3
++#define SR4 4
++#define SR5 5
++#define SR6 6
++#define SR7 7
++#define SR8 8
++#define SR9 9
++#define SR10 10
++#define SR11 11
++#define SR12 12
++#define SR13 13
++#define SR14 14
++#define SR15 15
++
++#endif /* __ASSEMBLY__ */
++
++/* opcode macros */
++
++#define OPCODE_PRIM(n) ( ((ulong)(n)) >> 26 )
++#define OPCODE_EXT(n) ( (((ulong)(n)) >> 1) & 0x3ff )
++#define OPCODE(op,op_ext) ( ((op)<<10) + op_ext )
++
++#define B1(n) ( (((ulong)(n)) >> 21) & 0x1f )
++#define B2(n) ( (((ulong)(n)) >> 16) & 0x1f )
++#define B3(n) ( (((ulong)(n)) >> 11) & 0x1f )
++
++#define BD(n) ((ulong)((n) & 0x7fff) + (((n) & 0x8000) ? (ulong)0xffff8000 : 0))
++
++#define SPRNUM_FLIP( v ) ( (((v)>>5) & 0x1f) | (((v)<<5) & 0x3e0) )
++
++#endif /* _H_PROCESSOR */
++
+--- /dev/null
++++ b/drivers/macintosh/mol/include/prom.h
+@@ -0,0 +1,46 @@
++/*
++ * Creation Date: <1999/02/22 23:22:17 samuel>
++ * Time-stamp: <2003/06/02 16:17:36 samuel>
++ *
++ * <prom.h>
++ *
++ * OF device tree structs
++ *
++ * Copyright (C) 1999, 2000, 2002, 2003 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_PROM
++#define _H_PROM
++
++typedef void *p_phandle_t;
++
++typedef struct {
++ int nirq;
++ int irq[5];
++ unsigned long controller[5];
++} irq_info_t;
++
++typedef struct p_property {
++ char *name;
++ int length;
++ unsigned char *value;
++ struct p_property *next;
++} p_property_t;
++
++typedef struct mol_device_node {
++ p_phandle_t node;
++ struct p_property *properties;
++ struct mol_device_node *parent;
++ struct mol_device_node *child;
++ struct mol_device_node *sibling;
++ struct mol_device_node *next; /* next device of same type */
++ struct mol_device_node *allnext; /* next in list of all nodes */
++ char *unit_string;
++} mol_device_node_t;
++
++#endif /* _H_PROM */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/rvec.h
+@@ -0,0 +1,147 @@
++/*
++ * Creation Date: <2001/01/26 21:33:45 samuel>
++ * Time-stamp: <2004/02/08 20:08:20 samuel>
++ *
++ * <return_vectors.h>
++ *
++ * Possible mac-return vectors (see mainloop.S)
++ *
++ * Copyright (C) 2000, 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_RVEC
++#define _H_RVEC
++
++
++/* ---------------------------------------------------------------------------- */
++
++#define NRVECS_LOG2 6
++#define NUM_RVECS 64 /* = 2 ^ NRVECS_LOG2 */
++#define RVEC_MASK (NUM_RVECS-1)
++
++/* ---------------------------------------------------------------------------- */
++
++#define RVEC_NOP 0 /* Must be zero */
++#ifdef __darwin__
++#define RVEC_CALL_KERNEL 1 /* call kernel */
++#endif
++#define RVEC_ENABLE_FPU 3 /* Load up FPU */
++
++#define RVEC_TRACE_TRAP 6
++#define RVEC_ISI_TRAP 7 /* r4=nip, r5=srr1 */
++#define RVEC_DSI_TRAP 8 /* r4=dar, r5=srr1 */
++#define RVEC_ALIGNMENT_TRAP 9 /* r4=dar, r5=srr1 */
++#ifdef EMULATE_603
++#define RVEC_DMISS_LOAD_TRAP 10
++#define RVEC_DMISS_STORE_TRAP 11
++#define RVEC_IMISS_TRAP 12
++#endif
++
++#define RVEC_SPR_READ 13 /* r4=spr#, r5=gprnum */
++#define RVEC_SPR_WRITE 14 /* r4=spr#, r5=value */
++#define RVEC_PRIV_INST 15 /* r4=opcode */
++#define RVEC_ILLEGAL_INST 16 /* r4=opcode */
++
++#define RVEC_UNUSUAL_PROGRAM_EXCEP 17 /* r4=opcode, r5=srr1 */
++
++#define RVEC_ALTIVEC_UNAVAIL_TRAP 18
++#define RVEC_ALTIVEC_ASSIST 19 /* r4=srr1 */
++#define RVEC_ENABLE_ALTIVEC 20
++
++#define RVEC_EXIT 21
++/* 22 was RVEC_INTERRUPT */
++#define RVEC_OSI_SYSCALL 23
++#define RVEC_TIMER 24
++
++#define RVEC_IO_READ 25
++#define RVEC_IO_WRITE 26
++
++#define RVEC_MSR_POW 27 /* (MSR_POW 0->1) => doze */
++
++/* error/debug */
++#define RVEC_UNUSUAL_DSISR_BITS 28 /* dar, dsisr (bit 0,5,9 or 11 was set) */
++#define RVEC_MMU_IO_SEG_ACCESS 29 /* IO segment access (more or less unused) */
++#define RVEC_INTERNAL_ERROR 30
++#define RVEC_DEBUGGER 31
++#define RVEC_BREAK 32 /* r4 = break_flag */
++#define RVEC_BAD_NIP 33 /* r4 = phys_nip */
++#define RVEC_OUT_OF_MEMORY 34 /* fatal out of memory... */
++
++#define RVEC_CHECK_IRQS 35 /* check interrupts */
++
++
++/************************************************************************/
++/* MOL kernel/emulator switch magic */
++/************************************************************************/
++
++/* magic to be loaded into r4/r5 before the illegal instruction is issued */
++#define MOL_ENTRY_R4_MAGIC 0x7ba5
++#define MOL_INITIALIZE_FLAG 0x8000
++#define MOL_KERNEL_ENTRY_MAGIC mfmsr r0 /* any privileged instruction will do */
++
++
++/************************************************************************/
++/* Kernel definitions */
++/************************************************************************/
++
++#if defined(__KERNEL__) && !defined( __ASSEMBLY__ )
++
++#define RVEC_RETURN_1( mregsptr, rvec, arg1 ) \
++ ({ (mregsptr)->rvec_param[0] = (ulong)(arg1); \
++ return rvec; })
++
++#define RVEC_RETURN_2( mregsptr, rvec, arg1, arg2 ) \
++ ({ (mregsptr)->rvec_param[0] = (ulong)(arg1); \
++ (mregsptr)->rvec_param[1] = (ulong)(arg2); \
++ return rvec; })
++
++#define RVEC_RETURN_3( mregsptr, rvec, arg1, arg2, arg3 ) \
++ ({ (mregsptr)->rvec_param[0] = (ulong)(arg1); \
++ (mregsptr)->rvec_param[1] = (ulong)(arg2); \
++ (mregsptr)->rvec_param[2] = (ulong)(arg3); \
++ return rvec; })
++
++#endif /* !__ASSEMBLY__ && __KERNEL__ */
++
++
++/************************************************************************/
++/* userspace definitions */
++/************************************************************************/
++
++#if !defined(__KERNEL__) || defined(__MPC107__)
++
++#if !defined(__ASSEMBLY__)
++#if !defined(__KERNEL__)
++
++typedef struct {
++ int vnum;
++ void *vector;
++ const char *name;
++} rvec_entry_t;
++
++extern void rvec_init( void );
++extern void rvec_cleanup( void );
++extern void set_rvector( uint vnum, void *vector, const char *vector_name );
++extern void set_rvecs( rvec_entry_t *table, int tablesize );
++#endif
++
++/* this struct is private to rvec.c/mainloop.S (offsets are HARDCODED) */
++typedef struct {
++ int (*rvec)( int rvec /*, arguments */ );
++ int dbg_count;
++ const char *name;
++ int filler;
++} priv_rvec_entry_t;
++#endif /* __ASSEMBLY__ */
++
++#define RVEC_ESIZE_LOG2 4 /* 2^n = sizeof(priv_rvec_entry_t) */
++#define RVEC_ESIZE 16 /* sizeof(priv_rvec_entry_t) */
++
++#endif /* __KERNEL__ || __MPC107__ */
++
++#endif /* _H_RVEC */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/skiplist.h
+@@ -0,0 +1,87 @@
++/*
++ * Creation Date: <2003/03/03 22:59:04 samuel>
++ * Time-stamp: <2004/02/21 12:17:38 samuel>
++ *
++ * <skiplist.h>
++ *
++ * Skiplist implementation
++ *
++ * Copyright (C) 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_SKIPLIST
++#define _H_SKIPLIST
++
++#define SKIPLIST_MAX_HEIGHT 16
++
++typedef struct skiplist_el skiplist_el_t;
++
++typedef struct {
++#ifdef __darwin__
++ ulong next_phys; /* for usage from assembly */
++#endif
++ skiplist_el_t *next;
++} skiplist_level_t;
++
++/* data (of datasize) is stored before the skiplist_el */
++typedef struct skiplist_el {
++ int key;
++ skiplist_level_t level[1]; /* level 0 */
++ /* level 1..n are optionally stored here */
++} *skiplist_iter_t;
++
++typedef struct {
++ int nel;
++ int slevel; /* start level */
++ int datasize; /* size of data (stored before each key) */
++
++ skiplist_level_t root[SKIPLIST_MAX_HEIGHT];
++ skiplist_el_t nil_el;
++
++ skiplist_level_t freelist; /* key = level, linked list in next[0] */
++} skiplist_t;
++
++static inline int
++skiplist_getnext( skiplist_t *sl, skiplist_iter_t *iterator, char **data )
++{
++ skiplist_el_t *el = *iterator;
++ *data = (char*)el - sl->datasize;
++ *iterator = el->level[0].next;
++ return el != &sl->nil_el;
++}
++
++static inline int
++skiplist_iter_getkey( skiplist_t *sl, char *data )
++{
++ return ((skiplist_el_t*)(data + sl->datasize))->key;
++}
++
++static inline skiplist_iter_t
++skiplist_iterate( skiplist_t *sl )
++{
++ return sl->root[0].next;
++}
++
++static inline int
++skiplist_needalloc( skiplist_t *sl )
++{
++ return !sl->freelist.next;
++}
++
++typedef void (*skiplist_el_callback)( char *data, int ind, int n, void *usr1, void *usr2 );
++
++extern void skiplist_init( skiplist_t *sl, int datasize );
++extern int skiplist_prealloc( skiplist_t *sl, char *buf, unsigned int size,
++ skiplist_el_callback callback, void *usr1, void *usr2 );
++
++extern char *skiplist_insert( skiplist_t *sl, int key );
++extern char *skiplist_delete( skiplist_t *sl, int key );
++extern char *skiplist_lookup( skiplist_t *sl, int key );
++
++
++#endif /* _H_SKIPLIST */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/tlbie.h
+@@ -0,0 +1,102 @@
++/*
++ * Creation Date: <2003/05/27 16:56:10 samuel>
++ * Time-stamp: <2003/08/16 16:55:31 samuel>
++ *
++ * <tlbie.h>
++ *
++ * tlbie and PTE operations
++ *
++ * Copyright (C) 2003 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ *
++ */
++
++#ifndef _H_TLBIE
++#define _H_TLBIE
++
++
++#ifdef CONFIG_SMP
++extern void (*xx_tlbie_lowmem)( void /* special */ );
++extern void (*xx_store_pte_lowmem)( void /* special */ );
++extern int compat_hash_table_lock;
++
++static inline void
++__tlbie( int ea )
++{
++ register ulong _ea __asm__ ("r3");
++ register ulong _lock __asm__ ("r7");
++ register ulong _func __asm__ ("r9");
++
++ _func = (ulong)xx_tlbie_lowmem;
++ _lock = (ulong)&compat_hash_table_lock;
++ _ea = ea;
++
++ asm volatile (
++ "mtctr 9 \n"
++ "li 8,0x1235 \n" /* lock value */
++ "mfmsr 10 \n"
++ "rlwinm 0,10,0,17,15 \n" /* clear MSR_EE */
++ "mtmsr 0 \n"
++ "bctrl \n" /* modifies r0 */
++ "mtmsr 10 \n"
++ :
++ : "r" (_ea), "r" (_lock), "r" (_func)
++ : "ctr", "lr", "cc", "r8", "r0", "r10"
++ );
++}
++
++static inline void
++__store_PTE( int ea, unsigned long *slot, int pte0, int pte1 )
++{
++ register ulong _ea __asm__ ("r3");
++ register ulong _pte_slot __asm__ ("r4");
++ register ulong _pte0 __asm__ ("r5");
++ register ulong _pte1 __asm__ ("r6");
++ register ulong _lock __asm__ ("r7");
++ register ulong _func __asm__ ("r9");
++
++ _func = (ulong)xx_store_pte_lowmem;
++ _ea = ea;
++ _pte_slot = (ulong)slot;
++ _pte0 = pte0;
++ _pte1 = pte1;
++ _lock = (ulong)&compat_hash_table_lock;
++
++ asm volatile (
++ "mtctr 9 \n"
++ "li 8,0x1234 \n" /* lock value */
++ "mfmsr 10 \n"
++ "rlwinm 0,10,0,17,15 \n" /* clear MSR_EE */
++ "mtmsr 0 \n"
++ "bctrl \n" /* modifies r0 */
++ "mtmsr 10 \n"
++ :
++ : "r" (_ea), "r" (_pte_slot), "r" (_pte0), "r" (_pte1), "r" (_lock), "r" (_func)
++ : "ctr", "lr", "cc", "r0", "r8", "r10"
++ );
++}
++
++#else /* CONFIG_SMP */
++extern void (*xx_store_pte_lowmem)( unsigned long *slot, int pte0, int pte1 );
++
++static inline void __tlbie( int ea ) {
++ asm volatile ("tlbie %0" : : "r"(ea));
++}
++
++static inline void
++__store_PTE( int ea, unsigned long *slot, int pte0, int pte1 )
++{
++ ulong flags;
++ local_irq_save(flags);
++ (*xx_store_pte_lowmem)( slot, pte0, pte1 );
++ local_irq_restore(flags);
++ __tlbie( ea );
++}
++
++#endif /* CONFIG_SMP */
++
++
++#endif /* _H_TLBIE */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/uaccess.h
+@@ -0,0 +1,36 @@
++/*
++ * Creation Date: <2004/02/01 20:02:11 samuel>
++ * Time-stamp: <2004/02/01 20:02:11 samuel>
++ *
++ * <uaccess.h>
++ *
++ *
++ *
++ * Copyright (C) 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ *
++ */
++
++#ifndef _H_UACCESS
++#define _H_UACCESS
++
++
++static inline unsigned int copy_to_user_mol( void *to, const void *from, ulong len ) {
++ return copy_to_user( to, from, len );
++}
++static inline unsigned int copy_from_user_mol( void *to, const void *from, ulong len ) {
++ return copy_from_user( to, from, len );
++}
++
++static inline unsigned int copy_int_to_user( int *to, int val ) {
++ return copy_to_user_mol( to, &val, sizeof(int) );
++}
++static inline unsigned int copy_int_from_user( int *retint, int *userptr ) {
++ return copy_from_user_mol( retint, userptr, sizeof(int) );
++}
++
++
++#endif /* _H_UACCESS */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/vector.h
+@@ -0,0 +1,189 @@
++/*
++ * Creation Date: <2003/05/26 00:00:28 samuel>
++ * Time-stamp: <2004/03/07 14:44:50 samuel>
++ *
++ * <vector.h>
++ *
++ * Vector hooks
++ *
++ * Copyright (C) 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ *
++ */
++
++#ifndef _H_VECTOR
++#define _H_VECTOR
++
++#define MOL_SPRG2_MAGIC 0x1779
++
++#ifdef __MPC107__
++#include "mpcvector.h"
++#else
++
++#define PERFMON_VECTOR 0xf00
++
++
++/************************************************************************/
++/* Vector entry point definitions */
++/************************************************************************/
++
++/*
++ * This code uses the dynamic linkage/action symbol functionality of
++ * the MOL kernel loader to automatically install the hooks. Refer to
++ * hook.c for the actual implementation.
++ */
++
++/* Description of ACTION_RELOC_HOOK:
++ *
++ * .long ACTION_RELOC_HOOK
++ * .long vector
++ * .long #bytes to copy to lowmem
++ * .long offset to vret function
++ * -- offsets are calculated from here --
++ */
++
++mDEFINE(VECTOR_HOOK, [v], [
++ balign_32
++ ACTION_PB( ACTION_RELOC_HOOK )
++ .long _v
++ .long vhook_end_[]_v - vhook_[]_v
++ .long vret_[]_v - vhook_[]_v
++vhook_[]_v:
++ mtsprg_a0 r3
++ addis r3,0,0 /* [1] hook address inserted */
++ mtsprg_a1 r1
++ ori r3,r3,0 /* [3] at module initialization */
++ mfctr r1
++ mtctr r3
++ bctr
++
++vret_[]_v:
++ nop /* overwritten instruction is inserted here */
++ ba _v + 0x4
++vhook_end_[]_v:
++
++ .text
++ /* entrypoint */
++])
++
++/* these macros are to be used from the not_mol vector hook */
++#define CONTINUE_TRAP( v ) \
++ mfsprg_a0 r3 ; \
++ fix_sprg2 /**/ R1 /* sprg2 == sprg_a0 */ ; \
++ mfsprg_a1 r1 ; \
++ ACTION_1( ACTION_VRET, v ) /* ba vret_xxx */
++
++#define ABORT_TRAP( dummy_v ) \
++ mfsprg_a0 r3 ; \
++ fix_sprg2 /**/ R1 /* sprg2 == sprg_a0 */ ; \
++ mfsprg_a1 r1 ; \
++ rfi
++
++/* SPRG0,1 = saved r3,r1, r1 = saved lr */
++mDEFINE(VECTOR_, [v, dummy_str, secondary, not_mol_label], [
++
++not_mol_[]_v:
++ mtcr r3
++ CONTINUE_TRAP( _v )
++
++secondary_int_[]_v:
++ li r3,_v
++ b _secondary
++
++ VECTOR_HOOK( _v )
++
++ /* entrypoint */
++ mtctr r1
++ mfcr r3
++ mfsprg_a2 r1
++ cmpwi r1,MOL_SPRG2_MAGIC
++ bne- _not_mol_label
++soft_603_entry_[]_v:
++ mfsrr1 r1
++ andi. r1,r1,MSR_PR /* MSR_PR set? */
++ mfsprg_a3 r1
++ beq- secondary_int_[]_v /* if not, take a secondary trap? */
++])
++
++#define VECTOR(v, dummy_str, secondary) \
++ VECTOR_(v, dummy_str, secondary, not_mol_##v )
++
++/* this macro takes an exception from mac mode (call save_middle_regs first) */
++#define TAKE_EXCEPTION( v ) \
++ bl take_exception ; \
++ ACTION_1( ACTION_VRET, v )
++
++/* no need to relocate the 0xf00 trap */
++#define PERFMON_VECTOR_RELOCATION( newvec )
++
++
++/************************************************************************/
++/* 603 vector HOOKs (r0-r3, cr0 saved by hardware) */
++/************************************************************************/
++
++mDEFINE(VECTOR_603, [v, dummy_str], [
++ balign_32
++ ACTION_PB( ACTION_RELOC_HOOK )
++ .long _v
++ .long vhook_end_[]_v - vhook_[]_v
++ .long vret_[]_v - vhook_[]_v
++vhook_[]_v:
++ mfsprg_a2 r1
++ addis r3,0,0 /* [1] hook address inserted */
++ cmpwi r1,MOL_SPRG2_MAGIC
++ ori r3,r3,0 /* [3] at module initialization */
++ bne vret_[]_v
++ mfctr r0
++ mtctr r3
++ bctr
++
++vret_[]_v:
++ nop /* overwritten instruction is inserted here */
++ ba _v + 0x4
++vhook_end_[]_v:
++
++ .text
++ /* entrypoint */
++])
++
++
++/* all register are assumed to be unmodified */
++mDEFINE(SOFT_VECTOR_ENTRY_603, [v], [
++ mtsprg_a0 r3
++ mtsprg_a1 r1
++ mfcr r3
++ b soft_603_entry_[]_v
++])
++
++
++/************************************************************************/
++/* FUNCTION_HOOK */
++/************************************************************************/
++
++mDEFINE(FHOOK, [symind], [
++ ACTION_PB( ACTION_HOOK_FUNCTION )
++ .long _symind
++ .long fhook_end_[]_symind - fhook_[]_symind
++ .long fret_[]_symind - fhook_[]_symind
++fhook_[]_symind:
++ mflr r10
++ addis r9,0,0 /* [1] address inserted */
++ ori r9,r9,0 /* [2] at runtime */
++ mtctr r9
++ bctrl
++ mtlr r10
++fret_[]_symind:
++ nop /* overwritten instruction is inserted here */
++ nop /* return (through a relative branch) */
++fhook_end_[]_symind:
++
++ .text
++ /* hook goes here */
++])
++
++
++#endif /* __MPC107__ */
++#endif /* _H_VECTOR */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/version.h
+@@ -0,0 +1,11 @@
++
++#ifndef _H_VERSION
++#define _H_VERSION
++
++#include "molversion.h"
++
++#define MOL_VERSION ((MOL_MAJOR_VERSION << 16) \
++ + (MOL_MINOR_VERSION << 8) \
++ + MOL_PATCHLEVEL )
++
++#endif /* _H_VERSION */
+--- /dev/null
++++ b/drivers/macintosh/mol/include/weaksym.h
+@@ -0,0 +1,39 @@
++/*
++ * Creation Date: <2001/08/02 23:53:57 samuel>
++ * Time-stamp: <2001/08/03 00:30:23 samuel>
++ *
++ * <weak.h>
++ *
++ * Support of weak symbols (extract, stolen from glibc)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifndef _H_WEAKSYM
++#define _H_WEAKSYM
++
++
++/* Define ALIASNAME as a strong alias for NAME. */
++#define strong_alias(name, aliasname) _strong_alias(name, aliasname)
++#define _strong_alias(name, aliasname) \
++ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
++
++/* This comes between the return type and function name in
++ a function definition to make that definition weak. */
++#define weak_function __attribute__ ((weak))
++#define weak_const_function __attribute__ ((weak, __const__))
++
++/* Define ALIASNAME as a weak alias for NAME. */
++#define weak_alias(name, aliasname) _weak_alias (name, aliasname)
++#define _weak_alias(name, aliasname) \
++ extern __typeof (name) aliasname __attribute__ ((weak, alias (#name)));
++
++/* Declare SYMBOL as weak undefined symbol (resolved to 0 if not defined). */
++#define weak_extern(symbol) _weak_extern (symbol)
++# define _weak_extern(symbol) asm (".weak " #symbol);
++
++
++#endif /* _H_WEAKSYM */
+--- /dev/null
++++ b/drivers/macintosh/mol/init.c
+@@ -0,0 +1,191 @@
++/*
++ * Creation Date: <2002/01/13 20:45:37 samuel>
++ * Time-stamp: <2004/02/14 14:01:09 samuel>
++ *
++ * <init.c>
++ *
++ * Kernel module initialization
++ *
++ * Copyright (C) 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include "alloc.h"
++#include "kernel_vars.h"
++#include "misc.h"
++#include "mmu.h"
++#include "asmfuncs.h"
++#include "performance.h"
++#include "mol-ioctl.h"
++#include "version.h"
++#include "hash.h"
++
++/* globals */
++session_table_t *g_sesstab;
++int g_num_sessions;
++
++
++/************************************************************************/
++/* init/cleanup kernel module */
++/************************************************************************/
++
++int
++common_init( void )
++{
++ if( init_hash() )
++ return 1;
++
++ if( !(g_sesstab=kmalloc_cont_mol(sizeof(*g_sesstab))) ) {
++ cleanup_hash();
++ return 1;
++ }
++
++ memset( g_sesstab, 0, sizeof(*g_sesstab) );
++ init_MUTEX_mol( &g_sesstab->lock );
++
++ if( arch_common_init() ) {
++ free_MUTEX_mol( &g_sesstab->lock );
++ kfree_cont_mol( g_sesstab );
++ cleanup_hash();
++ return 1;
++ }
++ return 0;
++}
++
++void
++common_cleanup( void )
++{
++ arch_common_cleanup();
++
++ free_MUTEX_mol( &g_sesstab->lock );
++ kfree_cont_mol( g_sesstab );
++ g_sesstab = NULL;
++
++ cleanup_hash();
++}
++
++
++/************************************************************************/
++/* initialize / destroy session */
++/************************************************************************/
++
++static int
++initialize_session_( uint index )
++{
++ kernel_vars_t *kv;
++ ulong kv_phys;
++
++ if( g_sesstab->magic == 1 )
++ return -EMOLSECURITY;
++
++ /* printk("initialize_session\n" ); */
++ if( g_sesstab->kvars[index] )
++ return -EMOLINUSE;
++
++ if( !g_num_sessions && perform_actions() )
++ return -EMOLGENERAL;
++
++ if( !(kv=alloc_kvar_pages()) )
++ goto error;
++
++ memset( kv, 0, NUM_KVARS_PAGES * 0x1000 );
++ kv->session_index = index;
++ kv->kvars_virt = kv;
++ kv_phys = tophys_mol(kv);
++ kv->kvars_tophys_offs = kv_phys - (ulong)kv;
++
++ if( init_mmu(kv) )
++ goto error;
++
++ init_host_irqs(kv);
++ initialize_spr_table( kv );
++
++ msr_altered( kv );
++
++ g_num_sessions++;
++
++ g_sesstab->kvars_ph[index] = kv_phys;
++ g_sesstab->kvars[index] = kv;
++
++ return 0;
++ error:
++ if( !g_num_sessions )
++ cleanup_actions();
++ if( kv )
++ free_kvar_pages( kv );
++ return -EMOLGENERAL;
++}
++
++int
++initialize_session( uint index )
++{
++ int ret;
++ if( index >= MAX_NUM_SESSIONS )
++ return -EMOLINVAL;
++
++ SESSION_LOCK;
++ ret = initialize_session_( index );
++ SESSION_UNLOCK;
++
++ return ret;
++}
++
++void
++destroy_session( uint index )
++{
++ kernel_vars_t *kv;
++
++ if( index >= MAX_NUM_SESSIONS )
++ return;
++
++ if( g_sesstab->magic == 1 ) {
++ printk("Security alert! Somebody other than MOL has tried to invoke\n"
++ "the MOL switch magic. The MOL infrastructure has been disabled.\n"
++ "Reboot in order to get MOL running again\n");
++ /* make it impossible to unload the module */
++ prevent_mod_unload();
++ }
++
++ SESSION_LOCK;
++ if( (kv=g_sesstab->kvars[index]) ) {
++
++ g_sesstab->kvars[index] = NULL;
++ g_sesstab->kvars_ph[index] = 0;
++
++ /* decrease before freeing anything (simplifies deallocation of shared resources) */
++ g_num_sessions--;
++ cleanup_host_irqs(kv);
++ cleanup_mmu( kv );
++
++ if( kv->emuaccel_page )
++ free_page_mol( kv->emuaccel_page );
++
++ memset( kv, 0, NUM_KVARS_PAGES * 0x1000 );
++ free_kvar_pages( kv );
++
++ if( !g_num_sessions )
++ cleanup_actions();
++ }
++ SESSION_UNLOCK;
++}
++
++uint
++get_session_magic( uint random_magic )
++{
++ if( random_magic < 2 )
++ random_magic = 2;
++ /* negative return values are interpreted as errors */
++ random_magic &= 0x7fffffff;
++
++ SESSION_LOCK;
++ if( !g_sesstab->magic )
++ g_sesstab->magic = random_magic;
++ SESSION_UNLOCK;
++
++ return g_sesstab->magic;
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/misc.c
+@@ -0,0 +1,255 @@
++/*
++ * Creation Date: <2003/06/06 20:00:52 samuel>
++ * Time-stamp: <2004/03/06 13:54:26 samuel>
++ *
++ * <misc.c>
++ *
++ * Miscellaneous
++ *
++ * Copyright (C) 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2
++ *
++ */
++
++#include "archinclude.h"
++#include "mol-ioctl.h"
++#include "mmu.h"
++#include "mtable.h"
++#include "constants.h"
++#include "asmfuncs.h"
++#include "performance.h"
++#include "misc.h"
++#include "emu.h"
++#include "alloc.h"
++#include "uaccess.h"
++
++
++/************************************************************************/
++/* Performance Info */
++/************************************************************************/
++
++#ifdef PERFORMANCE_INFO
++
++static void
++clear_performance_info( kernel_vars_t *kv )
++{
++ perf_info_t *p = g_perf_info_table;
++ int i;
++
++ for( ; p->name ; p++ )
++ *p->ctrptr = 0;
++ for( i=0; i<NUM_ASM_BUMP_CNTRS; i++ )
++ kv->asm_bump_cntr[i] = 0;
++ kv->num_acntrs = 0;
++}
++
++static int
++get_performance_info( kernel_vars_t *kv, uint ind, perf_ctr_t *r )
++{
++ perf_info_t *p;
++ int len;
++ char *name;
++
++ for( p=g_perf_info_table; p->name && ind; p++, ind-- )
++ ;
++ if( !p->name ) {
++ extern int __start_bumptable[], __end_bumptable[];
++ if( ind >= __end_bumptable - __start_bumptable )
++ return 1;
++ name = (char*)__start_bumptable + __start_bumptable[ind];
++ r->ctr = kv->asm_bump_cntr[ind];
++ } else {
++ name = p->name;
++ r->ctr = *p->ctrptr;
++ }
++
++ if( (len=strlen(name)+1) > sizeof(r->name) )
++ len = sizeof(r->name);
++ memcpy( r->name, name, len );
++ return 0;
++}
++
++#else /* PERFORMANCE_INFO */
++
++static void
++clear_performance_info( kernel_vars_t *kv )
++{
++}
++
++static int
++get_performance_info( kernel_vars_t *kv, uint ind, perf_ctr_t *r )
++{
++ return 1;
++}
++
++#endif /* PERFORMANCE_INFO */
++
++
++
++/************************************************************************/
++/* misc */
++/************************************************************************/
++
++int
++do_debugger_op( kernel_vars_t *kv, dbg_op_params_t *pb )
++{
++ int ret = 0;
++
++ switch( pb->operation ) {
++ case DBG_OP_EMULATE_TLBIE:
++ flush_ea_range( kv, (pb->ea & ~0xf0000000), 0x1000 );
++ break;
++
++ case DBG_OP_EMULATE_TLBIA:
++ clear_all_vsids( kv );
++ break;
++
++ case DBG_OP_GET_PTE:
++ ret = dbg_get_PTE( kv, pb->context, pb->ea, &pb->ret.pte );
++ break;
++
++ case DBG_OP_BREAKPOINT_FLAGS:
++ kv->break_flags = pb->param;
++ kv->mregs.flag_bits &= ~fb_DbgTrace;
++ kv->mregs.flag_bits |= (pb->param & BREAK_SINGLE_STEP)? fb_DbgTrace : 0;
++ msr_altered( kv );
++ break;
++
++ case DBG_OP_TRANSLATE_EA:
++ /* param == is_data_access */
++ ret = dbg_translate_ea( kv, pb->context, pb->ea, &pb->ret.phys, pb->param );
++ break;
++
++ default:
++ printk("Unimplemended debugger operation %d\n", pb->operation );
++ ret = -ENOSYS_MOL;
++ break;
++ }
++ return ret;
++}
++
++static void
++tune_spr( kernel_vars_t *kv, uint spr, int action )
++{
++ extern int r__spr_illegal[], r__spr_read_only[], r__spr_read_write[];
++ int hook, newhook=0;
++
++ if( spr >= 1024 )
++ return;
++
++ hook = kv->_bp.spr_hooks[spr];
++
++ /* LSB of hook specifies whether the SPR is privileged */
++ switch( action ) {
++ case kTuneSPR_Illegal:
++ newhook = (int)r__spr_illegal;
++ hook &= ~1;
++ break;
++
++ case kTuneSPR_Privileged:
++ hook |= 1;
++ break;
++
++ case kTuneSPR_Unprivileged:
++ hook &= ~1;
++ break;
++
++ case kTuneSPR_ReadWrite:
++ newhook = (int)r__spr_read_write;
++ break;
++
++ case kTuneSPR_ReadOnly:
++ newhook = (int)r__spr_read_only;
++ break;
++ }
++ if( newhook )
++ hook = (hook & 1) | tophys_mol( (char*)reloc_ptr(newhook) );
++ kv->_bp.spr_hooks[spr] = hook;
++}
++
++/* return value: <0: system error, >=0: ret value */
++int
++handle_ioctl( kernel_vars_t *kv, int cmd, int arg1, int arg2, int arg3 )
++{
++ struct mmu_mapping map;
++ perf_ctr_t pctr;
++ int ret = 0;
++
++ switch( cmd ) {
++ case MOL_IOCTL_GET_SESSION_MAGIC:
++ ret = get_session_magic( arg1 );
++ break;
++
++ case MOL_IOCTL_IDLE_RECLAIM_MEMORY:
++ mtable_reclaim( kv );
++ break;
++
++ case MOL_IOCTL_SPR_CHANGED:
++ msr_altered(kv);
++ mmu_altered(kv);
++ break;
++
++ case MOL_IOCTL_ADD_IORANGE: /* void ( ulong mbase, int size, void *usr_data )*/
++ add_io_trans( kv, arg1, arg2, (void*)arg3 );
++ break;
++ case MOL_IOCTL_REMOVE_IORANGE: /* void ( ulong mbase, int size ) */
++ remove_io_trans( kv, arg1, arg2 );
++ break;
++
++ case MOL_IOCTL_ALLOC_EMUACCEL_SLOT: /* EMULATE_xxx, param, ret_addr -- mphys */
++ ret = alloc_emuaccel_slot( kv, arg1, arg2, arg3 );
++ break;
++ case MOL_IOCTL_MAPIN_EMUACCEL_PAGE: /* arg1 = mphys */
++ ret = mapin_emuaccel_page( kv, arg1 );
++ break;
++
++ case MOL_IOCTL_SETUP_FBACCEL: /* lvbase, bytes_per_row, height */
++ setup_fb_acceleration( kv, (char*)arg1, arg2, arg3 );
++ break;
++ case MOL_IOCTL_TUNE_SPR: /* spr#, action */
++ tune_spr( kv, arg1, arg2 );
++ break;
++
++ case MOL_IOCTL_MMU_MAP: /* arg1=struct mmu_mapping *m, arg2=map/unmap */
++ if( copy_from_user_mol(&map, (struct mmu_mapping*)arg1, sizeof(map)) )
++ break;
++ if( arg2 )
++ mmu_add_map( kv, &map );
++ else
++ mmu_remove_map( kv, &map );
++ if( copy_to_user_mol((struct mmu_mapping*)arg1, &map, sizeof(map)) )
++ ret = -EFAULT_MOL;
++ break;
++
++ case MOL_IOCTL_GET_PERF_INFO:
++ ret = get_performance_info( kv, arg1, &pctr );
++ if( copy_to_user_mol((perf_ctr_t*)arg2, &pctr, sizeof(pctr)) )
++ ret = -EFAULT_MOL;
++ break;
++
++#if 0
++ case MOL_IOCTL_TRACK_DIRTY_RAM:
++ ret = track_lvrange( kv );
++ break;
++ case MOL_IOCTL_GET_DIRTY_RAM:
++ ret = get_track_buffer( kv, (char*)arg1 );
++ break;
++ case MOL_IOCTL_SET_DIRTY_RAM:
++ set_track_buffer( kv, (char*)arg1 );
++ break;
++#endif
++ /* ---------------- performance statistics ------------------ */
++
++ case MOL_IOCTL_CLEAR_PERF_INFO:
++ clear_performance_info( kv );
++ break;
++
++ default:
++ printk("unsupported MOL ioctl %d\n", cmd );
++ ret = -ENOSYS_MOL;
++ }
++ return ret;
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/mmu.c
+@@ -0,0 +1,251 @@
++/*
++ * Creation Date: <1998-11-11 11:56:45 samuel>
++ * Time-stamp: <2004/03/13 14:25:26 samuel>
++ *
++ * <mmu.c>
++ *
++ * Handles page mappings and the mac MMU
++ *
++ * Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include "alloc.h"
++#include "kernel_vars.h"
++#include "mmu.h"
++#include "mmu_contexts.h"
++#include "asmfuncs.h"
++#include "emu.h"
++#include "misc.h"
++#include "mtable.h"
++#include "performance.h"
++#include "context.h"
++#include "hash.h"
++#include "map.h"
++
++#define MREGS (kv->mregs)
++#define MMU (kv->mmu)
++
++
++/************************************************************************/
++/* init / cleanup */
++/************************************************************************/
++
++int
++init_mmu( kernel_vars_t *kv )
++{
++ int success;
++
++ success =
++ !arch_mmu_init( kv ) &&
++ !init_contexts( kv ) &&
++ !init_mtable( kv ) &&
++ !init_mmu_io( kv ) &&
++ !init_mmu_fb( kv ) &&
++ !init_mmu_tracker( kv );
++
++ if( !success ) {
++ cleanup_mmu( kv );
++ return 1;
++ }
++
++ clear_vsid_refs( kv );
++
++ /* SDR1 is set from fStartEmulation */
++ return 0;
++}
++
++void
++cleanup_mmu( kernel_vars_t *kv )
++{
++ /* We have to make sure the flush threads are not using the mtable
++ * facilities. The kvars entry has been cleared so we just have
++ * to wait around until no threads are using it.
++ */
++ while( atomic_read_mol(&g_sesstab->external_thread_cnt) )
++ ;
++
++ cleanup_mmu_tracker( kv );
++ cleanup_mmu_fb( kv );
++ cleanup_mmu_io( kv );
++ cleanup_mtable( kv );
++ cleanup_contexts( kv );
++
++ if( MMU.pthash_inuse_bits )
++ kfree_cont_mol( MMU.pthash_inuse_bits );
++ if( MMU.hash_base )
++ unmap_emulated_hash( kv );
++
++ memset( &MMU, 0, sizeof(mmu_vars_t) );
++}
++
++
++/************************************************************************/
++/* misc */
++/************************************************************************/
++
++/* All vsid entries have been flushed; clear dangling pointers */
++void
++clear_vsid_refs( kernel_vars_t *kv )
++{
++ int i;
++ for( i=0; i<16; i++ ) {
++ MMU.vsid[i] = NULL;
++ MMU.unmapped_vsid[i] = NULL;
++
++ MMU.user_sr[i] = MMU.illegal_sr;
++ MMU.sv_sr[i] = MMU.illegal_sr;
++ MMU.unmapped_sr[i] = MMU.illegal_sr;
++ MMU.split_sr[i] = MMU.illegal_sr;
++ }
++ invalidate_splitmode_sr( kv );
++}
++
++/*
++ * This function is called whenever the mac MMU-registers have
++ * been manipulated externally.
++ */
++void
++mmu_altered( kernel_vars_t *kv )
++{
++ int i;
++
++ for( i=0; i<16; i++ ) {
++ MMU.vsid[i] = NULL;
++ MMU.user_sr[i] = MMU.illegal_sr;
++ MMU.sv_sr[i] = MMU.illegal_sr;
++ }
++ invalidate_splitmode_sr( kv );
++
++ do_mtsdr1( kv, MREGS.spr[S_SDR1] );
++
++ for( i=0; i<16; i++ )
++ do_mtbat( kv, S_IBAT0U+i, MREGS.spr[ S_IBAT0U+i ], 1 );
++}
++
++/*
++ * A page we might be using is about to be destroyed (e.g. swapped out).
++ * Any PTEs referencing this page must be flushed. The context parameter
++ * is vsid >> 4.
++ *
++ * ENTRYPOINT!
++ */
++void
++do_flush( ulong context, ulong va, ulong *dummy, int n )
++{
++ int i;
++ kernel_vars_t *kv;
++ BUMP( do_flush );
++
++ atomic_inc_mol( &g_sesstab->external_thread_cnt );
++
++ for( i=0; i<MAX_NUM_SESSIONS; i++ ) {
++ if( !(kv=g_sesstab->kvars[i]) || context != kv->mmu.emulator_context )
++ continue;
++
++ BUMP_N( block_destroyed_ctr, n );
++ for( ; n-- ; va += 0x1000 )
++ flush_lvptr( kv, va );
++ break;
++ }
++
++ atomic_dec_mol( &g_sesstab->external_thread_cnt );
++}
++
++
++/************************************************************************/
++/* Debugger functions */
++/************************************************************************/
++
++int
++dbg_get_PTE( kernel_vars_t *kv, int context, ulong va, mPTE_t *retptr )
++{
++ ulong base, mask;
++ ulong vsid, ptmp, stmp, *pteg, *steg;
++ ulong cmp;
++ ulong *uret = (ulong*)retptr;
++ int i, num_match=0;
++
++ switch( context ) {
++ case kContextUnmapped:
++ vsid = MMU.unmapped_sr[va>>28];
++ break;
++ case kContextMapped_S:
++ vsid = MMU.sv_sr[va>>28];
++ break;
++ case kContextMapped_U:
++ vsid = MMU.user_sr[va>>28];
++ break;
++ case kContextEmulator:
++ vsid = (MUNGE_CONTEXT(MMU.emulator_context) + ((va>>28) * MUNGE_ESID_ADD)) & 0xffffff;
++ break;
++ case kContextKernel:
++ vsid = 0;
++ break;
++ default:
++ printk("get_PTE: no such context: %d\n", context );
++ return 0;
++ }
++
++ /* mask vsid and va */
++ vsid &= 0xffffff;
++ va &= 0x0ffff000;
++
++ /* get hash base and hash mask */
++ base = (ulong)ptehash.base;
++ mask = ptehash.pteg_mask >> 6;
++
++ /* hash function */
++ ptmp = (vsid ^ (va>>12)) & mask;
++ stmp = mask & ~ptmp;
++ pteg = (ulong*)((ptmp << 6) + base);
++ steg = (ulong*)((stmp << 6) + base);
++
++ /* construct compare word */
++ cmp = 0x80000000 | (vsid <<7) | (va>>22);
++
++ /* look in primary PTEG */
++ for( i=0; i<8; i++ ) {
++ if( cmp == pteg[i*2] ) {
++ if( !num_match++ && uret ) {
++ uret[0] = pteg[i*2];
++ uret[1] = pteg[i*2+1];
++ }
++ if( num_match == 2 ) {
++ printk("Internal ERROR: duplicate PTEs!\n");
++ printk("p-hash: low_pte: %08lX high_pte: %08lX\n",
++ uret ? uret[0]:0, retptr? uret[1]:0 );
++ }
++ if( num_match >= 2 ) {
++ printk("p-hash: low_pte: %08lX high_pte: %08lX\n",
++ pteg[i*2], pteg[i*2+1] );
++ }
++ }
++ }
++
++ /* look in secondary PTEG */
++ cmp |= 0x40;
++ for( i=0; i<8; i++ ) {
++ if( cmp == steg[i*2] ) {
++ if( !num_match++ && uret ) {
++ uret[0] = steg[i*2];
++ uret[1] = steg[i*2+1];
++ }
++ if( num_match == 2 ) {
++ printk("Internal ERROR: duplicate PTEs!\n");
++ printk("?-hash: low_pte: %08lX high_pte: %08lX\n",
++ uret? uret[0]:0, uret? uret[1]:0 );
++ }
++ if( num_match >= 2 ) {
++ printk("s-hash: low_pte: %08lX high_pte: %08lX\n",
++ steg[i*2], steg[i*2+1] );
++ }
++ }
++ }
++ return num_match;
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/mmu_fb.c
+@@ -0,0 +1,186 @@
++/*
++ * Creation Date: <1999-12-28 14:03:18 samuel>
++ * Time-stamp: <2004/02/14 14:52:52 samuel>
++ *
++ * <mmu_fb.c>
++ *
++ * Offscreen framebuffer acceleration
++ *
++ * Copyright (C) 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include "alloc.h"
++#include "uaccess.h"
++#include "mmu.h"
++#include "asmfuncs.h"
++#include "performance.h"
++#include "misc.h"
++
++typedef struct line_entry {
++ short y1, y2;
++ int dirty;
++ ulong *slot;
++ ulong pte0;
++ ulong pte1;
++ ulong ea;
++} line_entry_t;
++
++typedef struct fb_data {
++ line_entry_t *line_table;
++ int nrec;
++ char *lv_base; /* linux virtual of first entry in table */
++} fb_data_t;
++
++#define MMU (kv->mmu)
++#define DECLARE_FB fb_data_t *fb = MMU.fb_data
++
++#ifdef __darwin__
++static inline void
++__put_user( short val, short *destptr )
++{
++ copy_to_user_mol( destptr, &val, sizeof(short) );
++}
++#endif
++
++
++int
++init_mmu_fb( kernel_vars_t *kv )
++{
++ /* setup_fb_acceleration does the initialization */
++ return 0;
++}
++
++void
++cleanup_mmu_fb( kernel_vars_t *kv )
++{
++ DECLARE_FB;
++ if( !fb )
++ return;
++ if( fb->line_table )
++ vfree_mol( fb->line_table );
++
++ kfree_mol( fb );
++ MMU.fb_data = NULL;
++}
++
++void
++video_pte_inserted( kernel_vars_t *kv, ulong lvptr, ulong *slot, ulong pte0, ulong pte1, ulong ea )
++{
++ DECLARE_FB;
++ int i;
++
++ if( !fb )
++ return;
++
++ i = (lvptr - (ulong)fb->lv_base) >> 12;
++ if( i >= 0 && i < fb->nrec ) {
++ line_entry_t *p = &fb->line_table[i];
++
++ /* allow at most one video PTE to be mapped at any time */
++ if( p->slot && (p->slot != slot || p->pte0 != pte0) ) {
++ BUMP( video_pte_reinsert );
++ if( p->slot != slot )
++ p->slot[0] = 0;
++ __tlbie(p->ea);
++ p->dirty = 1;
++ }
++
++ p->slot = slot;
++ p->pte0 = pte0;
++ p->pte1 = pte1 & ~PTE1_C;
++ p->ea = ea;
++ } else {
++ printk("Warning: video_page outside range, %lx %p\n", lvptr, fb->lv_base );
++ }
++}
++
++/* setup/remove framebuffer acceleration */
++void
++setup_fb_acceleration( kernel_vars_t *kv, char *lvbase, int bytes_per_row, int height )
++{
++ DECLARE_FB;
++ int i, offs = (ulong)lvbase & 0xfff;
++ line_entry_t *p;
++
++ if( fb )
++ cleanup_mmu_fb( kv );
++ if( !lvbase )
++ return;
++ if( !(fb=kmalloc_mol(sizeof(fb_data_t))) )
++ return;
++ memset( fb, 0, sizeof(fb_data_t) );
++ MMU.fb_data = fb;
++
++ fb->nrec = (bytes_per_row * height + offs + 0xfff) >> 12;
++ if( !(p=(line_entry_t*)vmalloc_mol(sizeof(line_entry_t) * fb->nrec)) ) {
++ cleanup_mmu_fb( kv );
++ return;
++ }
++ memset( p, 0, sizeof(line_entry_t) * fb->nrec );
++ fb->line_table = p;
++
++ fb->lv_base = (char*)((ulong)lvbase & ~0xfff);
++ for( i=0; i<fb->nrec; i++, p++ ){
++ p->y1 = (0x1000*i - offs) / bytes_per_row;
++ p->y2 = (0x1000*(i+1)-1 -offs) / bytes_per_row;
++ if( p->y1 < 0 )
++ p->y1 = 0;
++ if( p->y2 >= height )
++ p->y2 = height-1;
++
++ /* we should make sure the page is really unmapped here! */
++ p->slot = NULL;
++ }
++}
++
++/* return format is {startline,endline} pairs */
++int
++get_dirty_fb_lines( kernel_vars_t *kv, short *userbuf, int num_bytes )
++{
++ DECLARE_FB;
++ int i, n, s, start;
++ line_entry_t *p;
++
++ s = num_bytes/sizeof(short[2]) - 1;
++
++ if( !fb || (uint)s <= 0 )
++ return -1;
++
++ p = fb->line_table;
++ for( start=-1, n=0, i=0; i<fb->nrec; i++, p++ ) {
++ if( p->slot ) {
++ if( p->slot[0] != p->pte0 ) {
++ /* evicted FB PTE */
++ p->slot = NULL;
++ p->dirty = 1;
++ __tlbie( p->ea );
++ } else if( p->slot[1] & BIT(24) ) { /* C-BIT */
++ p->dirty = 1;
++ __store_PTE( p->ea, p->slot, p->pte0, p->pte1 );
++ BUMP(fb_ptec_flush);
++ }
++ }
++ if( p->dirty && start < 0 )
++ start = p->y1;
++ else if( !p->dirty && start >= 0 ) {
++ __put_user( start, userbuf++ );
++ __put_user( p->y2, userbuf++ );
++ start = -1;
++ if( ++n >= s )
++ break;
++ }
++ p->dirty = 0;
++ }
++ if( start >= 0 ) {
++ __put_user( start, userbuf++ );
++ __put_user( fb->line_table[fb->nrec-1].y2, userbuf++ );
++ n++;
++ }
++ return n;
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/mmu_io.c
+@@ -0,0 +1,470 @@
++/*
++ * Creation Date: <1998-12-02 03:23:31 samuel>
++ * Time-stamp: <2004/03/13 16:57:31 samuel>
++ *
++ * <mmu_io.c>
++ *
++ * Translate mac_phys to whatever has been mapped in at
++ * a particular address (linux ram, framebuffer, ROM, etc.)
++ *
++ * Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include "alloc.h"
++#include "kernel_vars.h"
++#include "mmu.h"
++#include "misc.h"
++#include "mtable.h"
++#include "performance.h"
++#include "processor.h"
++
++#define MAX_BLOCK_TRANS 6
++
++/* Block translations are used for ROM, RAM, VRAM and similar things.
++ *
++ * IO-translations are a different type of mappings. Whenever an IO-area
++ * is accessed, a page fault occurs. If there is a page present (although
++ * r/w prohibited), then the low-level exception handler examines if the
++ * page has a magic signature in the first 8 bytes. If there is a match,
++ * then the page is of the type io_page_t and contains the information
++ * necessary to emulate the IO. If no page is present, then the corresponding
++ * IO-page is looked up and hashed.
++ */
++
++typedef struct {
++ ulong mbase;
++ char *lvbase;
++ pte_lvrange_t *lvrange;
++
++ size_t size;
++ int flags;
++
++ int id;
++} block_trans_t;
++
++typedef struct io_data {
++ block_trans_t btable[MAX_BLOCK_TRANS];
++ int num_btrans;
++ int next_free_id;
++ io_page_t *io_page_head;
++} io_data_t;
++
++static char *scratch_page;
++
++#define MMU (kv->mmu)
++#define DECLARE_IOD io_data_t *iod = kv->mmu.io_data
++
++
++
++int
++init_mmu_io( kernel_vars_t *kv )
++{
++ if( !(MMU.io_data=kmalloc_mol(sizeof(io_data_t))) )
++ return 1;
++ memset( MMU.io_data, 0, sizeof(io_data_t) );
++ return 0;
++}
++
++void
++cleanup_mmu_io( kernel_vars_t *kv )
++{
++ DECLARE_IOD;
++ io_page_t *next2, *p2;
++ int i;
++
++ if( !iod )
++ return;
++
++ for( p2=iod->io_page_head; p2; p2=next2 ) {
++ next2 = p2->next;
++ free_page_mol( (ulong)p2 );
++ }
++ iod->io_page_head = 0;
++
++ /* release the scratch page (not always allocated) */
++ if( !g_num_sessions && scratch_page ) {
++ free_page_mol( (int)scratch_page );
++ scratch_page = 0;
++ }
++
++ /* release any lvranges */
++ for( i=0; i<iod->num_btrans; i++ )
++ if( iod->btable[i].lvrange )
++ free_lvrange( kv, iod->btable[i].lvrange );
++
++ kfree_mol( iod );
++ MMU.io_data = NULL;
++}
++
++
++/* This is primarily intended for framebuffers */
++static int
++bat_align( int flags, ulong ea, ulong lphys, ulong size, ulong bat[2] )
++{
++ ulong s;
++ ulong offs1, offs2;
++
++ s=0x20000; /* 128K */
++ if( s> size )
++ return 1;
++ /* Limit to 128MB in order not to cross segments (256MB is bat-max) */
++ if( size > 0x10000000 )
++ size = 0x10000000;
++ for( ; s<size ; s = (s<<1) )
++ ;
++ offs1 = ea & (s-1);
++ offs2 = lphys & (s-1);
++ if( offs1 != offs2 ) {
++ printk("Can't use DBAT since offsets differ (%ld != %ld)\n", offs1, offs2 );
++ return 1;
++ }
++ /* BEPI | BL | VS | VP */
++ bat[0] = (ea & ~(s-1)) | (((s-1)>>17) << 2) | 3;
++ bat[1] = (lphys & ~(s-1)) | 2; /* pp=10, R/W */
++
++#ifndef CONFIG_AMIGAONE
++ bat[1] |= BIT(27); /* [M] (memory coherence) */
++#endif
++
++ if( !(flags & MAPPING_FORCE_CACHE) ) {
++ bat[1] |= BIT(26); /* [I] (inhibit cache) */
++ } else {
++ bat[1] |= BIT(25); /* [W] (write through) */
++ }
++ return 0;
++}
++
++
++/*
++ * Handle block translations (translations of mac-physical
++ * blocks to linux virtual physical addresses)
++ */
++static int
++add_block_trans( kernel_vars_t *kv, ulong mbase, char *lvbase, ulong size, int flags )
++{
++ DECLARE_IOD;
++ block_trans_t *bt;
++ pte_lvrange_t *lvrange = NULL;
++ int ind, i;
++
++ /* warn if things are not aligned properly */
++ if( (size & 0xfff) || ((int)lvbase & 0xfff) || (mbase & 0xfff) )
++ printk("Bad block translation alignement\n");
++
++ /* we keep an unsorted list - RAM should be added first, then ROM, then VRAM etc */
++ if( iod->num_btrans >= MAX_BLOCK_TRANS ) {
++ printk("Maximal number of block translations exceeded!\n");
++ return -1;
++ }
++
++ /* remove illegal combinations */
++ flags &= ~MAPPING_IO;
++ if( (flags & MAPPING_DBAT) && !(flags & MAPPING_PHYSICAL) )
++ flags &= ~MAPPING_DBAT;
++
++ /* scratch pages are always physical - lvbase isn't used */
++ if( (flags & MAPPING_SCRATCH) ) {
++ lvbase = NULL;
++ flags |= MAPPING_PHYSICAL;
++ flags &= ~MAPPING_DBAT;
++ }
++
++ /* IMPORTANT: DBATs can _only_ be used when we KNOW that ea == mphys. */
++ if( (flags & MAPPING_DBAT) ) {
++ ulong bat[2];
++ if( !bat_align(flags, mbase, (ulong)lvbase, size, bat) ) {
++ /* printk("BATS: %08lX %08lX\n", bat[0], bat[1] ); */
++ MMU.transl_dbat0.word[0] = bat[0];
++ MMU.transl_dbat0.word[1] = bat[1];
++ }
++ }
++
++ if( !(flags & MAPPING_PHYSICAL) )
++ if( !(lvrange=register_lvrange(kv, lvbase, size)) )
++ return -1;
++
++ /* Determine where to insert the translation in the table.
++ * RAM should go right after entries marked with MAPPING_PUT_FIRST.
++ * The MAPPING_PUT_FIRST flag is used to do magic things like
++ * embedding a copy of mregs in RAM.
++ */
++ ind = (!mbase || (flags & MAPPING_PUT_FIRST)) ? 0 : iod->num_btrans;
++ for( i=0; i<iod->num_btrans && ind <= i; i++ )
++ if( iod->btable[i].flags & MAPPING_PUT_FIRST )
++ ind++;
++ bt = &iod->btable[ind];
++ if( ind < iod->num_btrans )
++ memmove( &iod->btable[ind+1], bt, sizeof(iod->btable[0]) * (iod->num_btrans - ind) );
++ iod->num_btrans++;
++ memset( bt, 0, sizeof(block_trans_t) );
++
++ bt->mbase = mbase;
++ bt->lvbase = lvbase;
++ bt->lvrange = lvrange;
++ bt->size = size;
++ bt->flags = flags | MAPPING_VALID;
++ bt->id = ++iod->next_free_id;
++
++ /* flush everything if a translation was overridden */
++ if( flags & MAPPING_PUT_FIRST )
++ clear_pte_hash_table( kv );
++
++ return bt->id;
++}
++
++static void
++remove_block_trans( kernel_vars_t *kv, int id )
++{
++ DECLARE_IOD;
++ block_trans_t *p;
++ int i;
++
++ /* Remove all mappings in the TLB table...
++ * (too difficult to find the entries we need to flush)
++ */
++ BUMP(remove_block_trans);
++ clear_pte_hash_table( kv );
++
++ for( p=iod->btable, i=0; i<iod->num_btrans; i++, p++ ) {
++ if( id == p->id ) {
++ if( p->flags & MAPPING_DBAT ) {
++ MMU.transl_dbat0.word[0] = 0;
++ MMU.transl_dbat0.word[1] = 0;
++ }
++ if( p->lvrange )
++ free_lvrange( kv, p->lvrange );
++
++ memmove( p,p+1, (iod->num_btrans-1-i)*sizeof(block_trans_t) );
++ iod->num_btrans--;
++ return;
++ }
++ }
++ printk("Trying to remove nonexistent block mapping!\n");
++}
++
++/* adds an I/O-translation. It is legal to add the same
++ * range multiple times (for instance, to alter usr_data)
++ */
++int
++add_io_trans( kernel_vars_t *kv, ulong mbase, int size, void *usr_data )
++{
++ DECLARE_IOD;
++ io_page_t *ip, **pre_next;
++ ulong mb;
++ int i, num;
++
++ /* align mbase and size to double word boundaries */
++ size += mbase & 7;
++ mbase -= mbase & 7;
++ size = (size+7) & ~7;
++
++ while( size > 0 ) {
++ mb = mbase & 0xfffff000;
++
++ pre_next = &iod->io_page_head;
++ for( ip=iod->io_page_head; ip && ip->mphys < mb; ip=ip->next )
++ pre_next = &ip->next;
++
++ if( !ip || ip->mphys != mb ) {
++ /* create new page */
++ if( !(ip=(io_page_t*)alloc_page_mol()) ) {
++ printk("Failed allocating IO-page\n");
++ return 1;
++ }
++ ip->next = *pre_next;
++ *pre_next = ip;
++
++ /* setup block */
++ ip->magic = IO_PAGE_MAGIC_1;
++ ip->magic2 = IO_PAGE_MAGIC_2;
++ ip->me_phys = tophys_mol(ip);
++ ip->mphys = mb;
++ }
++ /* fill in IO */
++ num = size>>3;
++ i = (mbase & 0xfff) >> 3;
++ if( i+num > 512 )
++ num = 512-i;
++ mbase += num<<3;
++ size -= num<<3;
++ while( num-- )
++ ip->usr_data[i++] = usr_data;
++ }
++ return 0;
++}
++
++int
++remove_io_trans( kernel_vars_t *kv, ulong mbase, int size )
++{
++ DECLARE_IOD;
++ io_page_t *ip, **pre_next;
++ ulong mb;
++ int i, num;
++
++ /* To remove an unused IO-page, we must make sure there are no
++ * dangling references to it. Hence we must search the PTE hash
++ * table and remove all references. We must also issue a
++ * tlbia to make sure it is not in the on-chip DTLB/ITLB cache.
++ *
++ * XXX: Instead of searching the hash, we simply make sure the magic
++ * constants are invalid. This is perfectly safe since the exception
++ * handler doesn't write to the page in question - and the physical
++ * page always exists even if it is allocated by somebody else.
++ * It is better to make sure there are no references of it though.
++ *
++ * XXX: This needs to be fixed... commonly, we reallocate
++ * the page ourselves for I/O so the magic constants might
++ * be valid...
++ */
++
++ /* align mbase and size to double word boundaries */
++ size += mbase & 7;
++ mbase -= mbase & 7;
++ size = (size+7) & ~7;
++
++ while( size > 0 ) {
++ mb = mbase & 0xfffff000;
++
++ pre_next = &iod->io_page_head;
++ for( ip=iod->io_page_head; ip && ip->mphys < mb; ip=ip->next )
++ pre_next = &ip->next;
++
++ if( !ip || ip->mphys != mb ) {
++ /* no page... */
++ size -= 0x1000 - (mbase & 0xfff);
++ mbase += 0x1000 - (mbase & 0xfff);
++ continue;
++ }
++ /* clear IO */
++ num = size>>3;
++ i = (mbase & 0xfff) >> 3;
++ if( i+num > 512 )
++ num = 512-i;
++ mbase += num<<3;
++ size -= num<<3;
++ while( num-- )
++ ip->usr_data[i++] = 0;
++
++ /* May we free the page? */
++ for( i=0; i<512 && !ip->usr_data[i]; i++ )
++ ;
++ if( i==512 ) {
++ /* Free page (XXX: Remove page from hash, see above ) */
++ *pre_next = ip->next;
++ ip->magic2 = ip->magic = 0; /* IMPORTANT */
++ free_page_mol( (ulong)ip );
++ }
++ }
++ return 0;
++
++}
++
++
++/* Translate a mac-physical address (32 bit, not page-index)
++ * and fill in rpn (and _possibly_ other fields) of the pte.
++ * The WIMG bits are not modified after this call.
++ * The calling function is not supposed to alter the pte after
++ * this function call.
++ *
++ * Returns:
++ * 0 no translation found
++ * block_flags translation found
++ */
++
++int
++mphys_to_pte( kernel_vars_t *kv, ulong mphys, ulong *the_pte1, int is_write, pte_lvrange_t **lvrange )
++{
++ DECLARE_IOD;
++ int i, num_btrans;
++ block_trans_t *p;
++ io_page_t *p2;
++ int pte1 = *the_pte1;
++
++ num_btrans = iod->num_btrans;
++ mphys &= ~0xfff;
++
++ /* check for emuaccel page */
++ if( mphys == kv->emuaccel_mphys && kv->emuaccel_page_phys ) {
++ /* printk("emuaccel - PTE-insert\n"); */
++ pte1 |= kv->emuaccel_page_phys;
++ /* supervisor r/w, no user access */
++ pte1 &= ~(PTE1_W | PTE1_I | PTE1_PP);
++ *lvrange = NULL;
++ *the_pte1 = pte1;
++ return MAPPING_VALID | MAPPING_PHYSICAL;
++ }
++
++ /* check for a block mapping. */
++ for( p=iod->btable, i=0; i<num_btrans; i++,p++ ) {
++ if( mphys - p->mbase < (ulong)p->size ) {
++ if( (p->flags & MAPPING_SCRATCH) ) {
++ /* it is OK to return silently if we run out of memory */
++ if( !scratch_page && !(scratch_page=(char*)alloc_page_mol()) )
++ return 0;
++ pte1 |= tophys_mol(scratch_page);
++ } else
++ pte1 |= (mphys - p->mbase + (ulong)p->lvbase) & PTE1_RPN;
++
++ if( p->flags & MAPPING_FORCE_CACHE ) {
++ /* use write through for now */
++ pte1 |= PTE1_W;
++ pte1 &= ~PTE1_I;
++ } else if( !(p->flags & MAPPING_MACOS_CONTROLS_CACHE) )
++ pte1 &= ~(PTE1_W | PTE1_I);
++
++ /* well, just a try... */
++ if ( p->flags & MAPPING_FORCE_WRITABLE ) {
++ /* printk("forcing mphys page %lx writable\n", mphys); */
++ pte1 = (pte1 & ~3) | 2;
++ }
++
++ *lvrange = p->lvrange;
++ *the_pte1 = pte1;
++ return p->flags;
++ }
++ }
++
++ /* check for an I/O mapping. */
++ for( p2=iod->io_page_head; p2 && p2->mphys<=mphys; p2=p2->next ) {
++ if( p2->mphys != mphys )
++ continue;
++ pte1 |= p2->me_phys;
++ /* supervisor R/W */
++ pte1 &= ~(PTE1_PP | PTE1_W | PTE1_I);
++ *lvrange = NULL;
++ *the_pte1 = pte1;
++ return MAPPING_VALID | MAPPING_IO | MAPPING_PHYSICAL;
++ }
++ return 0;
++}
++
++void
++mmu_add_map( kernel_vars_t *kv, struct mmu_mapping *m )
++{
++ if( m->flags & MAPPING_MREGS ) {
++ char *start = (char*)tophys_mol(&kv->mregs);
++ uint offs = (uint)m->lvbase;
++ m->flags &= ~MAPPING_MREGS;
++ m->flags |= MAPPING_PHYSICAL;
++ m->lvbase = start + offs;
++ m->id = -1;
++ if( offs + (uint)m->size > NUM_MREGS_PAGES * 0x1000 ) {
++ printk("Invalid mregs mapping\n");
++ return;
++ }
++ }
++ m->id = add_block_trans( kv, m->mbase, m->lvbase, m->size, m->flags );
++}
++
++void
++mmu_remove_map( kernel_vars_t *kv, struct mmu_mapping *m )
++{
++ remove_block_trans( kv, m->id );
++ m->id = 0;
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/mmu_tracker.c
+@@ -0,0 +1,128 @@
++/*
++ * Creation Date: <2000/09/07 20:36:54 samuel>
++ * Time-stamp: <2004/02/14 14:45:33 samuel>
++ *
++ * <mmu_tracker.c>
++ *
++ * Keeps track of dirty RAM pages
++ *
++ * Copyright (C) 2000, 2001, 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include "alloc.h"
++#include "uaccess.h"
++#include "mmu.h"
++
++
++typedef struct tracker_data {
++ char *table;
++ size_t table_size;
++
++ int npages;
++ ulong lvbase;
++} tracker_data_t;
++
++#define MMU (kv->mmu)
++#define DECLARE_TS tracker_data_t *ts = MMU.tracker_data
++
++
++
++int
++init_mmu_tracker( kernel_vars_t *kv )
++{
++ /* track_lvrange does the initialization */
++ return 0;
++}
++
++void
++cleanup_mmu_tracker( kernel_vars_t *kv )
++{
++ DECLARE_TS;
++ if( !ts )
++ return;
++
++ if( ts->table )
++ vfree_mol( ts->table );
++
++ kfree_mol( ts );
++ MMU.tracker_data = NULL;
++}
++
++int
++track_lvrange( kernel_vars_t *kv )
++{
++ ulong lvbase = MMU.userspace_ram_base;
++ int size = MMU.ram_size;
++
++ DECLARE_TS;
++ if( ts )
++ cleanup_mmu_tracker( kv );
++ if( !size )
++ return 0;
++
++ if( !(ts=kmalloc_mol(sizeof(tracker_data_t))) )
++ return 1;
++ memset( ts, 0, sizeof(tracker_data_t) );
++ MMU.tracker_data = ts;
++
++ ts->npages = size >> 12;
++ ts->table_size = (ts->npages+7)/8;
++ ts->lvbase = lvbase;
++ if( !(ts->table=vmalloc_mol(ts->table_size)) ) {
++ cleanup_mmu_tracker( kv );
++ return 1;
++ }
++ memset( ts->table, 0, ts->table_size );
++ return 0;
++}
++
++void
++lvpage_dirty( kernel_vars_t *kv, ulong lvbase )
++{
++ DECLARE_TS;
++ int pgindex;
++
++ if( !ts )
++ return;
++
++ pgindex = (lvbase - ts->lvbase) >> 12;
++
++ if( pgindex >=0 && pgindex < ts->npages )
++ ts->table[pgindex >> 3] |= (1 << (pgindex & 7));
++}
++
++
++size_t
++get_track_buffer( kernel_vars_t *kv, char *retbuf )
++{
++ DECLARE_TS;
++
++ if( !ts )
++ return 0;
++ if( !retbuf )
++ return ts->table_size;
++
++ if( copy_to_user_mol(retbuf, ts->table, ts->table_size) )
++ return 0;
++ return ts->table_size;
++}
++
++void
++set_track_buffer( kernel_vars_t *kv, char *buf )
++{
++ DECLARE_TS;
++
++ if( !ts || !buf ) {
++ printk("set_track_buffer: error\n");
++ return;
++ }
++ if( copy_from_user_mol(ts->table, buf, ts->table_size) ) {
++ printk("set_track_buffer: Bad access\n");
++ }
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/mtable.c
+@@ -0,0 +1,960 @@
++/*
++ * Creation Date: <2002/05/26 14:46:42 samuel>
++ * Time-stamp: <2004/02/28 19:33:21 samuel>
++ *
++ * <mtable.c>
++ *
++ * Keeps track of all PTEs MOL uses.
++ *
++ * Copyright (C) 2002, 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#ifdef UL_DEBUG
++#include "mtable_dbg.c"
++#else
++#include "archinclude.h"
++#include "alloc.h"
++#include "kernel_vars.h"
++#include "asmfuncs.h"
++#include "mmu.h"
++#include "performance.h"
++#endif
++#include "mtable.h"
++#include "hash.h"
++
++/* #define DEBUG */
++
++/*
++ * Implementation notes:
++ *
++ * - It is assumed that the ITLB/DTLB is addressed by ea bits 14-19.
++ * This holds true for all CPUs at the moment (603, 604, 750, 7400,
++ * 7410, 7450) except the 601 (which uses bits 13-19).
++ */
++
++typedef struct pterec pterec_t;
++
++struct pterec {
++ pterec_t *ea_next; /* ea ring (MUST GO FIRST) */
++ pterec_t *lv_next; /* lv ring */
++ uint pent; /* defined below */
++};
++
++#define PENT_LV_HEAD BIT(0) /* Resident - do not put on free list */
++#define PENT_UNUSED BIT(1) /* (lvhead) PTE index is not valid */
++#define PENT_EA_BIT14 BIT(2) /* for the partial ea used by tlbie */
++#define PENT_EA_LAST BIT(3) /* next entry is the pelist pointer */
++#define PENT_TOPEA_MASK 0x0f800000 /* bit 4-8 of ea */
++#define PENT_SV_BIT 0x00400000 /* PTE uses vsid_sv */
++#define PENT_INDEX_MASK 0x003fffff /* PTE index (there can be at most 2^22 PTEs) */
++#define PENT_CMP_MASK (PENT_TOPEA_MASK | PENT_SV_BIT)
++
++/* The index below corresponds to bit 15-19 of the ea. Bit 14 of the ea
++ * is stored in the pent field. Thus bits 14-19 of the ea can always
++ * be reconstructed (this struct is always properly aligned). Note that the
++ * pelist forms a ring (this is the reason why ea_next must be
++ * the first element in the pterec struct).
++ */
++
++typedef struct {
++ pterec_t *pelist[32]; /* always NULL if ring is empty */
++} pent_table_t;
++
++struct vsid_ent { /* record which describes a mac vsid */
++ vsid_ent_t *myself_virt; /* virtual address of this struct */
++ int linux_vsid; /* munged mac context | VSID(Kp) */
++ int linux_vsid_sv; /* munged privileged mac context | VSID(Kp) */
++ pent_table_t *lev2[64]; /* bit 9-14 of ea */
++};
++
++#define LEV2_MASK 0x0001ffff /* bit 15-31 */
++
++#define LEV2_IND(ea) (((ea) >> (12+5)) & 0x3f) /* lev2 index is bit 9-14 */
++#define PELIST_IND(ea) (((ea) >> 12) & 0x1f) /* pelist index is 15-19 */
++
++#define PTE_TO_IND(pte) ((((int)pte - (int)ptehash.base) & ptehash.pte_mask) >> 3)
++
++#define ZERO_PTE(pent) *((ulong*)ptehash.base + ((pent & PENT_INDEX_MASK) << 1)) = 0
++
++
++struct pte_lvrange {
++ pterec_t *pents;
++ ulong base; /* we want to do unsigned compares */
++ ulong size;
++ pte_lvrange_t *next; /* linked list */
++};
++
++typedef struct alloc_ent {
++ struct alloc_ent *next;
++ char *ptr;
++ int what; /* ALLOC_CONTENTS_XXX */
++} alloc_ent_t;
++
++struct vsid_info {
++ mol_spinlock_t lock; /* lvrange and pent ring lock */
++
++ pte_lvrange_t *lvrange_head;
++ pterec_t *free_pents; /* free list (lv_next is used) */
++ pent_table_t *free_pent_tables; /* pelist[0] is used for the linked list */
++
++ alloc_ent_t *allocations; /* the allocations we have performed */
++ int alloc_size; /* total size of allocations */
++ int alloc_limit; /* imposed limit */
++};
++
++/* don't change the CHUNK_SIZE unless you know what you are doing... */
++#define CHUNK_SIZE (0x1000 - sizeof(alloc_ent_t))
++
++#define ALLOC_CONT_ANY 0
++#define ALLOC_CONT_VSID 1
++#define ALLOC_CONT_PENT 2
++#define ALLOC_CONT_LEV2 3
++
++#define MMU (kv->mmu)
++
++#define LOCK spin_lock_mol( &vi->lock )
++#define UNLOCK spin_unlock_mol( &vi->lock )
++
++/*
++ * Remarks about locking: There is one asynchronous entrypoint
++ * (flush_lvptr). This function touches the lvranges as well
++ * as all pent rings. It will not free vsids or unlink
++ * level2 tables (but pents are put on the free list).
++ */
++
++
++/************************************************************************/
++/* Table Flushing */
++/************************************************************************/
++
++#define vsid_ent_lookup( kv, mvsid ) ((vsid_ent_t*)skiplist_lookup( &kv->mmu.vsid_sl, mvsid ))
++
++static void
++flush_vsid_ea_( vsid_info_t *vi, vsid_ent_t *r, ulong ea )
++{
++ pent_table_t *t = r->lev2[LEV2_IND(ea)];
++ pterec_t **pp, **headp, *pr, *next, *lvp;
++ uint topea, pent;
++ int worked;
++
++ if( !t || !(*(pp=&t->pelist[PELIST_IND(ea)])) )
++ return;
++
++ topea = (ea & PENT_TOPEA_MASK);
++ worked = 0;
++ headp = pp;
++ pr = *pp;
++ do {
++ pent = pr->pent;
++ next = pr->ea_next;
++
++ if( (pent & PENT_TOPEA_MASK) == topea ) {
++ worked = 1;
++ /* unlink ea */
++ *pp = pr->ea_next;
++
++ /* unlink it from lv ring (unless it is the lv-head) */
++ if( pent & PENT_LV_HEAD ) {
++ pr->pent = PENT_UNUSED | PENT_LV_HEAD;
++ } else {
++ /* it is not certain it belongs to an lv ring at all... */
++ if( pr->lv_next ) {
++ for( lvp=pr->lv_next ; lvp->lv_next != pr ; lvp=lvp->lv_next )
++ ;
++ lvp->lv_next = pr->lv_next;
++ }
++ /* ...and put it on the free list */
++ //printk("pent released\n");
++ pr->lv_next = vi->free_pents;
++ vi->free_pents = pr;
++ }
++ ZERO_PTE( pent );
++
++ if( pent & PENT_EA_LAST ) {
++ if( pp == headp ) {
++ /* ring empty, set pelist pointer to NULL */
++ *headp = NULL;
++ } else {
++ /* put marker on previous entry */
++ ((pterec_t*)pp)->pent |= PENT_EA_LAST;
++ }
++ }
++ } else {
++ pp = &pr->ea_next;
++ }
++ pr = next;
++ } while( !(pent & PENT_EA_LAST) );
++
++ if( worked )
++ __tlbie( ea );
++}
++
++void
++flush_vsid_ea( kernel_vars_t *kv, int mac_vsid, ulong ea )
++{
++ vsid_info_t *vi = MMU.vsid_info;
++ vsid_ent_t *r;
++
++ LOCK;
++ if( (r=vsid_ent_lookup(kv, mac_vsid)) )
++ flush_vsid_ea_( vi, r, ea );
++ UNLOCK;
++}
++
++
++static void
++pent_flush_unlink_ea( pterec_t *pr )
++{
++ pterec_t **head, *prev;
++ uint ea;
++
++ //BUMP( pent_flush_unlink_ea );
++#ifdef DEBUG
++ if( pr->pent & PENT_UNUSED )
++ printk("pent_flush_unlink_ea: Internal error\n");
++#endif
++ /* find head and previous pent in ea ring */
++ for( prev=pr; !(prev->pent & PENT_EA_LAST); prev=prev->ea_next )
++ ;
++ head = (pterec_t**)prev->ea_next;
++ for( ; prev->ea_next != pr ; prev=prev->ea_next )
++ ;
++
++ if( (pr->pent & PENT_EA_LAST) ) {
++ /* just a single entry in the ea ring? */
++ if( prev == (pterec_t*)head ) {
++ pr->ea_next = NULL; /* prev->ea_next is set to this below */
++ } else {
++ prev->pent |= PENT_EA_LAST;
++ }
++ }
++ prev->ea_next = pr->ea_next;
++
++ /* OK... it is unlinked. Reconstruct EA and flush it */
++ ZERO_PTE( pr->pent );
++ ea = ((uint)head >> 2) & 0x1f; /* Bits 15-19 of ea */
++ if( pr->pent & PENT_EA_BIT14 )
++ ea |= 0x20;
++ ea = ea << 12;
++ __tlbie(ea); /* Bits 14-19 used */
++
++ // printk("lvflush: ea (bit 14-19) %08X (pent %08X)\n", ea, pr->pent );
++
++ /* caller's responsibility to free the pent */
++}
++
++static void
++flush_lvptr_( vsid_info_t *vi, ulong lvptr )
++{
++ pterec_t *head, *last, *first;
++ pte_lvrange_t *lvr;
++
++ //BUMP( pent_flush_lvptr );
++
++ for( lvr=vi->lvrange_head; lvr && lvptr - lvr->base >= lvr->size ; lvr=lvr->next )
++ ;
++ if( !lvr )
++ return;
++ // printk("flush_lvptr: %08lX\n", lvptr );
++
++ head = lvr->pents + ((lvptr - lvr->base) >> 12);
++#ifdef DEBUG
++ if( !(head->pent & PENT_LV_HEAD) ) {
++ printk("flush: Internal error\n");
++ return;
++ }
++#endif
++ /* first pent to be put on the free list */
++ first = head->lv_next;
++
++ /* not just a single entry? */
++ if( first != head ) {
++ last = head;
++ do {
++ last = last->lv_next;
++ pent_flush_unlink_ea( last );
++ } while( last->lv_next != head );
++
++ last->lv_next = vi->free_pents;
++ vi->free_pents = first;
++ }
++ if( !(head->pent & PENT_UNUSED) )
++ pent_flush_unlink_ea( head );
++
++ //head->ea_next = NULL;
++ head->lv_next = head;
++ head->pent = PENT_UNUSED | PENT_LV_HEAD;
++}
++
++/* asynchronous entrypoint (caused e.g. a swapout) */
++void
++flush_lvptr( kernel_vars_t *kv, ulong lvptr )
++{
++ vsid_info_t *vi = MMU.vsid_info;
++ LOCK;
++ if( (char*)lvptr == MMU.lvptr_reservation )
++ MMU.lvptr_reservation_lost = 1;
++ flush_lvptr_( vi, lvptr );
++ UNLOCK;
++}
++
++
++void
++flush_lv_range( kernel_vars_t *kv, ulong lvbase, int size )
++{
++ vsid_info_t *vi = MMU.vsid_info;
++ LOCK;
++ /* this is quite inefficient but the function is seldom used */
++ for( ; size > 0 ; lvbase += 0x1000, size -= 0x1000 )
++ flush_lvptr_( vi, lvbase );
++ UNLOCK;
++}
++
++void
++flush_ea_range( kernel_vars_t *kv, ulong org_ea, int size )
++{
++ vsid_info_t *vi = MMU.vsid_info;
++ skiplist_iter_t iter;
++ pent_table_t *t;
++ char *userdata;
++ ulong ea, end;
++ int i;
++
++ //BUMP( flush_ea_range );
++ //printk("flush_ea_range\n");
++
++ LOCK;
++#ifdef DEBUG
++ if( size > 0x10000000 || org_ea & 0xf0000000 ) {
++ printk("flush_ea_range: Bad parameters %08lX %08X\n", org_ea, size);
++ size=0x10000000;
++ org_ea=0;
++ }
++#endif
++ end = org_ea + size;
++
++ /* XXX: This is horribly inefficient */
++ iter = skiplist_iterate( &MMU.vsid_sl );
++ while( skiplist_getnext(&MMU.vsid_sl, &iter, &userdata) ) {
++ vsid_ent_t *r = (vsid_ent_t*)userdata;
++ ea = org_ea;
++ while( ea < end ) {
++ if( !(t=r->lev2[LEV2_IND(ea)]) ) {
++ ea = (ea & ~LEV2_MASK) + LEV2_MASK + 1;
++ continue;
++ }
++ for( i=PELIST_IND(ea); i<32 && ea < end; i++, ea += 0x1000 ) {
++ if( t->pelist[i] )
++ flush_vsid_ea_( vi, r, ea );
++ }
++ }
++ }
++ UNLOCK;
++}
++
++/* clear all pte entries belonging to this vsid */
++static void
++flush_vsid( vsid_info_t *vi, vsid_ent_t *r )
++{
++ pent_table_t *t;
++ ulong ea=0;
++ int i;
++
++ //BUMP( flush_vsid );
++
++ /* not very efficient */
++ while( ea < 0x10000000 ) {
++ if( !(t=r->lev2[LEV2_IND(ea)]) ) {
++ ea = (ea & ~LEV2_MASK) + LEV2_MASK + 1;
++ continue;
++ }
++ for( i=PELIST_IND(ea); i<32; i++, ea += 0x1000 ) {
++ if( t->pelist[i] )
++ flush_vsid_ea_( vi, r, ea );
++ }
++ }
++ /* free level2 tables */
++ for( i=0; i<64; i++ ) {
++ pent_table_t *t = r->lev2[i];
++ r->lev2[i] = NULL;
++
++ /* XXX: The lev2 table _should_ be empty but we
++ * might want to verify this...
++ */
++ if( t ) {
++ t->pelist[0] = (void*)vi->free_pent_tables;
++ vi->free_pent_tables = t;
++ }
++ }
++}
++
++
++/************************************************************************/
++/* Allocations */
++/************************************************************************/
++
++/* this function allocates 0x1000 - sizeof(alloc_ent_t) zeroed bytes */
++static void *
++do_chunk_kmalloc( vsid_info_t *vi, int what )
++{
++ alloc_ent_t *mp;
++ char *ptr;
++
++ if( vi->alloc_size > vi->alloc_limit )
++ return NULL;
++ if( !(ptr=(char*)alloc_page_mol()) )
++ return NULL;
++ mp = (alloc_ent_t*)((char*)ptr + 0x1000 - sizeof(alloc_ent_t));
++
++ mp->next = vi->allocations;
++ mp->ptr = ptr;
++ mp->what = what;
++ vi->allocations = mp;
++
++ vi->alloc_size += 0x1000;
++ BUMP_N( alloced, 0x1000 );
++ return ptr;
++}
++
++static void
++do_kfree( vsid_info_t *vi, int what )
++{
++ alloc_ent_t *p, **mp = &vi->allocations;
++
++ while( *mp ) {
++ p = *mp;
++ if( p->what == what || what == ALLOC_CONT_ANY ) {
++ *mp = p->next;
++ free_page_mol( (ulong)p->ptr );
++
++ vi->alloc_size -= 0x1000;
++ BUMP_N( released, 0x1000 );
++ } else {
++ mp = &p->next;
++ }
++ }
++}
++
++/* Note: mtable_memory_check() must have been called previously */
++static inline pent_table_t *
++get_free_lev2( vsid_info_t *vi )
++{
++ pent_table_t *t = vi->free_pent_tables;
++
++ vi->free_pent_tables = (pent_table_t*)vi->free_pent_tables->pelist[0];
++ t->pelist[0] = NULL;
++ return t;
++}
++
++/* this function is responsible for setting PENT_LV_HEAD and lv_next */
++static pterec_t *
++get_free_pent( vsid_info_t *vi, pte_lvrange_t *lvrange, char *lvptr )
++{
++ pterec_t *pr, *pr2;
++ int pent = 0;
++ int ind;
++
++ if( lvrange ) {
++ ind = (((int)lvptr - lvrange->base) >> 12);
++ pr2 = &lvrange->pents[ind];
++
++ if( (pr2->pent & PENT_UNUSED) ) {
++ pr = pr2;
++ pent = PENT_LV_HEAD;
++ } else {
++ /* alloc new entry */
++ pr = vi->free_pents;
++ vi->free_pents = pr->lv_next;
++
++ /* add to lv ring (after the head element) */
++ pr->lv_next = pr2->lv_next;
++ pr2->lv_next = pr;
++ }
++ } else {
++ /* alloc new entry */
++ pr = vi->free_pents;
++ vi->free_pents = pr->lv_next;
++
++ pr->lv_next = NULL;
++ }
++
++ /* allocate pterec_t and insert into the lv ring */
++ pr->pent = pent;
++ return pr;
++}
++
++static int
++lev2_alloc( vsid_info_t *vi )
++{
++ const int m = sizeof(pent_table_t) - 1;
++ pent_table_t *t;
++ int i, n = CHUNK_SIZE/sizeof(pent_table_t);
++
++ //BUMP( lev2_alloc );
++
++ if( !(t=do_chunk_kmalloc(vi, ALLOC_CONT_LEV2)) )
++ return 1;
++
++ /* the alignment must be correct (the ea calculation will fail otherwise) */
++ if( (int)t & m ) {
++ t = (pent_table_t*)((int)t + m + 1 - ((int)t & m));
++ n--;
++ }
++
++ memset( t, 0, n*sizeof(pent_table_t) );
++ for( i=0; i<n-1; i++ )
++ t[i].pelist[0] = (void*)&t[i+1];
++ LOCK;
++ t[i].pelist[0] = (void*)vi->free_pent_tables;
++ vi->free_pent_tables = &t[0];
++ UNLOCK;
++ return 0;
++}
++
++static int
++pent_alloc( vsid_info_t *vi )
++{
++ const int n = CHUNK_SIZE/sizeof(pterec_t);
++ pterec_t *pr;
++ int i;
++
++ //BUMP( pent_alloc );
++
++ if( !(pr=do_chunk_kmalloc(vi, ALLOC_CONT_PENT)) )
++ return 1;
++ memset( pr, 0, CHUNK_SIZE );
++
++ for( i=0; i<n-1; i++ )
++ pr[i].lv_next = &pr[i+1];
++ LOCK;
++ pr[i].lv_next = vi->free_pents;
++ vi->free_pents = &pr[0];
++ UNLOCK;
++ return 0;
++}
++
++
++/* This function is to be called at a safe time (it might allocate
++ * memory). It ensures the next pte_inserted call will succeed.
++ */
++int
++mtable_memory_check( kernel_vars_t *kv )
++{
++ vsid_info_t *vi = MMU.vsid_info;
++
++ /* optimize the common case */
++ if( vi->free_pents && vi->free_pent_tables )
++ return 0;
++
++ if( !vi->free_pent_tables )
++ lev2_alloc(vi);
++ if( !vi->free_pents )
++ pent_alloc(vi);
++
++ if( !vi->free_pents || !vi->free_pent_tables ) {
++ clear_all_vsids( kv );
++ return 1;
++ }
++ return 0;
++}
++
++
++/************************************************************************/
++/* pte_insert */
++/************************************************************************/
++
++static inline void
++relink_lv( vsid_info_t *vi, pterec_t *pr, pte_lvrange_t *lvrange, char *lvptr )
++{
++ int ind = (((int)lvptr - lvrange->base) >> 12);
++ pterec_t *pnew, *p, *lv_head = &lvrange->pents[ind];
++
++ if( !pr->lv_next ) {
++ //printk("Not previously on lvlist\n");
++ pr->lv_next = lv_head->lv_next;
++ lv_head->lv_next = pr;
++ return;
++ }
++
++ if( pr->pent & PENT_LV_HEAD ) {
++ if( pr == lv_head ) {
++ //printk("lvptr is head (correct lv ring)\n");
++ return;
++ }
++
++ /* unlink from ea ring and add new pent */
++ for( p=pr->ea_next; p->ea_next != pr ; p=p->ea_next )
++ ;
++ pnew = get_free_pent( vi, lvrange, lvptr );
++ pnew->ea_next = pr->ea_next;
++ p->ea_next = pnew;
++
++ pnew->pent |= (pr->pent & ~(PENT_UNUSED | PENT_LV_HEAD));
++
++ /* clear old lvhead */
++ // pr->ea_next = NULL;
++ pr->pent = PENT_LV_HEAD | PENT_UNUSED;
++
++ //printk("lvptr is head\n");
++ return;
++ } else {
++ for( p=pr->lv_next; !(p->pent & PENT_LV_HEAD) ; p=p->lv_next )
++ ;
++ if( p == lv_head ) {
++ //printk("lvptr is on the correct lv ring\n");
++ return;
++ }
++
++ /* lvptr has changed, unlink */
++ for( ; p->lv_next != pr ; p=p->lv_next )
++ ;
++ p->lv_next = pr->lv_next;
++
++ /* add to lv ring */
++ pr->lv_next = lv_head->lv_next;
++ lv_head->lv_next = pr;
++ }
++}
++
++/* Note: If lvrange is NULL then lvptr should be ignored */
++void
++pte_inserted( kernel_vars_t *kv, ulong ea, char *lvptr, pte_lvrange_t *lvrange,
++ ulong *pte, vsid_ent_t *r, int segreg )
++{
++ vsid_info_t *vi = MMU.vsid_info;
++ int pl_ind = PELIST_IND(ea);
++ uint pent, pent_cmp;
++ pterec_t *pr, **pp;
++ pent_table_t **tt;
++
++ LOCK;
++ if( lvrange && MMU.lvptr_reservation_lost ) {
++ printk("mtable: lvptr reservation lost %08x\n", (int)lvptr );
++ pte[0] = 0;
++ __tlbie(ea);
++ goto out;
++ }
++
++ tt = &r->lev2[ LEV2_IND(ea) ];
++
++ pent_cmp = (ea & PENT_TOPEA_MASK);
++ if( (r->linux_vsid_sv & VSID_MASK) == (segreg & VSID_MASK) )
++ pent_cmp |= PENT_SV_BIT;
++
++ if( !*tt )
++ *tt = get_free_lev2(vi);
++
++ pp = &(**tt).pelist[ pl_ind ];
++ if( (pr=*pp) ) {
++ do {
++ pent = pr->pent;
++ if( (pent & PENT_CMP_MASK) == pent_cmp ) {
++ pent &= ~PENT_INDEX_MASK;
++ pent |= PTE_TO_IND(pte);
++ pr->pent = pent;
++
++ /* the lvptr might have changed */
++ if( lvrange )
++ relink_lv( vi, pr, lvrange, lvptr );
++ else {
++ /* The pent might belong to a lvring unnecessarily.
++ * It is not worth the extra overhead addressing this
++ * (uncommon) case
++ */
++ }
++ //printk("PTE entry reused\n");
++ goto out;
++ }
++ pr=pr->ea_next;
++ } while( !(pent & PENT_EA_LAST) );
++
++ /* get_free_pent inserts the entry into the lvring and sets a few pent bits */
++ pr = get_free_pent(vi, lvrange, lvptr);
++ pr->pent |= PTE_TO_IND(pte) | pent_cmp | ((ea & BIT(14)) ? PENT_EA_BIT14 : 0);
++
++ /* insert in (non-empty) ea ring */
++ pr->ea_next = *pp;
++ *pp = pr;
++ } else {
++ /* ea ring was empty */
++ pr = *pp = get_free_pent(vi, lvrange, lvptr);
++ pr->pent |= PENT_EA_LAST | PTE_TO_IND(pte) | pent_cmp
++ | ((ea & BIT(14)) ? PENT_EA_BIT14 : 0);
++ pr->ea_next = (pterec_t*)pp;
++ }
++ out:
++ UNLOCK;
++}
++
++
++/************************************************************************/
++/* VSID allocation */
++/************************************************************************/
++
++/* initialize vsid element callback (ind loops from 0 to n-1) */
++static void
++_vsid_el_callback( char *data, int ind, int n, void *usr1_kv, void *dummy )
++{
++ kernel_vars_t *kv = (kernel_vars_t*)usr1_kv;
++ vsid_ent_t *r = (vsid_ent_t*)data;
++
++ r->linux_vsid = alloc_context(kv) | VSID_Kp;
++ r->linux_vsid_sv = alloc_context(kv) | VSID_Kp;
++ r->myself_virt = r;
++}
++
++/* mac_vsid might be negative (used as vsid for unmapped access).
++ * Thus, do not apply this VSID mask anywhere...
++ */
++static vsid_ent_t *
++alloc_vsid_ent( kernel_vars_t *kv, int mac_vsid )
++{
++ char *buf;
++
++ if( skiplist_needalloc(&MMU.vsid_sl) ) {
++ /* this check might invoke clear_all_vsids() */
++ handle_context_wrap( kv, CHUNK_SIZE/sizeof(vsid_ent_t)*2 );
++
++ if( !(buf=do_chunk_kmalloc(MMU.vsid_info, ALLOC_CONT_VSID)) )
++ return NULL;
++ memset( buf, 0, CHUNK_SIZE );
++
++ (void) skiplist_prealloc( &MMU.vsid_sl, buf, CHUNK_SIZE, _vsid_el_callback, kv, NULL );
++ }
++ return (vsid_ent_t*)skiplist_insert( &MMU.vsid_sl, mac_vsid );
++}
++
++/* flushes all vsids (including the fake no-MMU vsids) */
++void
++clear_all_vsids( kernel_vars_t *kv )
++{
++ vsid_info_t *vi = MMU.vsid_info;
++ skiplist_iter_t iter;
++ char *userdata;
++
++ LOCK;
++ iter = skiplist_iterate( &MMU.vsid_sl );
++ while( skiplist_getnext(&MMU.vsid_sl, &iter, &userdata) )
++ flush_vsid( vi, (vsid_ent_t*)userdata );
++
++ skiplist_init( &MMU.vsid_sl, sizeof(vsid_ent_t) );
++
++ /* flush any dangling pointers */
++ clear_vsid_refs( kv );
++
++ /* all vsids cleared -> all lev2 cleared -> no pents in use */
++ vi->free_pents = NULL;
++ vi->free_pent_tables = NULL;
++ UNLOCK;
++ do_kfree( vi, ALLOC_CONT_ANY );
++
++ BUMP(clear_all_vsids);
++}
++
++/* This function flushes *ALL* PTEs inserted by MOL. It is primarily
++ * used when it is too difficult to make a more specific invalidation.
++ */
++void
++clear_pte_hash_table( kernel_vars_t *kv )
++{
++ /* this will free the vsids too... */
++ clear_all_vsids( kv );
++}
++
++vsid_ent_t *
++vsid_get_user_sv( kernel_vars_t *kv, int mac_vsid, ulong *user_ret, ulong *sv_ret )
++{
++ vsid_ent_t *r = vsid_ent_lookup( kv, mac_vsid );
++
++ if( !r && !(r=alloc_vsid_ent(kv, mac_vsid)) ) {
++ clear_all_vsids( kv );
++ if( !(r=alloc_vsid_ent(kv, mac_vsid)) ) {
++ printk("VSID allocation failure\n");
++ return NULL;
++ }
++ }
++ *user_ret = r->linux_vsid;
++ *sv_ret = r->linux_vsid_sv;
++ return r;
++}
++
++/************************************************************************/
++/* resource reclaiming */
++/************************************************************************/
++
++void
++mtable_reclaim( kernel_vars_t *kv )
++{
++ vsid_info_t *vi = MMU.vsid_info;
++ skiplist_iter_t iter;
++ pent_table_t *t;
++ char *userdata;
++ int i,j;
++
++ /* This function runs on the main thread, thus the skiplist stuff does
++ * not need locking. In fact, it is only the free_pent_tables
++ * list that needs spinlock protection.
++ */
++ LOCK;
++ iter = skiplist_iterate( &MMU.vsid_sl );
++ while( skiplist_getnext(&MMU.vsid_sl, &iter, &userdata) ) {
++ vsid_ent_t *r = (vsid_ent_t*)userdata;
++ const int n1 = sizeof(r->lev2)/sizeof(r->lev2[0]);
++ const int n2 = sizeof(t->pelist)/sizeof(t->pelist[0]);
++
++ for( i=0; i<n1; i++ ) {
++ if( !(t=r->lev2[i]) )
++ continue;
++ for( j=0; j<n2 && !(t->pelist[j]) ; j++ )
++ ;
++ if( j != n2 )
++ break;
++ /* level2 empty... */
++ r->lev2[i]->pelist[0] = (void*)vi->free_pent_tables;
++ vi->free_pent_tables = r->lev2[i];
++ r->lev2[i] = NULL;
++
++ BUMP(lev2_reclaim);
++ }
++ if( i == n1 ) {
++ int vsid = skiplist_iter_getkey( &MMU.vsid_sl, (char*)r );
++
++ /* the segment might be in use... */
++ for( i=0; i<16 && MMU.vsid[i] != r; i++ )
++ ;
++ if( i != 16 || (uint)vsid > VSID_MASK )
++ continue;
++ skiplist_delete( &MMU.vsid_sl, vsid );
++ BUMP(vsid_reclaim);
++ }
++ }
++ UNLOCK;
++}
++
++/************************************************************************/
++/* lvrange allocation */
++/************************************************************************/
++
++pte_lvrange_t *
++register_lvrange( kernel_vars_t *kv, char *lvbase, int size )
++{
++ vsid_info_t *vi = MMU.vsid_info;
++ pte_lvrange_t *lvr;
++ int i, nel = (size >> 12);
++ int s = sizeof(pterec_t) * nel;
++
++ /* printk("register_lvrange\n"); */
++
++ if( !(lvr=kmalloc_mol(sizeof(pte_lvrange_t))) )
++ return NULL;
++ memset( lvr, 0, sizeof(pte_lvrange_t) );
++
++ if( !(lvr->pents=vmalloc_mol(s)) ) {
++ kfree_mol( lvr );
++ return NULL;
++ }
++ /* setup empty lvrings */
++ for( i=0; i<nel; i++ ) {
++ lvr->pents[i].pent = PENT_LV_HEAD | PENT_UNUSED;
++ lvr->pents[i].lv_next = &lvr->pents[i];
++ lvr->pents[i].ea_next = NULL;
++ }
++ lvr->base = (ulong)lvbase;
++ lvr->size = size;
++
++ LOCK;
++ /* add to linked list */
++ lvr->next = vi->lvrange_head;
++ vi->lvrange_head = lvr;
++ UNLOCK;
++
++ return lvr;
++}
++
++void
++free_lvrange( kernel_vars_t *kv, pte_lvrange_t *lvrange )
++{
++ vsid_info_t *vi = MMU.vsid_info;
++ pte_lvrange_t **lvr;
++
++ lvr = &vi->lvrange_head;
++ for( ; *lvr && *lvr != lvrange; lvr=&(**lvr).next )
++ ;
++ if( !*lvr ) {
++ printk("free_lvrange: Internal error\n");
++ return;
++ }
++ flush_lv_range( kv, (**lvr).base, (**lvr).size );
++ LOCK;
++ *lvr = (**lvr).next;
++ UNLOCK;
++
++ vfree_mol( lvrange->pents );
++ kfree_mol( lvrange );
++}
++
++/************************************************************************/
++/* init / cleanup */
++/************************************************************************/
++
++void
++mtable_tune_alloc_limit( kernel_vars_t *kv, int ramsize_mb )
++{
++ vsid_info_t *vi = MMU.vsid_info;
++ vi->alloc_limit = (ramsize_mb + 160) * 4096;
++ /* printk("alloc_limit: %d K\n", vi->alloc_limit/1024 ); */
++}
++
++int
++init_mtable( kernel_vars_t *kv )
++{
++ vsid_info_t *vi = kmalloc_mol( sizeof(vsid_info_t) );
++
++ MMU.vsid_info = vi;
++ if( !vi )
++ return 1;
++ memset( vi, 0, sizeof(vsid_info_t) );
++ spin_lock_init_mol( &vi->lock );
++
++ /* will be tuned when we know how much RAM we have */
++ vi->alloc_limit = 2 * 1024 * 1024;
++
++ skiplist_init( &MMU.vsid_sl, sizeof(vsid_ent_t) );
++
++ if( !VSID_OFFSETS_OK ) {
++ printk("VSID offsets are BAD (fix offset in source)!\n");
++ return 1;
++ }
++ return 0;
++}
++
++void
++cleanup_mtable( kernel_vars_t *kv )
++{
++ vsid_info_t *vi = MMU.vsid_info;
++
++ if( vi ) {
++ while( vi->lvrange_head ) {
++ printk("Bug: lvrange unreleased!\n");
++ free_lvrange( kv, vi->lvrange_head );
++ }
++ do_kfree( vi, ALLOC_CONT_ANY );
++ kfree_mol( vi );
++ }
++ memset( &MMU.vsid_sl, 0, sizeof(MMU.vsid_sl) );
++ MMU.vsid_info = NULL;
++}
++
++
++/************************************************************************/
++/* userland debug */
++/************************************************************************/
++
++#ifdef UL_DEBUG
++#include "mtable_dbg.c"
++#endif
+--- /dev/null
++++ b/drivers/macintosh/mol/ptaccess.c
+@@ -0,0 +1,153 @@
++/*
++ * Creation Date: <2001/03/25 18:04:45 samuel>
++ * Time-stamp: <2002/08/03 17:43:10 samuel>
++ *
++ * <ptaccess.c>
++ *
++ * Handle stores to the (emulated) page table
++ *
++ * Copyright (C) 2001, 2002 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include "mmu.h"
++#include "rvec.h"
++#include "mtable.h"
++#include "misc.h"
++#include "performance.h"
++
++extern int do_intercept_tlbie( kernel_vars_t *kv, ulong pte0, ulong pte1, ulong pteoffs );
++extern int do_intercept_tlbie_block( kernel_vars_t *kv, ulong pteoffs, ulong length );
++
++#define MMU (kv->mmu)
++#define MREGS (kv->mregs)
++
++int
++do_intercept_tlbie( kernel_vars_t *kv, ulong pte0, ulong pte1, ulong pteoffs )
++{
++ int vsid = (pte0 >> 7) & VSID_MASK;
++ ulong v;
++
++ BUMP( do_intercept_tlbie );
++
++ if( MMU.pthash_inuse_bits )
++ clear_bit_mol( pteoffs >> 3, MMU.pthash_inuse_bits );
++
++ v = (pteoffs >> 6);
++ if( pte0 & BIT(25) ) /* secondary hash? */
++ v = ~v;
++ v ^= (pte0 >> 7);
++ v = ((pte0 << 10) & 0xfc00) | (v & 0x3ff);
++
++ //printk("do_intercept_tlbie: vsid %08lX, ea %08lX\n", vsid, (v<<12) );
++ flush_vsid_ea( kv, vsid, (v<<12) );
++
++ return RVEC_NOP;
++}
++
++int
++do_intercept_tlbie_block( kernel_vars_t *kv, ulong pteoffs, ulong length )
++{
++ unsigned int finish;
++
++ //printk("do_intercept_tlbie_block: pteoffs %08lX length %08lX\n", pteoffs, length);
++
++ if (pteoffs + length > MMU.hash_mask) {
++ printk("do_intercept_tlbie_block: length exceeding hash!\n");
++ finish = MMU.hash_mask + 1;
++ } else
++ finish = pteoffs + length;
++
++ if (MMU.pthash_inuse_bits == NULL)
++ return RVEC_NOP;
++
++ while (pteoffs < finish) {
++ if (check_bit_mol(pteoffs >> 3, MMU.pthash_inuse_bits)) {
++ ulong pte0, pte1;
++
++ pte0 = *((unsigned int *) (MMU.hash_base + pteoffs));
++ pte1 = *((unsigned int *) (MMU.hash_base + pteoffs + 4));
++ do_intercept_tlbie(kv, pte0, pte1, pteoffs);
++ }
++
++ pteoffs += 8;
++ }
++
++ return RVEC_NOP;
++}
++
++#ifdef EMULATE_603
++
++extern int do_tlbli( kernel_vars_t *kv, ulong ea );
++extern int do_tlbld( kernel_vars_t *kv, ulong ea );
++
++int
++do_tlbli( kernel_vars_t *kv, ulong ea )
++{
++ int ind = (ea >> 12) & 0x1f;
++ mPTE_t *p;
++
++ //printk("do_tlbli %08lX : %08lX %08lX\n", ea, MREGS.spr[S_ICMP], MREGS.spr[S_RPA] );
++ if( MREGS.spr[S_SRR1] & BIT(14) )
++ ind += 32;
++
++ p = &MMU.ptes_i_603[ind];
++ if( p->v )
++ flush_vsid_ea( kv, p->vsid, MMU.ptes_i_ea_603[ind] );
++ MMU.ptes_i_ea_603[ind] = ea & 0x0ffff000;
++ *(ulong*)p = MREGS.spr[ S_ICMP ];
++ *((ulong*)p+1) = MREGS.spr[ S_RPA ];
++
++ return RVEC_NOP;
++}
++
++int
++do_tlbld( kernel_vars_t *kv, ulong ea )
++{
++ int ind = (ea >> 12) & 0x1f;
++ mPTE_t *p;
++
++ //printk("do_tlbld %08lX\n", ea );
++
++ if( MREGS.spr[S_SRR1] & BIT(14) )
++ ind += 32;
++
++ p = &MMU.ptes_d_603[ind];
++ if( p->v )
++ flush_vsid_ea( kv, p->vsid, MMU.ptes_d_ea_603[ind] );
++ MMU.ptes_d_ea_603[ind] = ea & 0x0ffff000;
++ *(ulong*)p = MREGS.spr[ S_DCMP ];
++ *((ulong*)p+1) = MREGS.spr[ S_RPA ];
++
++ return RVEC_NOP;
++}
++
++int
++do_tlbie( kernel_vars_t *kv, ulong ea )
++{
++ int ind = (ea >> 12) & 0x1f;
++ mPTE_t *pi, *pd;
++ ulong *iea, *dea;
++
++ pi = &MMU.ptes_i_603[ind];
++ pd = &MMU.ptes_d_603[ind];
++ iea = &MMU.ptes_i_ea_603[ind];
++ dea = &MMU.ptes_d_ea_603[ind];
++ for( ; ind < 64; ind +=32, pd += 32, pi += 32, iea += 32, dea +=32 ) {
++ if( pi->v )
++ flush_vsid_ea( kv, pi->vsid, *iea );
++ if( pd->v )
++ flush_vsid_ea( kv, pd->vsid, *dea );
++ *(ulong*)pi = 0;
++ *(ulong*)pd = 0;
++ }
++ return RVEC_NOP;
++}
++
++#endif /* EMULATE_603 */
++
+--- /dev/null
++++ b/drivers/macintosh/mol/sheep.c
+@@ -0,0 +1,701 @@
++/*
++ * sheep_net.c - Linux driver for SheepShaver/Basilisk II networking (access to raw Ethernet packets)
++ *
++ * SheepShaver (C) 1997-1999 Mar"c" Hellwig and Christian Bauer
++ * Basilisk II (C) 1997-1999 Christian Bauer
++ *
++ * Ported to 2.4 and reworked, Samuel Rydh 1999-2003
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/miscdevice.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/if_ether.h>
++#include <linux/if_arp.h>
++#include <linux/fs.h>
++#include <linux/poll.h>
++#include <linux/init.h>
++#include <net/sock.h>
++#include <asm/uaccess.h>
++#include <net/arp.h>
++#include <net/ip.h>
++#include <linux/in.h>
++#include <linux/wait.h>
++
++MODULE_AUTHOR("Marc Hellwig and Christian Bauer");
++MODULE_DESCRIPTION("SheepShaver/Basilisk II networking");
++MODULE_LICENSE("GPL");
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++#define LINUX_26
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
++#define ETH_HDR(skb) eth_hdr((skb))
++#else
++#define ETH_HDR(skb) (skb)->mac.ethernet
++#endif
++
++#define DEBUG 0
++
++#define bug printk
++#if DEBUG
++#define D(x) (x);
++#else
++#define D(x) ;
++#endif
++
++#define SHEEP_NET_MINOR 198 // Driver minor number
++#define MAX_QUEUE 32 // Maximum number of packets in queue
++#define PROT_MAGIC 1520 // Our "magic" protocol type
++
++#define ETH_ADDR_MULTICAST 0x1
++#define ETH_ADDR_LOCALLY_DEFINED 0x2
++
++#define SIOC_MOL_GET_IPFILTER SIOCDEVPRIVATE
++#define SIOC_MOL_SET_IPFILTER (SIOCDEVPRIVATE + 1)
++
++struct SheepVars {
++ /* IMPORTANT: the packet_type struct must go first. It no longer (2.6) contains
++ * a data field so we typecast to get the SheepVars struct
++ */
++ struct packet_type pt; // Receiver packet type
++ struct net_device *ether; // The Ethernet device we're attached to
++ struct sock *skt; // Socket for communication with Ethernet card
++ struct sk_buff_head queue; // Receiver packet queue
++ wait_queue_head_t wait; // Wait queue for blocking read operations
++ unsigned long ipfilter; // only receive ip packets destined for this address
++ char fake_addr[6];
++};
++
++/*
++ * How various hosts address MOL
++ *
++ * External hosts: eth_addr, MOL_IP
++ * Local host: fake_addr, MOL_IP
++ * MOL: fake_addr, MOL_IP
++ */
++
++#ifdef LINUX_26
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
++#define compat_sk_alloc(a,b,c) sk_alloc( (a), (b), &mol_proto, 1 )
++#else
++#define compat_sk_alloc(a,b,c) sk_alloc( (a), (b), (c), NULL )
++#endif
++
++#define skt_set_dead(skt) do {} while(0)
++#define wmem_alloc sk_wmem_alloc
++#else
++#define compat_sk_alloc sk_alloc
++#define skt_set_dead(skt) (skt)->dead = 1
++#endif
++
++/************************************************************************/
++/* ethernet address masquerading */
++/************************************************************************/
++
++static inline int
++addrcmp( const char *a1, const char *a2 )
++{
++ if( *(u32*)a1 != *(u32*)a2 )
++ return 1;
++ return *((u16*)a1+2) != *((u16*)a2+2);
++}
++
++/* Outgoing packet. Replace the fake enet addr with the real one. */
++static inline void
++cpyaddr( char *d, const char *s )
++{
++ *(u32*)d = *(u32*)s;
++ *(u16*)&d[4] = *(u16*)&s[4];
++}
++
++static void
++demasquerade( struct sk_buff *skb, struct SheepVars *v )
++{
++ const char *local_addr = v->ether->dev_addr;
++ const char *fake_addr = v->fake_addr;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21))
++ char *p = skb_mac_header(skb);
++#else
++ char *p = skb->mac.raw;
++#endif
++ int proto = *(short*)&p[12];
++
++ cpyaddr( &p[6], local_addr ); // Source address
++
++ // Need to fix ARP packets
++ if( proto == htons(ETH_P_ARP) )
++ if( !addrcmp(&p[14+8], fake_addr) ) // sender HW-addr
++ cpyaddr( &p[14+8], local_addr );
++
++ // ...and AARPs (snap code: 0x00,0x00,0x00,0x80,0xF3)
++ if( !p[17] && *(u32*)&p[18] == 0x000080F3 ){
++ // XXX: we should perhaps look for the 802 frame too
++ if( !addrcmp(&p[30], fake_addr) )
++ cpyaddr( &p[30], local_addr ); // sender HW-addr
++ }
++}
++
++
++/************************************************************************/
++/* receive filter (also intercepts outgoing packets) */
++/************************************************************************/
++
++/* This function filters both outgoing and incoming traffic.
++ *
++ * - Outgoing PROT_MAGIC packets are outgoing mol packets
++ * addressed to the world (not to the local host).
++ *
++ * - Outgoing packets addressed to the fake address
++ * are incoming MOL packets (from the local host).
++ * These packets will be seen on the wire, since we can't
++ * block them...
++ *
++ * - Incoming packets which originate from the fake address
++ * are MOL packets addressed to the local host.
++ *
++ * - Incomming external traffic to the MOL IP address are incoming
++ * MOL packets. Linux will see these packets too. (Hmm... if
++ * we change protocol to PROT_MAGIC then linux ought to ignore
++ * them; currently linux responds to ICMP packets even though
++ * the IP address is wrong.)
++ */
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
++static int
++sheep_net_receiver( struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev )
++#else
++static int
++sheep_net_receiver( struct sk_buff *skb, struct net_device *dev, struct packet_type *pt )
++#endif
++{
++ int multicast = (ETH_HDR(skb)->h_dest[0] & ETH_ADDR_MULTICAST);
++ const char *laddr = dev->dev_addr;
++ struct sk_buff *skb2;
++ struct SheepVars *v = (struct SheepVars*)pt;
++
++ D(bug("sheep_net: packet received\n"));
++
++ if( skb->pkt_type == PACKET_OUTGOING ) {
++ // Is this an MOL packet to the world?
++ if( skb->protocol == PROT_MAGIC )
++ goto drop;
++
++ if( !multicast ) {
++ // Drop, unless this is a localhost -> MOL transmission */
++			if( addrcmp((char*)&ETH_HDR(skb)->h_dest, v->fake_addr) )
++ goto drop;
++
++ /* XXX: If it were possible, we would prevent the packet from beeing sent out
++ * on the wire (after having put it on our packet reception queue).
++ * A transmission to a non-existent mac address will unfortunately
++ * be subnet-visible (having a switched network doesn't help). As a
++ * workaround, we change the destination address to the address of
++ * the controller. This way, the packet ought to be discarded by
++ * switches.
++ */
++			cpyaddr( &ETH_HDR(skb)->h_dest[0], laddr );
++ }
++ } else {
++ // is this a packet to the local host from MOL?
++		if( !addrcmp((char*)&ETH_HDR(skb)->h_source, v->fake_addr) )
++ goto drop;
++
++ if( !multicast ) {
++ // if the packet is not meant for this host, discard it
++			if( addrcmp((char*)&ETH_HDR(skb)->h_dest, laddr) )
++ goto drop;
++
++ // filter IP-traffic
++ if( (skb->protocol == htons(ETH_P_IP)) ) {
++ // drop if not addreesed to MOL?
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21))
++ if( !v->ipfilter || (ipip_hdr(skb)->daddr != v->ipfilter) )
++#else
++ if( !v->ipfilter || (skb->h.ipiph->daddr != v->ipfilter) )
++#endif
++ goto drop;
++ // we don't want this packet interpreted by linux...
++ skb->protocol = PROT_MAGIC;
++ }
++ }
++ }
++ // Discard packets if queue gets too full
++ if( skb_queue_len(&v->queue) > MAX_QUEUE )
++ goto drop;
++
++ /* masquerade. The skb is typically has a refcount != 1 so we play safe
++ * and make a copy before modifying it. This also takes care of fragmented
++ * skbuffs (we might receive those if we are attached to a device with support
++ * for it)
++ */
++ if( !(skb2=skb_copy(skb, GFP_ATOMIC)) )
++ goto drop;
++ kfree_skb( skb );
++ skb = skb2;
++
++ if( !multicast )
++		cpyaddr( &ETH_HDR(skb)->h_dest[0], v->fake_addr );
++
++ // We also want the Ethernet header
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21))
++ skb_push( skb, skb->data - skb_mac_header(skb) );
++#else
++ skb_push( skb, skb->data - skb->mac.raw );
++#endif
++
++ // Enqueue packet
++ skb_queue_tail( &v->queue, skb );
++
++ // Unblock blocked read
++ wake_up_interruptible( &v->wait );
++ return 0;
++
++drop:
++ kfree_skb( skb );
++ return 0;
++}
++
++
++/************************************************************************/
++/* misc device ops */
++/************************************************************************/
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
++static struct proto mol_proto =
++{
++ .name = "MOL",
++ .owner = THIS_MODULE,
++ .obj_size = sizeof(struct sock)
++};
++#endif
++
++
++static int
++sheep_net_open( struct inode *inode, struct file *f )
++{
++ static char fake_addr_[6] = { 0xFE, 0xFD, 0xDE, 0xAD, 0xBE, 0xEF };
++ struct SheepVars *v;
++ D(bug("sheep_net: open\n"));
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
++ if (proto_register(&mol_proto,0) < 0)
++ {
++ printk(KERN_INFO "Unable to register protocol type\n");
++ return -1;
++ }
++#endif
++
++ // Must be opened with read permissions
++ if( (f->f_flags & O_ACCMODE) == O_WRONLY )
++ return -EPERM;
++
++ // Allocate private variables
++ f->private_data = kmalloc(sizeof(struct SheepVars), GFP_USER);
++ if( f->private_data == NULL)
++ return -ENOMEM;
++
++ v = (struct SheepVars *) f->private_data;
++
++ memset( v, 0, sizeof(*v) );
++ memcpy( v->fake_addr, fake_addr_, 6 );
++
++ skb_queue_head_init( &v->queue );
++ init_waitqueue_head( &v->wait );
++ return 0;
++}
++
++
++static int
++sheep_net_release( struct inode *inode, struct file *f )
++{
++ struct SheepVars *v = (struct SheepVars *)f->private_data;
++ struct sk_buff *skb;
++ D(bug("sheep_net: close\n"));
++
++ // Detach from Ethernet card
++ if( v->ether ) {
++ dev_remove_pack( &v->pt );
++ sk_free( v->skt );
++ v->skt = NULL;
++ dev_put( v->ether );
++ v->ether = NULL;
++ }
++
++ // Empty packet queue
++ while( (skb=skb_dequeue(&v->queue)) )
++ kfree_skb(skb);
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
++ proto_unregister(&mol_proto);
++#endif
++
++ // Free private variables
++ kfree(v);
++ return 0;
++}
++
++static inline int
++get_iovsize( const struct iovec *iv, int count )
++{
++ int s;
++ for( s=0; count-- ; iv++ )
++ s += iv->iov_len;
++ return s;
++}
++
++static int
++memcpy_tov( const struct iovec *iv, const char *buf, int s )
++{
++ while( s > 0 ) {
++ int len = min_t( unsigned int, iv->iov_len, s );
++
++ if( copy_to_user(iv->iov_base, buf, len) )
++ return -EFAULT;
++ s -= len;
++ buf += len;
++ iv++;
++ }
++ return 0;
++}
++
++static int
++memcpy_fromv( char *buf, const struct iovec *iv, int s )
++{
++ while( s > 0 ) {
++ int len = min_t( unsigned int, iv->iov_len, s );
++
++ if( copy_from_user(buf, iv->iov_base, len) )
++ return -EFAULT;
++ s -= len;
++ buf += len;
++ iv++;
++ }
++ return 0;
++}
++
++static ssize_t
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
++sheep_net_aio_read(struct kiocb *iocb, const struct iovec *iv, unsigned long count, loff_t pos)
++{
++ struct file *f = iocb->ki_filp;
++#else /* Linux 2.6.18 or older */
++sheep_net_readv( struct file *f, const struct iovec *iv, unsigned long count, loff_t *pos )
++{
++#endif
++ struct SheepVars *v = (struct SheepVars *)f->private_data;
++ struct sk_buff *skb;
++ int size = get_iovsize( iv, count );
++
++ D(bug("sheep_net: read\n"));
++
++ while( !(skb=skb_dequeue(&v->queue)) ) {
++ // wait around...
++ if( (f->f_flags & O_NONBLOCK))
++ return -EAGAIN;
++
++ interruptible_sleep_on( &v->wait );
++
++ if( signal_pending(current) )
++ return -EINTR;
++ }
++
++ // Pass packet to caller
++ if( size > skb->len )
++ size = skb->len;
++ if( memcpy_tov(iv, skb->data, size) )
++ size = -EFAULT;
++
++ kfree_skb( skb );
++ return size;
++}
++
++static ssize_t
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
++sheep_net_aio_write(struct kiocb *iocb, const struct iovec *iv, unsigned long count, loff_t off)
++{
++ struct file *f = iocb->ki_filp;
++#else /* Linux 2.6.18 or older */
++sheep_net_writev( struct file *f, const struct iovec *iv, unsigned long count, loff_t *off )
++{
++#endif
++ struct SheepVars *v = (struct SheepVars *)f->private_data;
++ struct sk_buff *skb;
++ int size = get_iovsize( iv, count );
++ char *p, *laddr;
++ D(bug("sheep_net: write\n"));
++
++ // Check packet size
++ if( size < sizeof(struct ethhdr) )
++ return -EINVAL;
++ if( size > 1514 ) {
++ printk("sheep_net_write: packet > 1514!\n");
++ size = 1514;
++ }
++
++ // Interface active?
++ if( !v->ether )
++ return size;
++ laddr = v->ether->dev_addr;
++
++ // Allocate buffer for packet
++ if( !(skb=dev_alloc_skb(size)) )
++ return -ENOBUFS;
++
++ // Stuff packet in buffer
++ p = skb_put( skb, size );
++ if( memcpy_fromv(p, iv, size) ) {
++ kfree_skb(skb);
++ return -EFAULT;
++ }
++
++ // Transmit packet
++ atomic_add( skb->truesize, &v->skt->wmem_alloc );
++ skb->sk = v->skt;
++ skb->dev = v->ether;
++ skb->priority = 0;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21))
++ skb_set_network_header(skb, v->ether->hard_header_len);
++ skb_set_transport_header(skb, v->ether->hard_header_len);
++ skb_reset_mac_header(skb);
++#else
++ skb->nh.raw = skb->h.raw = skb->data + v->ether->hard_header_len;
++ skb->mac.raw = skb->data;
++#endif
++
++ // Base the IP-filter on the IP address of outgoing ARPs
++ if( ETH_HDR(skb)->h_proto == htons(ETH_P_ARP) ) {
++ char *s = &skb->data[14+14]; /* source IP-address */
++ int n[4];
++ if( *(long*)s != v->ipfilter ) {
++ v->ipfilter = *(long*)s;
++ n[0]=s[0], n[1]=s[1], n[2]=s[2], n[3]=s[3];
++ printk("IP-filter: %d.%d.%d.%d\n", n[0], n[1], n[2], n[3] );
++ }
++ }
++
++ // Is this package addressed solely to the local host?
++ if( !addrcmp(skb->data, laddr) && !(skb->data[0] & ETH_ADDR_MULTICAST) ) {
++ skb->protocol = eth_type_trans( skb, v->ether );
++ netif_rx_ni( skb );
++ return size;
++ }
++ if( skb->data[0] & ETH_ADDR_MULTICAST ) {
++ // We can't clone the skb since we will manipulate the data below
++ struct sk_buff *lskb = skb_copy( skb, GFP_ATOMIC );
++ if( lskb ) {
++ lskb->protocol = eth_type_trans( lskb, v->ether );
++ netif_rx_ni( lskb );
++ }
++ }
++ // Outgoing packet (will be seen on the wire)
++ demasquerade( skb, v );
++
++ skb->protocol = PROT_MAGIC; // Magic value (we can recognize the packet in sheep_net_receiver)
++ dev_queue_xmit( skb );
++ return size;
++}
++
++/* We take care of this using do_sync_* instead in 2.6.19 and newer */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static ssize_t
++sheep_net_read( struct file *f, char *buf, size_t count, loff_t *off )
++{
++ struct iovec iv;
++ iv.iov_base = buf;
++ iv.iov_len = count;
++ return sheep_net_readv( f, &iv, 1, off );
++}
++
++static ssize_t
++sheep_net_write( struct file *f, const char *buf, size_t count, loff_t *off )
++{
++ struct iovec iv;
++ iv.iov_len = count;
++ iv.iov_base = (char *)buf;
++ return sheep_net_writev( f, &iv, 1, off );
++}
++#endif
++
++static unsigned int
++sheep_net_poll( struct file *f, struct poll_table_struct *wait )
++{
++ struct SheepVars *v = (struct SheepVars *)f->private_data;
++ D(bug("sheep_net: poll\n"));
++
++ poll_wait( f, &v->wait, wait );
++
++ if( !skb_queue_empty(&v->queue) )
++ return POLLIN | POLLRDNORM;
++ return 0;
++}
++
++static int
++sheep_net_ioctl( struct inode *inode, struct file *f, unsigned int code, unsigned long arg )
++{
++ struct SheepVars *v = (struct SheepVars *)f->private_data;
++ D(bug("sheep_net: ioctl %04x\n", code));
++
++ switch( code ) {
++ // Attach to Ethernet card
++ // arg: pointer to name of Ethernet device (char[20])
++ case SIOCSIFLINK: {
++ char name[20];
++ int err;
++
++ // Already attached?
++ if( v->ether )
++ return -EBUSY;
++
++ // Get Ethernet card name
++ if( copy_from_user(name, (void *)arg, 20) )
++ return -EFAULT;
++ name[19] = 0;
++
++ // Find card
++ if( !(v->ether=dev_get_by_name(name)) )
++ return -ENODEV;
++
++ // Is it Ethernet?
++ if( v->ether->type != ARPHRD_ETHER) {
++ err = -EINVAL;
++ goto error;
++ }
++
++ // Allocate socket
++ if( !(v->skt=compat_sk_alloc(0, GFP_USER, 1)) ) {
++ err = -ENOMEM;
++ goto error;
++ }
++ skt_set_dead( v->skt );
++
++ // Attach packet handler
++ v->pt.type = htons(ETH_P_ALL);
++ v->pt.dev = v->ether;
++ v->pt.func = sheep_net_receiver;
++ //v->pt.data = v;
++ dev_add_pack( &v->pt );
++ return 0;
++error:
++ if( v->ether )
++ dev_put( v->ether );
++ v->ether = NULL;
++ return err;
++ }
++
++ // Get hardware address of Ethernet card
++ // arg: pointer to buffer (6 bytes) to store address
++ case SIOCGIFADDR:
++ if( copy_to_user((void *)arg, v->fake_addr, 6))
++ return -EFAULT;
++ return 0;
++
++ // Set the fake HW-address the client will see
++ case SIOCSIFADDR:
++ if( copy_from_user(v->fake_addr, (void*)arg, 6 ))
++ return -EFAULT;
++ return 0;
++
++ // Add multicast address
++ // arg: pointer to address (6 bytes)
++ case SIOCADDMULTI: {
++ char addr[6];
++ int ret;
++ if( !v->ether )
++ return -ENODEV;
++ if( copy_from_user(addr, (void *)arg, 6))
++ return -EFAULT;
++ ret = dev_mc_add(v->ether, addr, 6, 0);
++ return ret;
++ }
++
++ // Remove multicast address
++ // arg: pointer to address (6 bytes)
++ case SIOCDELMULTI: {
++ char addr[6];
++ if( !v->ether )
++ return -ENODEV;
++ if( copy_from_user(addr, (void *)arg, 6))
++ return -EFAULT;
++ return dev_mc_delete(v->ether, addr, 6, 0);
++ }
++
++#if 0
++ // Return size of first packet in queue
++ case FIONREAD: {
++ int count = 0;
++ struct sk_buff *skb;
++ long flags;
++ spin_lock_irqsave(&v->queue.lock, flags );
++
++ skb = skb_peek(&v->queue);
++ if( skb )
++ count = skb->len;
++
++ spin_unlock_irqrestore(&v->queue.lock, flags );
++ return put_user(count, (int *)arg);
++ }
++#endif
++ case SIOC_MOL_GET_IPFILTER:
++ return put_user(v->ipfilter, (int *)arg );
++
++ case SIOC_MOL_SET_IPFILTER:
++ v->ipfilter = arg;
++ return 0;
++ }
++ return -ENOIOCTLCMD;
++}
++
++
++/************************************************************************/
++/* init / cleanup */
++/************************************************************************/
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
++static struct file_operations sheep_net_fops = {
++ .owner = THIS_MODULE,
++ .read = do_sync_read,
++ .aio_read = sheep_net_aio_read,
++ .write = do_sync_write,
++ .aio_write = sheep_net_aio_write,
++ .poll = sheep_net_poll,
++ .ioctl = sheep_net_ioctl,
++ .open = sheep_net_open,
++ .release = sheep_net_release,
++};
++#else
++static struct file_operations sheep_net_fops = {
++ .owner = THIS_MODULE,
++ .read = sheep_net_read,
++ .write = sheep_net_write,
++ .readv = sheep_net_readv,
++ .writev = sheep_net_writev,
++ .poll = sheep_net_poll,
++ .ioctl = sheep_net_ioctl,
++ .open = sheep_net_open,
++ .release = sheep_net_release,
++};
++#endif
++
++static struct miscdevice sheep_net_device = {
++ .minor = SHEEP_NET_MINOR,
++ .name = "sheep_net",
++ .fops = &sheep_net_fops
++};
++
++int
++init_module( void )
++{
++ return misc_register( &sheep_net_device );
++}
++
++void
++cleanup_module( void )
++{
++ (void) misc_deregister( &sheep_net_device );
++}
+--- /dev/null
++++ b/drivers/macintosh/mol/skiplist.c
+@@ -0,0 +1,222 @@
++/*
++ * Creation Date: <2003/03/03 23:19:47 samuel>
++ * Time-stamp: <2004/02/21 16:24:56 samuel>
++ *
++ * <skiplist.c>
++ *
++ * Skiplist implementation
++ *
++ * Copyright (C) 2003, 2004 Samuel Rydh (samuel@ibrium.se)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation
++ *
++ */
++
++#include "archinclude.h"
++#include "skiplist.h"
++#include "alloc.h"
++
++#define SKIPLIST_END INT_MAX /* this key is reserved */
++
++/*
++ * Skiplist Example:
++ *
++ * level 0 -> el1 -> el2 -> el3 --> el4 --> null_el
++ * level 1 --> el2 -> el3 --> el4 --> null_el
++ * level 2 --> el2 --> el4 --> null_el
++ * level 3 --> el2 -----> null_el
++ * level 4 ------------> null_el
++ * ...
++ * SKIPLIST_MAX_HEIGHT-1 ------------> null_el
++ */
++
++static unsigned int mol_rand_seed = 152;
++
++static inline int
++_cntlz( int val )
++{
++ int ret;
++ asm volatile("cntlzw %0,%1" : "=r" (ret) : "r"(val) );
++ return ret;
++}
++
++static unsigned long
++mol_random( void )
++{
++ unsigned int t;
++ asm( "mftb %0" : "=r"(t) : );
++ mol_rand_seed = mol_rand_seed*69069L+1;
++ return mol_rand_seed^t;
++}
++
++static void
++mol_random_entropy( void )
++{
++ unsigned int entropy;
++ asm( "mftb %0" : "=r" (entropy) : );
++ mol_rand_seed ^= entropy;
++}
++
++static inline void
++set_level_next( skiplist_level_t *level, skiplist_el_t *el )
++{
++ level->next = el;
++#ifdef __darwin__
++ level->next_phys = el ? tophys_mol(el) : 0;
++#endif
++}
++
++
++/************************************************************************/
++/* skiplist operations */
++/************************************************************************/
++
++int
++skiplist_prealloc( skiplist_t *sl, char *buf, unsigned int size,
++ skiplist_el_callback callback, void *usr1, void *usr2 )
++{
++ skiplist_el_t *p, *head;
++ unsigned int s;
++ int n, count;
++
++ head = NULL;
++ for( count=0 ;; size -= s, buf += s, count++ ) {
++ for( n=0; n<SKIPLIST_MAX_HEIGHT-1 && (mol_random() & 0x40) ; n++ )
++ ;
++ s = sl->datasize + sizeof(skiplist_t) + n*sizeof(skiplist_level_t);
++ if( s > size )
++ break;
++ p = (skiplist_el_t*)(buf + sl->datasize);
++ p->key = n;
++ set_level_next( &p->level[0], head );
++ head = p;
++ }
++
++ /* note: the callback is allowed to manipulate the skiplist */
++ for( n=0, p=head; p; p=p->level[0].next, n++ ) {
++ if( callback )
++ (*callback)( (char*)p - sl->datasize, n, count, usr1, usr2 );
++ if( !p->level[0].next ) {
++ p->level[0] = sl->freelist;
++ set_level_next( &sl->freelist, head );
++ break;
++ }
++ }
++ return count;
++}
++
++char *
++skiplist_insert( skiplist_t *sl, int key )
++{
++ skiplist_el_t *pleft = (skiplist_el_t*)((char*)&sl->root[0] - offsetof(skiplist_el_t, level));
++ skiplist_level_t el = sl->freelist;
++ skiplist_el_t *p = el.next;
++ int n, slev;
++
++ if( !p )
++ return NULL;
++ sl->freelist = p->level[0];
++ n = p->key;
++ p->key = key;
++
++ /* pick a good search level (the -3 is benchmarked) */
++ sl->nel++;
++ slev = 31 - _cntlz(sl->nel) - 3;
++ if( slev > SKIPLIST_MAX_HEIGHT-1 )
++ slev = SKIPLIST_MAX_HEIGHT-1;
++ else if( slev < 0 )
++ slev = 0;
++ sl->slevel = slev;
++
++ /* insert element */
++ if( slev < n )
++ slev = n;
++ for( ; slev >= 0; slev-- ) {
++ for( ; pleft->level[slev].next->key < key ; pleft=pleft->level[slev].next )
++ ;
++ if( slev <= n ) {
++ p->level[slev] = pleft->level[slev];
++ pleft->level[slev] = el;
++ }
++ }
++
++ return (char*)p - sl->datasize;
++}
++
++char *
++skiplist_delete( skiplist_t *sl, int key )
++{
++ skiplist_el_t *p = (skiplist_el_t*)((char*)&sl->root[0] - offsetof(skiplist_el_t, level));
++ skiplist_level_t delptr;
++ int n, level = -1;
++
++ delptr.next = 0;
++
++ for( n=SKIPLIST_MAX_HEIGHT-1; n>=0; n-- ) {
++ for( ; p->level[n].next->key < key ; p=p->level[n].next )
++ ;
++ if( p->level[n].next->key != key )
++ continue;
++
++ if( level < 0 ) {
++ delptr = p->level[n];
++ level = n;
++ }
++ p->level[n] = delptr.next->level[n];
++ }
++ if( level < 0 )
++ return NULL;
++
++ /* put on freelist */
++ p = delptr.next;
++ p->key = level;
++ p->level[0] = sl->freelist;
++ sl->freelist = delptr;
++ sl->nel--;
++
++ return (char*)p - sl->datasize;
++}
++
++char *
++skiplist_lookup( skiplist_t *sl, int key )
++{
++ skiplist_el_t *p = (skiplist_el_t*)((char*)&sl->root[0] - offsetof(skiplist_el_t, level));
++ int n = sl->slevel;
++
++ for( ;; ) {
++ if( p->level[n].next->key < key ) {
++ p = p->level[n].next;
++ continue;
++ }
++ if( p->level[n].next->key > key ) {
++ if( --n < 0 )
++ break;
++ continue;
++ }
++ return (char*)p->level[n].next - sl->datasize;
++ }
++ return NULL;
++}
++
++void
++skiplist_init( skiplist_t *sl, int datasize )
++{
++ skiplist_level_t nilptr;
++ int i;
++
++ mol_random_entropy();
++
++ memset( sl, 0, sizeof(*sl) );
++
++ sl->nil_el.key = SKIPLIST_END;
++ sl->datasize = datasize;
++
++ /* remember: the nil element is of level 0 */
++ set_level_next( &nilptr, &sl->nil_el );
++ sl->nil_el.level[0] = nilptr;
++
++ for( i=0; i < SKIPLIST_MAX_HEIGHT ; i++ )
++ sl->root[i] = nilptr;
++}