/*
* i386 helpers
- *
+ *
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
/* modulo 17 table */
const uint8_t rclw_table[32] = {
- 0, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6, 7,
8, 9,10,11,12,13,14,15,
16, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9,10,11,12,13,14,
/* modulo 9 table */
const uint8_t rclb_table[32] = {
- 0, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6, 7,
8, 0, 1, 2, 3, 4, 5, 6,
- 7, 8, 0, 1, 2, 3, 4, 5,
+ 7, 8, 0, 1, 2, 3, 4, 5,
6, 7, 8, 0, 1, 2, 3, 4,
};
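/*
 * The RCL instruction rotates through the carry flag, so the effective
 * rotation width is 17 bits for a 16-bit operand and 9 bits for an
 * 8-bit operand.  These tables map a raw shift count (0..31) to the
 * count modulo 17 or modulo 9, avoiding a division at run time.
 */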
1.44269504088896340739L, /*l2e*/
3.32192809488736234781L, /*l2t*/
};
-
+
/* thread support */
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
*e2_ptr = ldl_kernel(ptr + 4);
return 0;
}
-
+
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
unsigned int limit;
static inline void load_seg_vm(int seg, int selector)
{
selector &= 0xffff;
- cpu_x86_load_seg_cache(env, seg, selector,
+ cpu_x86_load_seg_cache(env, seg, selector,
(selector << 4), 0xffff, 0);
}
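/*
 * Note: load_seg_vm() follows the real-mode/vm86 rules: the segment base
 * is simply selector << 4, the limit is fixed at 0xffff (64KB) and no
 * descriptor or privilege checks are performed.
 */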
-static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
+static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
uint32_t *esp_ptr, int dpl)
{
int type, index, shift;
-
+
#if 0
{
int i;
}
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
- cpu_x86_load_seg_cache(env, seg_reg, selector,
+ cpu_x86_load_seg_cache(env, seg_reg, selector,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
} else {
- if (seg_reg == R_SS || seg_reg == R_CS)
+ if (seg_reg == R_SS || seg_reg == R_CS)
raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
}
}
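/*
 * Note on the NULL selector case above: a NULL selector is acceptable for
 * the data segments of the incoming task, but a NULL CS or SS is reported
 * as #TS (not #GP), since segment load errors during a task switch are
 * attributed to the new TSS.
 */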
#define SWITCH_TSS_CALL 2
/* XXX: restore CPU state in registers (PowerPC case) */
-static void switch_tss(int tss_selector,
+static void switch_tss(int tss_selector,
uint32_t e1, uint32_t e2, int source,
uint32_t next_eip)
{
tss_limit_max = 43;
tss_limit = get_seg_limit(e1, e2);
tss_base = get_seg_base(e1, e2);
- if ((tss_selector & 4) != 0 ||
+ if ((tss_selector & 4) != 0 ||
tss_limit < tss_limit_max)
raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
new_segs[R_GS] = 0;
new_trap = 0;
}
-
+
/* NOTE: we must avoid memory exceptions during the task switch,
   so we make dummy accesses beforehand */
/* XXX: it can still fail in some cases, so a bigger hack is
v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
stb_kernel(env->tr.base, v1);
stb_kernel(env->tr.base + old_tss_limit_max, v2);
-
+
/* clear busy bit (it is restartable) */
if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
target_ulong ptr;
old_eflags = compute_eflags();
if (source == SWITCH_TSS_IRET)
old_eflags &= ~NT_MASK;
-
+
/* save the current state in the old TSS */
if (type & 8) {
/* 32 bit */
for(i = 0; i < 4; i++)
stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
}
-
+
    /* now if an exception occurs, it will occur in the next task
context */
env->tr.base = tss_base;
env->tr.limit = tss_limit;
env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
-
+
if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
cpu_x86_update_cr3(env, new_cr3);
}
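    /*
     * CR3 is only reloaded for a 32-bit TSS (type & 8) and only when
     * paging is enabled; the 16-bit (286) TSS format has no CR3 field.
     */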
-
+
/* load all registers without an exception, then reload them with
possible exception */
env->eip = new_eip;
- eflags_mask = TF_MASK | AC_MASK | ID_MASK |
+ eflags_mask = TF_MASK | AC_MASK | ID_MASK |
IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
if (!(type & 8))
eflags_mask &= 0xffff;
ESI = new_regs[6];
EDI = new_regs[7];
if (new_eflags & VM_MASK) {
- for(i = 0; i < 6; i++)
+ for(i = 0; i < 6; i++)
load_seg_vm(i, new_segs[i]);
/* in vm86, CPL is always 3 */
cpu_x86_set_cpl(env, 3);
for(i = 0; i < 6; i++)
cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
}
-
+
env->ldt.selector = new_ldt & ~4;
env->ldt.base = 0;
env->ldt.limit = 0;
raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
load_seg_cache_raw_dt(&env->ldt, e1, e2);
}
-
+
/* load the segments */
if (!(new_eflags & VM_MASK)) {
tss_load_seg(R_CS, new_segs[R_CS]);
tss_load_seg(R_FS, new_segs[R_FS]);
tss_load_seg(R_GS, new_segs[R_GS]);
}
-
+
/* check that EIP is in the CS segment limits */
if (new_eip > env->segs[R_CS].limit) {
/* XXX: different exception if CALL ? */
static inline void check_io(int addr, int size)
{
int io_offset, val, mask;
-
+
/* TSS must be a valid 32 bit one */
if (!(env->tr.flags & DESC_P_MASK) ||
((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
PUSHW(ssp, esp, sp_mask, error_code);
}
}
-
+
if (new_stack) {
if (env->eflags & VM_MASK) {
cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
}
ss = (ss & ~3) | dpl;
- cpu_x86_load_seg_cache(env, R_SS, ss,
+ cpu_x86_load_seg_cache(env, R_SS, ss,
ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
}
SET_ESP(esp, sp_mask);
selector = (selector & ~3) | dpl;
- cpu_x86_load_seg_cache(env, R_CS, selector,
+ cpu_x86_load_seg_cache(env, R_CS, selector,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
static inline target_ulong get_rsp_from_tss(int level)
{
int index;
-
+
#if 0
- printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
+ printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
env->tr.base, env->tr.limit);
#endif
if (has_error_code) {
PUSHQ(esp, error_code);
}
-
+
if (new_stack) {
ss = 0 | dpl;
cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
ESP = esp;
selector = (selector & ~3) | dpl;
- cpu_x86_load_seg_cache(env, R_CS, selector,
+ cpu_x86_load_seg_cache(env, R_CS, selector,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
ECX = env->eip + next_eip_addend;
env->regs[11] = compute_eflags();
-
+
code64 = env->hflags & HF_CS64_MASK;
cpu_x86_set_cpl(env, 0);
- cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
- 0, 0xffffffff,
+ cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+ 0, 0xffffffff,
DESC_G_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
- cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
+ cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
env->eip = env->lstar;
else
env->eip = env->cstar;
- } else
+ } else
#endif
{
ECX = (uint32_t)(env->eip + next_eip_addend);
-
+
cpu_x86_set_cpl(env, 0);
- cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
- 0, 0xffffffff,
+ cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+ 0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
- cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
+ cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
if (dflag == 2) {
- cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
- 0, 0xffffffff,
+ cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
+ 0, 0xffffffff,
DESC_G_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
- DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
DESC_L_MASK);
env->eip = ECX;
} else {
- cpu_x86_load_seg_cache(env, R_CS, selector | 3,
- 0, 0xffffffff,
+ cpu_x86_load_seg_cache(env, R_CS, selector | 3,
+ 0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
env->eip = (uint32_t)ECX;
}
- cpu_x86_load_seg_cache(env, R_SS, selector + 8,
+ cpu_x86_load_seg_cache(env, R_SS, selector + 8,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_W_MASK | DESC_A_MASK);
- load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
+ load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
cpu_x86_set_cpl(env, 3);
- } else
+ } else
#endif
{
- cpu_x86_load_seg_cache(env, R_CS, selector | 3,
- 0, 0xffffffff,
+ cpu_x86_load_seg_cache(env, R_CS, selector | 3,
+ 0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
env->eip = (uint32_t)ECX;
- cpu_x86_load_seg_cache(env, R_SS, selector + 8,
+ cpu_x86_load_seg_cache(env, R_SS, selector + 8,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
PUSHW(ssp, esp, 0xffff, compute_eflags());
PUSHW(ssp, esp, 0xffff, old_cs);
PUSHW(ssp, esp, 0xffff, old_eip);
-
+
/* update processor state */
ESP = (ESP & ~0xffff) | (esp & 0xffff);
env->eip = offset;
}
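/*
 * Real-mode interrupt entry: FLAGS, CS and IP are pushed as 16-bit words
 * and only the low 16 bits of ESP are modified, so the stack pointer wraps
 * within the 64KB stack segment.
 */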
/* fake user mode interrupt */
-void do_interrupt_user(int intno, int is_int, int error_code,
+void do_interrupt_user(int intno, int is_int, int error_code,
target_ulong next_eip)
{
SegmentCache *dt;
dt = &env->idt;
ptr = dt->base + (intno * 8);
e2 = ldl_kernel(ptr + 4);
-
+
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
* the int instruction. next_eip is the EIP value AFTER the interrupt
- * instruction. It is only relevant if is_int is TRUE.
+ * instruction. It is only relevant if is_int is TRUE.
*/
-void do_interrupt(int intno, int is_int, int error_code,
+void do_interrupt(int intno, int is_int, int error_code,
target_ulong next_eip, int is_hw)
{
if (loglevel & CPU_LOG_INT) {
 * Signal an interrupt. It is executed in the main CPU loop.
* is_int is TRUE if coming from the int instruction. next_eip is the
* EIP value AFTER the interrupt instruction. It is only relevant if
- * is_int is TRUE.
+ * is_int is TRUE.
*/
-void raise_interrupt(int intno, int is_int, int error_code,
+void raise_interrupt(int intno, int is_int, int error_code,
int next_eip_addend)
{
if (!is_int)
/* SMM support */
-#if defined(CONFIG_USER_ONLY)
+#if defined(CONFIG_USER_ONLY)
void do_smm_enter(void)
{
cpu_smm_update(env);
sm_state = env->smbase + 0x8000;
-
+
#ifdef TARGET_X86_64
for(i = 0; i < 6; i++) {
dt = &env->segs[i];
stq_phys(sm_state + 0x7e78, env->ldt.base);
stl_phys(sm_state + 0x7e74, env->ldt.limit);
stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
-
+
stq_phys(sm_state + 0x7e88, env->idt.base);
stl_phys(sm_state + 0x7e84, env->idt.limit);
stq_phys(sm_state + 0x7e98, env->tr.base);
stl_phys(sm_state + 0x7e94, env->tr.limit);
stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
-
+
stq_phys(sm_state + 0x7ed0, env->efer);
stq_phys(sm_state + 0x7ff8, EAX);
stq_phys(sm_state + 0x7fd0, EBP);
stq_phys(sm_state + 0x7fc8, ESI);
stq_phys(sm_state + 0x7fc0, EDI);
- for(i = 8; i < 16; i++)
+ for(i = 8; i < 16; i++)
stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
stq_phys(sm_state + 0x7f78, env->eip);
stl_phys(sm_state + 0x7f70, compute_eflags());
stl_phys(sm_state + 0x7fd0, EAX);
stl_phys(sm_state + 0x7fcc, env->dr[6]);
stl_phys(sm_state + 0x7fc8, env->dr[7]);
-
+
stl_phys(sm_state + 0x7fc4, env->tr.selector);
stl_phys(sm_state + 0x7f64, env->tr.base);
stl_phys(sm_state + 0x7f60, env->tr.limit);
stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
-
+
stl_phys(sm_state + 0x7fc0, env->ldt.selector);
stl_phys(sm_state + 0x7f80, env->ldt.base);
stl_phys(sm_state + 0x7f7c, env->ldt.limit);
stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
-
+
stl_phys(sm_state + 0x7f74, env->gdt.base);
stl_phys(sm_state + 0x7f70, env->gdt.limit);
cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
-
- cpu_x86_update_cr0(env,
+
+ cpu_x86_update_cr0(env,
env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
cpu_x86_update_cr4(env, 0);
env->dr[7] = 0x00000400;
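    /*
     * SMM entry leaves the CPU in an environment similar to real mode but
     * with 4GB segment limits: PE, EM, TS and PG are cleared in CR0, CR4
     * is cleared and DR7 is reset to its architectural default of 0x400.
     */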
for(i = 0; i < 6; i++) {
offset = 0x7e00 + i * 16;
- cpu_x86_load_seg_cache(env, i,
+ cpu_x86_load_seg_cache(env, i,
lduw_phys(sm_state + offset),
ldq_phys(sm_state + offset + 8),
ldl_phys(sm_state + offset + 4),
env->ldt.base = ldq_phys(sm_state + 0x7e78);
env->ldt.limit = ldl_phys(sm_state + 0x7e74);
env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
-
+
env->idt.base = ldq_phys(sm_state + 0x7e88);
env->idt.limit = ldl_phys(sm_state + 0x7e84);
env->tr.base = ldq_phys(sm_state + 0x7e98);
env->tr.limit = ldl_phys(sm_state + 0x7e94);
env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
-
+
EAX = ldq_phys(sm_state + 0x7ff8);
ECX = ldq_phys(sm_state + 0x7ff0);
EDX = ldq_phys(sm_state + 0x7fe8);
EBP = ldq_phys(sm_state + 0x7fd0);
ESI = ldq_phys(sm_state + 0x7fc8);
EDI = ldq_phys(sm_state + 0x7fc0);
- for(i = 8; i < 16; i++)
+ for(i = 8; i < 16; i++)
env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
env->eip = ldq_phys(sm_state + 0x7f78);
- load_eflags(ldl_phys(sm_state + 0x7f70),
+ load_eflags(ldl_phys(sm_state + 0x7f70),
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
env->dr[6] = ldl_phys(sm_state + 0x7f68);
env->dr[7] = ldl_phys(sm_state + 0x7f60);
#else
cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
- load_eflags(ldl_phys(sm_state + 0x7ff4),
+ load_eflags(ldl_phys(sm_state + 0x7ff4),
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
env->eip = ldl_phys(sm_state + 0x7ff0);
EDI = ldl_phys(sm_state + 0x7fec);
EAX = ldl_phys(sm_state + 0x7fd0);
env->dr[6] = ldl_phys(sm_state + 0x7fcc);
env->dr[7] = ldl_phys(sm_state + 0x7fc8);
-
+
env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
env->tr.base = ldl_phys(sm_state + 0x7f64);
env->tr.limit = ldl_phys(sm_state + 0x7f60);
env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
-
+
env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
env->ldt.base = ldl_phys(sm_state + 0x7f80);
env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
-
+
env->gdt.base = ldl_phys(sm_state + 0x7f74);
env->gdt.limit = ldl_phys(sm_state + 0x7f70);
offset = 0x7f84 + i * 12;
else
offset = 0x7f2c + (i - 3) * 12;
- cpu_x86_load_seg_cache(env, i,
+ cpu_x86_load_seg_cache(env, i,
ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
ldl_phys(sm_state + offset + 8),
ldl_phys(sm_state + offset + 4),
{
unsigned int den, r;
uint64_t num, q;
-
+
num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
den = T0;
if (den == 0) {
{
int den, r;
int64_t num, q;
-
+
num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
den = T0;
if (den == 0) {
{
uint32_t index;
index = (uint32_t)EAX;
-
+
/* test if maximum index reached */
if (index & 0x80000000) {
- if (index > env->cpuid_xlevel)
+ if (index > env->cpuid_xlevel)
index = env->cpuid_level;
} else {
- if (index > env->cpuid_level)
+ if (index > env->cpuid_level)
index = env->cpuid_level;
}
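    /*
     * Out-of-range CPUID leaves (basic and extended) fall back to the
     * highest basic leaf, mirroring the documented behaviour of real CPUs.
     */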
-
+
switch(index) {
case 0:
EAX = env->cpuid_level;
uint32_t e1, e2;
int index, entry_limit;
target_ulong ptr;
-
+
selector = T0 & 0xffff;
if ((selector & 0xfffc) == 0) {
/* XXX: NULL selector case: invalid LDT */
if (env->hflags & HF_LMA_MASK)
entry_limit = 15;
else
-#endif
+#endif
entry_limit = 7;
if ((index + entry_limit) > dt->limit)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
uint32_t e1, e2;
int index, type, entry_limit;
target_ulong ptr;
-
+
selector = T0 & 0xffff;
if ((selector & 0xfffc) == 0) {
/* NULL selector case: invalid TR */
if (env->hflags & HF_LMA_MASK)
entry_limit = 15;
else
-#endif
+#endif
entry_limit = 7;
if ((index + entry_limit) > dt->limit)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
e1 = ldl_kernel(ptr);
e2 = ldl_kernel(ptr + 4);
type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
- if ((e2 & DESC_S_MASK) ||
+ if ((e2 & DESC_S_MASK) ||
(type != 1 && type != 9))
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
load_seg_cache_raw_dt(&env->tr, e1, e2);
env->tr.base |= (target_ulong)e3 << 32;
- } else
+ } else
#endif
{
load_seg_cache_raw_dt(&env->tr, e1, e2);
raise_exception_err(EXCP0D_GPF, 0);
cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
} else {
-
+
if (selector & 0x4)
dt = &env->ldt;
else
ptr = dt->base + index;
e1 = ldl_kernel(ptr);
e2 = ldl_kernel(ptr + 4);
-
+
if (!(e2 & DESC_S_MASK))
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
rpl = selector & 3;
/* must be readable segment */
if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
-
+
if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
/* if not conforming code, test rights */
- if (dpl < cpl || dpl < rpl)
+ if (dpl < cpl || dpl < rpl)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
}
}
stl_kernel(ptr + 4, e2);
}
- cpu_x86_load_seg_cache(env, seg_reg, selector,
+ cpu_x86_load_seg_cache(env, seg_reg, selector,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
#if 0
- fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
+ fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
}
int new_cs, gate_cs, type;
uint32_t e1, e2, cpl, dpl, rpl, limit;
target_ulong new_eip, next_eip;
-
+
new_cs = T0;
new_eip = T1;
if ((new_cs & 0xfffc) == 0)
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
limit = get_seg_limit(e1, e2);
- if (new_eip > limit &&
+ if (new_eip > limit &&
!(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
/* must be code segment */
- if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
+ if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
(DESC_S_MASK | DESC_CS_MASK)))
raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
- if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
+ if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
(!(e2 & DESC_C_MASK) && (dpl != cpl)))
raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
if (!(e2 & DESC_P_MASK))
uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
uint32_t val, limit, old_sp_mask;
target_ulong ssp, old_ssp, next_eip, new_eip;
-
+
new_cs = T0;
new_eip = T1;
next_eip = env->eip + next_eip_addend;
/* from this point, not restartable */
ESP = rsp;
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
- get_seg_base(e1, e2),
+ get_seg_base(e1, e2),
get_seg_limit(e1, e2), e2);
EIP = new_eip;
- } else
+ } else
#endif
{
sp = ESP;
PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
PUSHW(ssp, sp, sp_mask, next_eip);
}
-
+
limit = get_seg_limit(e1, e2);
if (new_eip > limit)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
if (loglevel & CPU_LOG_PCALL)
- fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
+ fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
ss, sp, param_count, ESP);
#endif
if ((ss & 0xfffc) == 0)
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
if (!(ss_e2 & DESC_P_MASK))
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
-
+
// push_size = ((param_count * 2) + 8) << shift;
old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
old_ssp = env->segs[R_SS].base;
-
+
sp_mask = get_sp_mask(ss_e2);
ssp = get_seg_base(ss_e1, ss_e2);
if (shift) {
if (new_stack) {
ss = (ss & ~3) | dpl;
- cpu_x86_load_seg_cache(env, R_SS, ss,
+ cpu_x86_load_seg_cache(env, R_SS, ss,
ssp,
get_seg_limit(ss_e1, ss_e2),
ss_e2);
}
selector = (selector & ~3) | dpl;
- cpu_x86_load_seg_cache(env, R_CS, selector,
+ cpu_x86_load_seg_cache(env, R_CS, selector,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
/* XXX: on x86_64, we do not want to nullify FS and GS because
they may still contain a valid base. I would be interested to
know how a real x86_64 CPU behaves */
- if ((seg_reg == R_FS || seg_reg == R_GS) &&
+ if ((seg_reg == R_FS || seg_reg == R_GS) &&
(env->segs[seg_reg].selector & 0xfffc) == 0)
return;
uint32_t e1, e2, ss_e1, ss_e2;
int cpl, dpl, rpl, eflags_mask, iopl;
target_ulong ssp, sp, new_eip, new_esp, sp_mask;
-
+
#ifdef TARGET_X86_64
if (shift == 2)
sp_mask = -1;
!(e2 & DESC_CS_MASK))
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
cpl = env->hflags & HF_CPL_MASK;
- rpl = new_cs & 3;
+ rpl = new_cs & 3;
if (rpl < cpl)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
}
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
-
+
sp += addend;
- if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
+ if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
- cpu_x86_load_seg_cache(env, R_CS, new_cs,
+ cpu_x86_load_seg_cache(env, R_CS, new_cs,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
        /* NULL ss is allowed in long mode if cpl != 3 */
/* XXX: test CS64 ? */
if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
- cpu_x86_load_seg_cache(env, R_SS, new_ss,
+ cpu_x86_load_seg_cache(env, R_SS, new_ss,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
DESC_W_MASK | DESC_A_MASK);
ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
- } else
+ } else
#endif
{
raise_exception_err(EXCP0D_GPF, 0);
raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
if (!(ss_e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
- cpu_x86_load_seg_cache(env, R_SS, new_ss,
+ cpu_x86_load_seg_cache(env, R_SS, new_ss,
get_seg_base(ss_e1, ss_e2),
get_seg_limit(ss_e1, ss_e2),
ss_e2);
}
- cpu_x86_load_seg_cache(env, R_CS, new_cs,
+ cpu_x86_load_seg_cache(env, R_CS, new_cs,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
POPL(ssp, sp, sp_mask, new_ds);
POPL(ssp, sp, sp_mask, new_fs);
POPL(ssp, sp, sp_mask, new_gs);
-
+
/* modify processor state */
- load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
+ load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
load_seg_vm(R_CS, new_cs & 0xffff);
cpu_x86_set_cpl(env, 3);
{
int tss_selector, type;
uint32_t e1, e2;
-
+
/* specific case for TSS */
if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
}
env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
cpu_x86_set_cpl(env, 0);
- cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
- 0, 0xffffffff,
+ cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
+ 0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
- cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
+ cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
raise_exception_err(EXCP0D_GPF, 0);
}
cpu_x86_set_cpl(env, 3);
- cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
- 0, 0xffffffff,
+ cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
+ 0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
- cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
+ cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
void helper_movl_crN_T0(int reg)
{
-#if !defined(CONFIG_USER_ONLY)
+#if !defined(CONFIG_USER_ONLY)
switch(reg) {
case 0:
cpu_x86_update_cr0(env, T0);
EDX = (uint32_t)(val >> 32);
}
-#if defined(CONFIG_USER_ONLY)
+#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}
update_mask |= MSR_EFER_FFXSR;
if (env->cpuid_ext2_features & CPUID_EXT2_NX)
update_mask |= MSR_EFER_NXE;
- env->efer = (env->efer & ~update_mask) |
+ env->efer = (env->efer & ~update_mask) |
(val & update_mask);
}
break;
#endif
default:
/* XXX: exception ? */
- break;
+ break;
}
}
default:
/* XXX: exception ? */
val = 0;
- break;
+ break;
}
EAX = (uint32_t)(val);
EDX = (uint32_t)(val >> 32);
CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
- if (b == 0.0)
+ if (b == 0.0)
fpu_set_exception(FPUS_ZE);
return a / b;
}
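/*
 * Note: the division above is still performed when b == 0.0, so the IEEE
 * result (a signed infinity, or NaN for 0/0) is produced in addition to
 * recording the zero-divide condition in the FPU status word.
 */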
{
if (env->cr[0] & CR0_NE_MASK) {
raise_exception(EXCP10_COPR);
- }
-#if !defined(CONFIG_USER_ONLY)
+ }
+#if !defined(CONFIG_USER_ONLY)
else {
cpu_set_ferr(env);
}
void helper_fyl2x(void)
{
CPU86_LDouble fptemp;
-
+
fptemp = ST0;
if (fptemp>0.0){
fptemp = log(fptemp)/log(2.0); /* log2(ST) */
ST1 *= fptemp;
fpop();
- } else {
+ } else {
env->fpus &= (~0x4700);
env->fpus |= 0x400;
}
fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
ST1 *= fptemp;
fpop();
- } else {
+ } else {
env->fpus &= (~0x4700);
env->fpus |= 0x400;
}
CPU86_LDouble fptemp;
fptemp = ST0;
- if (fptemp<0.0) {
+ if (fptemp<0.0) {
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
env->fpus |= 0x400;
}
void helper_fscale(void)
{
- ST0 = ldexp (ST0, (int)(ST1));
+ ST0 = ldexp (ST0, (int)(ST1));
}
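/*
 * FSCALE multiplies ST0 by 2 raised to the value of ST1 truncated towards
 * zero; ldexp() performs the power-of-two scaling directly.
 */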
void helper_fsin(void)
helper_fstt(tmp, addr);
addr += 16;
}
-
+
if (env->cr[4] & CR4_OSFXSR_MASK) {
/* XXX: finish it */
stl(ptr + 0x18, env->mxcsr); /* mxcsr */
#endif
}
-#if !defined(CONFIG_USER_ONLY)
+#if !defined(CONFIG_USER_ONLY)
#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))