/*
 * x86 segmentation related helpers:
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif

#ifdef CONFIG_USER_ONLY
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_useronly_template.h"
#undef MEMSUFFIX
#else
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif

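/*
 * Note: the template headers above provide, once per DATA_SIZE, the
 * cpu_ld*_kernel()/cpu_st*_kernel() accessors (and their *_ra variants)
 * used throughout this file; MEMSUFFIX supplies the "_kernel" name and,
 * in the softmmu case, CPU_MMU_INDEX makes them use the kernel-privilege
 * MMU index rather than the current CPL.
 */
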
/* return non-zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

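/*
 * Descriptor layout reminder (per the architecture manuals): e1 holds
 * limit[15:0] and base[15:0], while e2 holds base[23:16], the type/DPL/P
 * flags, limit[19:16], the G/B/L/AVL bits and base[31:24].  The helpers
 * below simply extract those fields.
 */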
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

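/*
 * get_ss_esp_from_tss() fetches the inner-level stack pointer from the
 * current TSS: in a 16-bit TSS the SP/SS pairs are 2-byte fields starting
 * at offset 2, in a 32-bit TSS they are 4-byte ESP/SS pairs starting at
 * offset 4, hence the "(dpl * 4 + 2) << shift" indexing below.
 */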
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

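/*
 * tss_load_seg() applies the descriptor checks that a hardware task
 * switch performs when the segment registers are reloaded from the
 * incoming TSS; failures raise #TS with the offending selector rather
 * than #GP.
 */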
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
                         uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

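/*
 * 32-bit TSS layout used below (16-bit TSS offsets in parentheses):
 * CR3 at 0x1c, EIP at 0x20 (0x0e), EFLAGS at 0x24 (0x10), the eight
 * general registers from 0x28 (0x12), the segment selectors from 0x48
 * (0x22), the LDT selector at 0x60 (0x2a) and the T flag word at 0x64.
 */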
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
                                             retaddr) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}

static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

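/*
 * SET_ESP only updates the bits of ESP selected by the stack segment
 * size: sp_mask is 0xffff for a 16-bit stack and 0xffffffff for a 32-bit
 * one, and the TARGET_X86_64 variant can also write the full 64-bit RSP.
 * The PUSHW/PUSHL/POPW/POPL macros that follow operate on an (ssp, sp)
 * pair and are used below to build the exception stack frame: EFLAGS, CS,
 * EIP and an optional error code, preceded by SS:ESP (and the vm86 data
 * segment selectors) when a stack switch is performed.
 */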
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
    {                                                            \
        sp -= 2;                                                 \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
    {                                                            \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                 \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                        \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)

/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* an interrupt gate clears the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)

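/*
 * In the 64-bit TSS, RSP0-RSP2 are stored at offsets 4, 12 and 20 and
 * IST1-IST7 follow from offset 36, so "8 * level + 4" in
 * get_rsp_from_tss() covers both the privilege-level stacks (level 0-2)
 * and the IST slots (level 4-10).
 */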
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }
    esp &= ~0xfLL; /* align stack */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* an interrupt gate clears the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif

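/*
 * SYSCALL/SYSRET take their target selectors from MSR_STAR: bits 47:32
 * give the SYSCALL CS (with SS implied at that selector + 8), bits 63:48
 * the SYSRET CS, while the target RIP comes from LSTAR/CSTAR in long
 * mode or from the low 32 bits of STAR in legacy mode.
 */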
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif

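/*
 * In real mode the IDT is just the interrupt vector table: four bytes
 * per vector, a 16-bit offset followed by a 16-bit segment, and only
 * FLAGS, CS and IP are pushed on the stack.
 */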
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
}

#else

static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                         error_code);
        }
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj);
    }
}
#endif

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(cpu, cs->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}

bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    bool ret = false;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        /* Don't process multiple interrupt requests in a single call.
           This is required to make icount-driven execution deterministic. */
        return true;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        do_cpu_sipi(cpu);
    } else if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
            do_smm_enter(cpu);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
            env->hflags2 |= HF2_NMI_MASK;
            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
            ret = true;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            int intno;
            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                       CPU_INTERRUPT_VIRQ);
            intno = cpu_get_pic_interrupt(env);
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            ret = true;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            int intno;
            /* FIXME: this should respect TPR */
            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
            intno = x86_ldl_phys(cs, env->vm_vmcb
                                 + offsetof(struct vmcb, control.int_vector));
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing virtual hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
            ret = true;
#endif
        }
    }

    return ret;
}

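/*
 * helper_enter_level()/helper_enter64_level() implement the nesting-level
 * part of the ENTER instruction: for level > 0 the previous frame
 * pointers are copied from the old frame onto the new stack before the
 * current frame pointer (t1) is pushed.
 */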
void helper_enter_level(CPUX86State *env, int level, int data32,
                        target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            cpu_stl_data_ra(env, ssp + (esp & esp_mask),
                            cpu_ldl_data_ra(env, ssp + (ebp & esp_mask),
                                            GETPC()),
                            GETPC());
        }
        esp -= 4;
        cpu_stl_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data_ra(env, ssp + (esp & esp_mask),
                            cpu_lduw_data_ra(env, ssp + (ebp & esp_mask),
                                             GETPC()),
                            GETPC());
        }
        esp -= 2;
        cpu_stw_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data_ra(env, esp, cpu_ldq_data_ra(env, ebp, GETPC()),
                            GETPC());
        }
        esp -= 8;
        cpu_stq_data_ra(env, esp, t1, GETPC());
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data_ra(env, esp, cpu_lduw_data_ra(env, ebp, GETPC()),
                            GETPC());
        }
        esp -= 2;
        cpu_stw_data_ra(env, esp, t1, GETPC());
    }
}
#endif

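/*
 * LLDT and LTR read a system descriptor from the GDT: 8 bytes in legacy
 * mode but 16 bytes in long mode, where the additional doubleword holds
 * base[63:32]; this is why entry_limit is 7 or 15 below.
 */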
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}

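/*
 * helper_load_seg() accepts a null selector for the data segment
 * registers (the fault is then deferred to the first use), but not for
 * SS, except that 64-bit code running at CPL < 3 may load a null SS.
 */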
1573/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2999a0b2 1574void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
eaa728ee
FB
1575{
1576 uint32_t e1, e2;
1577 int cpl, dpl, rpl;
1578 SegmentCache *dt;
1579 int index;
1580 target_ulong ptr;
1581
1582 selector &= 0xffff;
1583 cpl = env->hflags & HF_CPL_MASK;
1584 if ((selector & 0xfffc) == 0) {
1585 /* null selector case */
1586 if (seg_reg == R_SS
1587#ifdef TARGET_X86_64
1588 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1589#endif
20054ef0 1590 ) {
100ec099 1591 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
20054ef0 1592 }
eaa728ee
FB
1593 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1594 } else {
1595
20054ef0 1596 if (selector & 0x4) {
eaa728ee 1597 dt = &env->ldt;
20054ef0 1598 } else {
eaa728ee 1599 dt = &env->gdt;
20054ef0 1600 }
eaa728ee 1601 index = selector & ~7;
20054ef0 1602 if ((index + 7) > dt->limit) {
100ec099 1603 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1604 }
eaa728ee 1605 ptr = dt->base + index;
100ec099
PD
1606 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1607 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
eaa728ee 1608
20054ef0 1609 if (!(e2 & DESC_S_MASK)) {
100ec099 1610 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1611 }
eaa728ee
FB
1612 rpl = selector & 3;
1613 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1614 if (seg_reg == R_SS) {
1615 /* must be writable segment */
20054ef0 1616 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
100ec099 1617 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0
BS
1618 }
1619 if (rpl != cpl || dpl != cpl) {
100ec099 1620 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1621 }
eaa728ee
FB
1622 } else {
1623 /* must be readable segment */
20054ef0 1624 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
100ec099 1625 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1626 }
eaa728ee
FB
1627
1628 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1629 /* if not conforming code, test rights */
20054ef0 1630 if (dpl < cpl || dpl < rpl) {
100ec099 1631 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1632 }
eaa728ee
FB
1633 }
1634 }
1635
1636 if (!(e2 & DESC_P_MASK)) {
20054ef0 1637 if (seg_reg == R_SS) {
100ec099 1638 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
20054ef0 1639 } else {
100ec099 1640 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
20054ef0 1641 }
eaa728ee
FB
1642 }
1643
1644 /* set the access bit if not already set */
1645 if (!(e2 & DESC_A_MASK)) {
1646 e2 |= DESC_A_MASK;
100ec099 1647 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
eaa728ee
FB
1648 }
1649
1650 cpu_x86_load_seg_cache(env, seg_reg, selector,
1651 get_seg_base(e1, e2),
1652 get_seg_limit(e1, e2),
1653 e2);
1654#if 0
93fcfe39 1655 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
eaa728ee
FB
1656 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1657#endif
1658 }
1659}
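
/*
 * For illustration, the privilege rule enforced above for data segments:
 * loading DS with selector 0x0023 (RPL 3) at CPL 3 succeeds only if the
 * descriptor's DPL is 3; a DPL 0 data descriptor fails the
 * "dpl < cpl || dpl < rpl" test and raises #GP(sel).  For SS the rule is
 * stricter: RPL, DPL and CPL must all be equal.
 */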

/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}
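
/*
 * Call-gate layout assumed by the gate path above (386 call gate, type 12):
 * e1[31:16] holds the target CS selector, e1[15:0] the low half of the entry
 * offset and e2[31:16] the high half; a 286 gate (type 4) supplies only the
 * low 16 bits.  A far jump through a gate never changes CPL, which is why
 * the target CS is reloaded with the current CPL in its RPL field.
 */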

/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
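
/*
 * Real-mode example: "lcall 0x1234:0x0010" pushes the old CS and the return
 * IP, then simply sets CS.selector = 0x1234, CS.base = 0x12340
 * (selector << 4) and EIP = 0x0010; no descriptor table is consulted.
 */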

/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp;

    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if ((ss & 3) != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
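
/*
 * Sketch of the inner-privilege call-gate path above: SS:ESP for the target
 * DPL comes from the current TSS (get_ss_esp_from_tss), then the new stack
 * receives, from higher to lower addresses, the old SS, the old ESP,
 * param_count parameter words copied from the old stack, the old CS and the
 * return EIP.  The architectural registers (SS, CS, ESP, EIP) are only
 * committed at the end, matching the "not restartable" comment: a fault
 * while copying parameters still leaves the instruction restartable.
 */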

/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
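
/*
 * Note on the masks above: a vm86 IRET may not change IOPL (IOPL_MASK is
 * excluded from eflags_mask), while a real-mode IRET may; with a 16-bit
 * operand size the mask is further clipped to 0xffff, so high-word flags
 * such as AC and ID are left untouched.
 */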

static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
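
/*
 * Example of the check above: returning from CPL 0 to CPL 3 with DS still
 * holding a DPL 0 data segment would leak a kernel segment to user code, so
 * validate_seg() nullifies any data or non-conforming code segment whose DPL
 * is below the new privilege level.
 */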

/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
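
/*
 * Stack layout consumed above, 32-bit case: EIP and CS are popped first,
 * then EFLAGS if is_iret, then 'addend' bytes of callee parameters are
 * skipped, and only when returning to an outer privilege level are ESP and
 * SS popped as well.  With shift == 2 the same sequence is read as 64-bit
 * quantities.
 */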

void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
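
/*
 * Selector layout implied by the SYSENTER/SYSEXIT code above, relative to
 * the IA32_SYSENTER_CS MSR value: +0 kernel CS, +8 kernel SS, +16 32-bit
 * user CS, +24 user SS, and in long mode +32/+40 for the 64-bit user CS/SS
 * pair; the user selectors are loaded with RPL 3.
 */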

target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
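
/*
 * Both helpers above report success through the Z flag: CC_SRC is the lazily
 * computed EFLAGS with CC_Z set on success and cleared on failure.  LAR
 * returns the descriptor access bytes (e2 & 0x00f0ff00), while LSL returns
 * the byte- or page-granular limit from get_seg_limit().
 */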

void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif

/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size,
                            uintptr_t retaddr)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}
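
/*
 * Worked example for the bitmap check above: an "inb" from port 0x3f8, when
 * the TSS permission check applies, reads io_offset from TSS offset 0x66,
 * then loads the 16-bit word at io_offset + (0x3f8 >> 3) = io_offset + 0x7f
 * and tests bit 0x3f8 & 7 = 0; the access is allowed only if that bit is
 * clear.
 */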

void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1, GETPC());
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2, GETPC());
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4, GETPC());
}