Remove instances of __i386__, except from tests and imported headers.
Drop a block containing a sanity check and an fprintf error message for
i386-on-i386 or x86_64-on-x86_64 emulation. If we really want
something like this, we would do it via some form of compile-time check.
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
#include <sys/eventfd.h>
#endif
-#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+#if defined(__x86_64__) || defined(__aarch64__)
# define KVM_HAVE_MCE_INJECTION 1
#endif
if test ! -z "$cpu" ; then
# command line argument
:
-elif check_define __i386__ ; then
- cpu="i386"
elif check_define __x86_64__ ; then
if check_define __ILP32__ ; then
cpu="x32"
#endif
#if defined(CONFIG_TCG_INTERPRETER)
s->info.print_insn = print_insn_tci;
-#elif defined(__i386__)
- s->info.mach = bfd_mach_i386_i386;
- s->info.cap_arch = CS_ARCH_X86;
- s->info.cap_mode = CS_MODE_32;
- s->info.cap_insn_unit = 1;
- s->info.cap_insn_split = 8;
#elif defined(__x86_64__)
s->info.mach = bfd_mach_x86_64;
s->info.cap_arch = CS_ARCH_X86;
*/
uint32_t *ptr32 = NULL;
uint32_t *ptr64 = NULL;
-#if defined(__i386__)
- ptr32 = (void*)page->pd;
- ptr64 = ((void*)page->pd) + 4;
-#elif defined(__x86_64__)
+#if defined(__x86_64__)
ptr32 = ((void*)page->pd) - 4;
ptr64 = (void*)page->pd;
#endif
/* 64bit dom0, 32bit domU */
mode = 32;
pd = ((void*)page->pd) - 4;
-#elif defined(__i386__)
- } else if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
- /* 32bit dom0, 64bit domU */
- mode = 64;
- pd = ((void*)page->pd) + 4;
#endif
}
* the same semantics.
*/
#if !defined(QEMU_SANITIZE_THREAD) && \
- (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
+ (defined(__x86_64__) || defined(__s390x__))
# define smp_mb__before_rmw() signal_barrier()
# define smp_mb__after_rmw() signal_barrier()
#else
*/
#if !defined(QEMU_SANITIZE_THREAD) && \
- (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
+ (defined(__x86_64__) || defined(__s390x__))
# define qatomic_set_mb(ptr, i) \
({ (void)qatomic_xchg(ptr, i); smp_mb__after_rmw(); })
#else
* mappings of the same physical page(s).
*/
-#if defined(__i386__) || defined(__x86_64__) || defined(__s390__)
+#if defined(__x86_64__) || defined(__s390__)
static inline void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
bool qemu_has_direct_io(void);
-#if defined(__HAIKU__) && defined(__i386__)
-#define FMT_pid "%ld"
-#elif defined(WIN64)
+#ifdef WIN64
#define FMT_pid "%" PRId64
#else
#define FMT_pid "%d"
#ifndef QEMU_PROCESSOR_H
#define QEMU_PROCESSOR_H
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__x86_64__)
# define cpu_relax() asm volatile("rep; nop" ::: "memory")
#elif defined(__aarch64__)
return retval;
}
-#elif defined(__i386__)
-
-static inline int64_t cpu_get_host_ticks(void)
-{
- int64_t val;
- asm volatile ("rdtsc" : "=A" (val));
- return val;
-}
-
#elif defined(__x86_64__)
static inline int64_t cpu_get_host_ticks(void)
~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
(size << TARGET_IOC_SIZESHIFT);
}
-
- /* automatic consistency check if same arch */
-#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
- (defined(__x86_64__) && defined(TARGET_X86_64))
- if (unlikely(ie->target_cmd != ie->host_cmd)) {
- fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
- ie->name, ie->target_cmd, ie->host_cmd);
- }
-#endif
ie++;
}
}
: "=a"(vec[0]), "=b"(vec[1]),
"=c"(vec[2]), "=d"(vec[3])
: "0"(function), "c"(count) : "cc");
-#elif defined(__i386__)
- asm volatile("pusha \n\t"
- "cpuid \n\t"
- "mov %%eax, 0(%2) \n\t"
- "mov %%ebx, 4(%2) \n\t"
- "mov %%ecx, 8(%2) \n\t"
- "mov %%edx, 12(%2) \n\t"
- "popa"
- : : "a"(function), "c"(count), "S"(vec)
- : "memory", "cc");
#else
abort();
#endif
#define TCG_TARGET_MO_H
/*
- * We could notice __i386__ or __s390x__ and reduce the barriers depending
+ * We could notice __x86_64__ or __s390x__ and reduce the barriers depending
* on the host. But if you want performance, you use the normal backend.
* We prefer consistency across hosts on this.
*/
* Architecture (+ OS) specific cache flushing mechanisms.
*/
-#if defined(__i386__) || defined(__x86_64__) || defined(__s390__)
+#if defined(__x86_64__) || defined(__s390__)
/* Caches are coherent and do not require flushing; symbol inline. */