+2009-06-04 Vladimir Serbinenko <phcoder@gmail.com>
+
+ Avoid clobbering %ebx/%rbx in inline assembly with Apple's CC
+
+ * efiemu/runtime/efiemu.c (write_cmos): Use %cl instead of %bl as
+ temporary storage.
+ * include/grub/i386/tsc.h (grub_get_tsc): Restore %rbx/%ebx when
+ using Apple's CC.
+ (grub_cpu_is_tsc_supported): Likewise.
+ * loader/i386/xnu.c (guessfsb): Restore %rbx/%ebx in inline assembly.
+
2009-06-04 Vladimir Serbinenko <phcoder@gmail.com>
Absolute addressing through constant with Apple's cc
write_cmos (grub_uint8_t addr, grub_uint8_t val)
{
__asm__ __volatile__ ("outb %%al,$0x70\n"
- "mov %%bl, %%al\n"
- "outb %%al,$0x71": :"a" (addr), "b" (val));
+ "mov %%cl, %%al\n"
+ "outb %%al,$0x71": :"a" (addr), "c" (val));
}
static inline grub_uint8_t
/* The CPUID instruction is a 'serializing' instruction, and
avoids out-of-order execution of the RDTSC instruction. */
+#ifdef APPLE_CC
__asm__ __volatile__ ("xorl %%eax, %%eax\n\t"
- "cpuid":::"%rax", "%rbx", "%rcx", "%rdx");
+#ifdef __x86_64__
+ "push %%rbx\n"
+#else
+ "push %%ebx\n"
+#endif
+ "cpuid\n"
+#ifdef __x86_64__
+ "pop %%rbx\n"
+#else
+ "pop %%ebx\n"
+#endif
+ :::"%rax", "%rcx", "%rdx");
+#else
+ __asm__ __volatile__ ("xorl %%eax, %%eax\n\t"
+ "cpuid":::"%rax", "%rbx", "%rcx", "%rdx");
+#endif
/* Read TSC value. We cannot use "=A", since this would use
%rax on x86_64. */
__asm__ __volatile__ ("rdtsc":"=a" (lo), "=d" (hi));
return 0;
grub_uint32_t features;
+#ifdef APPLE_CC
__asm__ ("movl $1, %%eax\n\t"
- "cpuid"
+#ifdef __x86_64__
+ "push %%rbx\n"
+#else
+ "push %%ebx\n"
+#endif
+ "cpuid\n"
+#ifdef __x86_64__
+ "pop %%rbx\n"
+#else
+ "pop %%ebx\n"
+#endif
+ : "=d" (features)
+ : /* No inputs. */
+ : /* Clobbered: */ "%rax", "%rcx");
+#else
+ __asm__ ("movl $1, %%eax\n\t"
+ "cpuid\n"
: "=d" (features)
: /* No inputs. */
: /* Clobbered: */ "%rax", "%rbx", "%rcx");
+#endif
return (features & (1 << 4)) != 0;
}
if (! grub_cpu_is_cpuid_supported ())
return sane_value;
+
+#ifdef APPLE_CC
+ asm volatile ("movl $0, %%eax\n"
+#ifdef __x86_64__
+ "push %%rbx\n"
+#else
+ "push %%ebx\n"
+#endif
+ "cpuid\n"
+#ifdef __x86_64__
+ "pop %%rbx\n"
+#else
+ "pop %%ebx\n"
+#endif
+ : "=a" (max_cpuid),
+ "=d" (manufacturer[1]), "=c" (manufacturer[2]));
+
+ /* Only Intel for now is done. */
+ if (grub_memcmp (manufacturer + 1, "ineIntel", 12) != 0)
+ return sane_value;
+
+#else
asm volatile ("movl $0, %%eax\n"
"cpuid"
: "=a" (max_cpuid), "=b" (manufacturer[0]),
/* Only Intel for now is done. */
if (grub_memcmp (manufacturer, "GenuineIntel", 12) != 0)
return sane_value;
+#endif
/* Check Speedstep. */
if (max_cpuid < 1)
return sane_value;
+#ifdef APPLE_CC
+ asm volatile ("movl $1, %%eax\n"
+#ifdef __x86_64__
+ "push %%rbx\n"
+#else
+ "push %%ebx\n"
+#endif
+ "cpuid\n"
+#ifdef __x86_64__
+ "pop %%rbx\n"
+#else
+ "pop %%ebx\n"
+#endif
+ : "=c" (capabilities):
+ : "%rax", "%rdx");
+#else
asm volatile ("movl $1, %%eax\n"
"cpuid"
: "=c" (capabilities):
- : "%eax", "%ebx", "%edx");
+ : "%rax", "%rbx", "%rdx");
+#endif
if (! (capabilities & (1 << 7)))
return sane_value;