From: Richard Henderson
Date: Sat, 12 Jul 2025 00:20:26 +0000 (-0600)
Subject: include/hw/core/cpu: Invert the indexing into CPUTLBDescFast
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=33ea495cd3bfb21db920b6928af7eda39ced5c20;p=thirdparty%2Fqemu.git

include/hw/core/cpu: Invert the indexing into CPUTLBDescFast

This array is within CPUNegativeOffsetState, which means the last
element of the array has an offset from env with the smallest
magnitude.  This can be encoded into fewer bits when generating
TCG fast path memory references.

When we changed NB_MMU_MODES to be a global constant, rather than
a per-target value, we pessimized the code generated for targets
which use only a few mmu indexes.  By inverting the array index,
we counteract that.

Reviewed-by: Philippe Mathieu-Daudé
Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 22a78c9ee13..c9f40c25392 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -603,9 +603,18 @@ static inline CPUArchState *cpu_env(CPUState *cpu)
 }
 
 #ifdef CONFIG_TCG
+/*
+ * Invert the index order of the CPUTLBDescFast array so that lower
+ * mmu_idx have offsets from env with smaller magnitude.
+ */
+static inline int mmuidx_to_fast_index(int mmu_idx)
+{
+    return NB_MMU_MODES - 1 - mmu_idx;
+}
+
 static inline CPUTLBDescFast *cpu_tlb_fast(CPUState *cpu, int mmu_idx)
 {
-    return &cpu->neg.tlb.f[mmu_idx];
+    return &cpu->neg.tlb.f[mmuidx_to_fast_index(mmu_idx)];
 }
 #endif
 
diff --git a/tcg/tcg.c b/tcg/tcg.c
index afac55a203a..294762c283b 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -425,7 +425,8 @@ static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
 static int __attribute__((unused))
 tlb_mask_table_ofs(TCGContext *s, int which)
 {
-    return (offsetof(CPUNegativeOffsetState, tlb.f[which]) -
+    int fi = mmuidx_to_fast_index(which);
+    return (offsetof(CPUNegativeOffsetState, tlb.f[fi]) -
             sizeof(CPUNegativeOffsetState));
 }
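
To make the offset arithmetic concrete, here is a minimal standalone
sketch -- not QEMU code.  Every Fake* name and struct shape below is
invented; only NB_MMU_MODES and the "offset within the struct minus
sizeof the struct" computation mirror the patch.

/*
 * Sketch: compare offsets from env for direct vs inverted indexing.
 * Assumes an LP64 host, 16-byte array elements, and (unlike the real
 * CPUNegativeOffsetState) nothing but the array in the struct.
 */
#include <stddef.h>
#include <stdio.h>

enum { NB_MMU_MODES = 16 };        /* global constant, no longer per-target */

typedef struct {
    void *mask;                    /* invented stand-in for CPUTLBDescFast */
    void *table;
} FakeTLBDescFast;

typedef struct {
    FakeTLBDescFast f[NB_MMU_MODES];
} FakeNegativeOffsetState;

static int mmuidx_to_fast_index(int mmu_idx)
{
    return NB_MMU_MODES - 1 - mmu_idx;
}

int main(void)
{
    /*
     * env begins immediately after the negative-offset state, so a
     * field's offset from env is its offset within the struct minus
     * the struct size: a negative number whose magnitude we want small.
     * (A variable array index inside offsetof is the same GCC/Clang
     * extension the patched tlb_mask_table_ofs relies on.)
     */
    for (int mmu_idx = 0; mmu_idx < 4; mmu_idx++) {
        long direct = (long)offsetof(FakeNegativeOffsetState, f[mmu_idx])
                      - (long)sizeof(FakeNegativeOffsetState);
        long inverted = (long)offsetof(FakeNegativeOffsetState,
                                       f[mmuidx_to_fast_index(mmu_idx)])
                        - (long)sizeof(FakeNegativeOffsetState);
        printf("mmu_idx %d: %ld direct, %ld inverted\n",
               mmu_idx, direct, inverted);
    }
    return 0;
}

Under these assumptions a target using only mmu_idx 0..3 sees its
offsets shrink from -256..-208 to -16..-64.  Smaller magnitudes matter
because hosts encode short displacements more compactly (e.g. x86's
one-byte disp8 covers -128..127, versus a four-byte disp32), so the
commonly used low mmu indexes now land in the compact range.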