static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
- uint16_t asked = data.host_int;
- uint16_t all_dirty, work, to_clean;
+ MMUIdxMap asked = data.host_int;
+ MMUIdxMap all_dirty, work, to_clean;
int64_t now = get_clock_realtime();
assert_cpu_is_self(cpu);
}
}
-void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
+void tlb_flush_by_mmuidx(CPUState *cpu, MMUIdxMap idxmap)
{
tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}
-void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
+void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, MMUIdxMap idxmap)
{
const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
*/
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
vaddr addr,
- uint16_t idxmap)
+ MMUIdxMap idxmap)
{
int mmu_idx;
{
vaddr addr_and_idxmap = data.target_ptr;
vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
- uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
+ MMUIdxMap idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}
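/*
 * Illustrative sketch, not part of this patch: the encoding side that pairs
 * with the decode above.  A page-aligned address and an MMUIdxMap can share
 * one target_ptr payload whenever the map fits below TARGET_PAGE_SIZE; the
 * helper name here is hypothetical.
 */
static run_on_cpu_data encode_page_and_idxmap(vaddr addr, MMUIdxMap idxmap)
{
    assert((addr & ~TARGET_PAGE_MASK) == 0);  /* address is page aligned */
    assert(idxmap < TARGET_PAGE_SIZE);        /* map fits in the low bits */
    return RUN_ON_CPU_TARGET_PTR(addr | idxmap);
}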
typedef struct {
vaddr addr;
- uint16_t idxmap;
+ MMUIdxMap idxmap;
} TLBFlushPageByMMUIdxData;
/**
g_free(d);
}
-void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
+void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, MMUIdxMap idxmap)
{
tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
vaddr addr,
- uint16_t idxmap)
+ MMUIdxMap idxmap)
{
tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
typedef struct {
vaddr addr;
vaddr len;
- uint16_t idxmap;
- uint16_t bits;
+ MMUIdxMap idxmap;
+ unsigned bits;
} TLBFlushRangeData;
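/*
 * Illustrative only, not part of this patch: a request to flush a 2 MiB
 * region for MMU index 1, comparing every virtual address bit, would be
 * described by this structure roughly as follows (values are placeholders):
 */
static const TLBFlushRangeData example_range_flush = {
    .addr   = 0x40000000,        /* page-aligned start of the region */
    .len    = 2 * 1024 * 1024,   /* length in bytes, a multiple of the page size */
    .idxmap = 1 << 1,            /* bitmask selecting MMU index 1 only */
    .bits   = 64,                /* all virtual address bits are significant */
};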
static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
}
void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
- vaddr len, uint16_t idxmap,
+ vaddr len, MMUIdxMap idxmap,
unsigned bits)
{
TLBFlushRangeData d;
}
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
- uint16_t idxmap, unsigned bits)
+ MMUIdxMap idxmap, unsigned bits)
{
tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
vaddr addr,
vaddr len,
- uint16_t idxmap,
+ MMUIdxMap idxmap,
unsigned bits)
{
TLBFlushRangeData d, *p;
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
vaddr addr,
- uint16_t idxmap,
+ MMUIdxMap idxmap,
unsigned bits)
{
tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
* MMU indexes.
*/
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
- uint16_t idxmap);
+ MMUIdxMap idxmap);
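/*
 * Usage sketch (illustrative, not from this patch): flush one guest page
 * from two MMU indexes at once.  MMU_KERNEL_IDX and MMU_USER_IDX are
 * placeholder names for target-defined MMU index numbers.
 */
static inline void example_flush_page_kernel_and_user(CPUState *cs, vaddr addr)
{
    MMUIdxMap map = (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX);
    tlb_flush_page_by_mmuidx(cs, addr, map);
}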
/**
* tlb_flush_page_by_mmuidx_all_cpus_synced:
* translations using the flushed TLBs.
*/
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
- uint16_t idxmap);
+ MMUIdxMap idxmap);
/**
* tlb_flush_by_mmuidx:
* Flush all entries from the TLB of the specified CPU, for the specified
* MMU indexes.
*/
-void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
+void tlb_flush_by_mmuidx(CPUState *cpu, MMUIdxMap idxmap);
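/*
 * For reference, this mirrors the existing tlb_flush() in cputlb.c (it is
 * not added by this patch): a full flush is simply the by-mmuidx flush with
 * every bit of the MMUIdxMap set, where ALL_MMUIDX_BITS is cputlb.c's
 * internal mask covering all NB_MMU_MODES indexes.
 */
void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}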
/**
* tlb_flush_by_mmuidx_all_cpus_synced:
* When this function returns, no CPUs will subsequently perform
* translations using the flushed TLBs.
*/
-void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
+void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, MMUIdxMap idxmap);
/**
* tlb_flush_page_bits_by_mmuidx
* Similar to tlb_flush_page_mask, but with a bitmap of indexes.
*/
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
- uint16_t idxmap, unsigned bits);
+ MMUIdxMap idxmap, unsigned bits);
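/*
 * Usage sketch (illustrative): with top-byte-ignore style address tagging,
 * only the low 56 bits of a 64-bit virtual address are significant, so a
 * page flush should match any entry whose low 56 bits agree with addr.
 * MMU_IDX_EL1 is a placeholder index name, not defined by this patch.
 */
static inline void example_flush_tagged_page(CPUState *cs, vaddr addr)
{
    tlb_flush_page_bits_by_mmuidx(cs, addr, 1 << MMU_IDX_EL1, 56);
}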
/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
- uint16_t idxmap,
+ MMUIdxMap idxmap,
unsigned bits);
/**
* comparing only the low @bits worth of each virtual page.
*/
void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
- vaddr len, uint16_t idxmap,
+ vaddr len, MMUIdxMap idxmap,
unsigned bits);
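/*
 * Usage sketch (illustrative): invalidate a contiguous 64 KiB block of
 * guest virtual address space for MMU index 0, comparing all address bits.
 */
static inline void example_flush_64k_region(CPUState *cs, vaddr base)
{
    tlb_flush_range_by_mmuidx(cs, base, 64 * 1024, 1 << 0, 64);
}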
/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
vaddr addr,
vaddr len,
- uint16_t idxmap,
+ MMUIdxMap idxmap,
unsigned bits);
#else
static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
- vaddr addr, uint16_t idxmap)
+ vaddr addr, MMUIdxMap idxmap)
{
}
-static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, MMUIdxMap idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
vaddr addr,
- uint16_t idxmap)
+ MMUIdxMap idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
- uint16_t idxmap)
+ MMUIdxMap idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
vaddr addr,
- uint16_t idxmap,
+ MMUIdxMap idxmap,
unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
- uint16_t idxmap, unsigned bits)
+ MMUIdxMap idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
- vaddr len, uint16_t idxmap,
+ vaddr len, MMUIdxMap idxmap,
unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
vaddr addr,
vaddr len,
- uint16_t idxmap,
+ MMUIdxMap idxmap,
unsigned bits)
{
}
};
/*
- * Fix the number of mmu modes to 16, which is also the maximum
- * supported by the softmmu tlb api.
+ * Fix the number of mmu modes to 16.
*/
#define NB_MMU_MODES 16
+typedef uint16_t MMUIdxMap;
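/*
 * Sanity-check sketch, not part of this patch: the typedef must remain wide
 * enough to hold one bit per MMU mode.  QEMU_BUILD_BUG_ON is QEMU's existing
 * compile-time assertion macro.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 8 * sizeof(MMUIdxMap));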
/* Use a fully associative victim tlb of 8 entries. */
#define CPU_VTLB_SIZE 8
* mmu_idx N since the last time that mmu_idx was flushed.
* Protected by tlb_c.lock.
*/
- uint16_t dirty;
+ MMUIdxMap dirty;
/*
* Statistics. These are not lock protected, but are read and
* written atomically. This allows the monitor to print a snapshot