ram_addr_t start = slot->ram_start_offset;
ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();
- cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
+ physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}
static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
- cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
+ physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_CODE);
}
tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
- cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
+ physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}
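/*
 * Editor's note: the two helpers above are duals. tlb_protect_code()
 * clears the DIRTY_MEMORY_CODE bit so that the next write to the page
 * traps and any translated code for it can be invalidated, while
 * tlb_unprotect_code() sets the bit again once writes to the page no
 * longer need to be trapped.
 */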
if (prot & PAGE_WRITE) {
if (section->readonly) {
write_flags |= TLB_DISCARD_WRITE;
- } else if (cpu_physical_memory_is_clean(iotlb)) {
+ } else if (physical_memory_is_clean(iotlb)) {
write_flags |= TLB_NOTDIRTY;
}
}
trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
- if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
+ if (!physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
tb_invalidate_phys_range_fast(cpu, ram_addr, size, retaddr);
}
* Set both VGA and migration bits for simplicity and to remove
* the notdirty callback faster.
*/
- cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
+ physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
/* We remove the notdirty callback only if the code has been flushed. */
- if (!cpu_physical_memory_is_clean(ram_addr)) {
+ if (!physical_memory_is_clean(ram_addr)) {
trace_memory_notdirty_set_dirty(mem_vaddr);
tlb_set_dirty(cpu, mem_vaddr);
}
bitmap = (struct vfio_bitmap *)&unmap->data;
/*
- * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
+ * physical_memory_set_dirty_lebitmap() expects one bitmap bit per page of
* qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
* to qemu_real_host_page_size.
*/
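/*
 * Sizing sketch (editor's illustration, modelled on nearby VFIO code;
 * `pages` stands in for a local variable): one bit per host page, with
 * the bitmap length rounded up to whole 64-bit words as the VFIO UAPI
 * expects.
 *
 *     bitmap->pgsize = qemu_real_host_page_size();
 *     pages = REAL_HOST_PAGE_ALIGN(unmap->size) / qemu_real_host_page_size();
 *     bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
 *                    BITS_PER_BYTE;
 */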
ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
if (!ret) {
- cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
+ physical_memory_set_dirty_lebitmap(vbmap.bitmap,
iotlb->translated_addr, vbmap.pages);
} else {
error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
range->size = size;
/*
- * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
+ * physical_memory_set_dirty_lebitmap() expects one bitmap bit per page of
* qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
* to qemu_real_host_page_size.
*/
header);
/*
- * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
+ * physical_memory_set_dirty_lebitmap() expects one bitmap bit per page of
* qemu_real_host_page_size to mark those dirty.
*/
if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
int ret;
if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) {
- cpu_physical_memory_set_dirty_range(translated_addr, size,
+ physical_memory_set_dirty_range(translated_addr, size,
tcg_enabled() ? DIRTY_CLIENTS_ALL :
DIRTY_CLIENTS_NOCODE);
return 0;
goto out;
}
- dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
+ dirty_pages = physical_memory_set_dirty_lebitmap(vbmap.bitmap,
translated_addr,
vbmap.pages);
#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
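/*
 * Worked values: with the three clients (VGA, CODE, MIGRATION) giving
 * DIRTY_MEMORY_NUM == 3, DIRTY_CLIENTS_ALL is 0b111 and
 * DIRTY_CLIENTS_NOCODE masks out the CODE bit, leaving 0b101.
 */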
-bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr, unsigned client);
+bool physical_memory_get_dirty_flag(ram_addr_t addr, unsigned client);
-bool cpu_physical_memory_is_clean(ram_addr_t addr);
+bool physical_memory_is_clean(ram_addr_t addr);
-uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
+uint8_t physical_memory_range_includes_clean(ram_addr_t start,
ram_addr_t length,
uint8_t mask);
-void cpu_physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client);
+void physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client);
-void cpu_physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length,
+void physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length,
uint8_t mask);
/*
- * Contrary to cpu_physical_memory_sync_dirty_bitmap() this function returns
+ * Unlike physical_memory_sync_dirty_bitmap(), this function returns
* the number of dirty pages in @bitmap passed as argument. On the other hand,
- * cpu_physical_memory_sync_dirty_bitmap() returns newly dirtied pages that
+ * physical_memory_sync_dirty_bitmap() returns newly dirtied pages that
* weren't set in the global migration bitmap.
*/
-uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
+uint64_t physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
ram_addr_t start,
ram_addr_t pages);
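/*
 * Usage sketch (editor's illustration, mirroring the KVM caller earlier
 * in this patch; the return value counts the bits set in @bitmap):
 *
 *     uint64_t dirty = physical_memory_set_dirty_lebitmap(slot->dirty_bmap,
 *                                                         start, pages);
 */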
-void cpu_physical_memory_dirty_bits_cleared(ram_addr_t start, ram_addr_t length);
+void physical_memory_dirty_bits_cleared(ram_addr_t start, ram_addr_t length);
-bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
+bool physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client);
-DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
+DirtyBitmapSnapshot *physical_memory_snapshot_and_clear_dirty
(MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);
-bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
+bool physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
ram_addr_t start,
ram_addr_t length);
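/*
 * Usage sketch (editor's illustration): display-style callers normally
 * reach the snapshot pair above through the memory_region_* wrappers and
 * release the snapshot with g_free() when done.
 *
 *     snap = memory_region_snapshot_and_clear_dirty(mr, 0, size,
 *                                                   DIRTY_MEMORY_VGA);
 *     if (memory_region_snapshot_get_dirty(mr, snap, addr, len)) {
 *         ... redraw the touched range ...
 *     }
 *     g_free(snap);
 */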
}
}
if (num_dirty) {
- cpu_physical_memory_dirty_bits_cleared(start, length);
+ physical_memory_dirty_bits_cleared(start, length);
}
if (rb->clear_bmap) {
ram_addr_t offset = rb->offset;
for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
- if (cpu_physical_memory_test_and_clear_dirty(
+ if (physical_memory_test_and_clear_dirty(
start + addr + offset,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_MIGRATION)) {
hwaddr size)
{
assert(mr->ram_block);
- cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
+ physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
size,
memory_region_get_dirty_log_mask(mr));
}
DirtyBitmapSnapshot *snapshot;
assert(mr->ram_block);
memory_region_sync_dirty_bitmap(mr, false);
- snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
+ snapshot = physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
memory_global_after_dirty_log_sync();
return snapshot;
}
hwaddr addr, hwaddr size)
{
assert(mr->ram_block);
- return cpu_physical_memory_snapshot_get_dirty(snap,
+ return physical_memory_snapshot_get_dirty(snap,
memory_region_get_ram_addr(mr) + addr, size);
}
hwaddr size, unsigned client)
{
assert(mr->ram_block);
- cpu_physical_memory_test_and_clear_dirty(
+ physical_memory_test_and_clear_dirty(
memory_region_get_ram_addr(mr) + addr, size, client);
}
dirty_log_mask = memory_region_get_dirty_log_mask(mr);
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
- cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
+ physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
4, dirty_log_mask);
r = MEMTX_OK;
}
}
}
-void cpu_physical_memory_dirty_bits_cleared(ram_addr_t start, ram_addr_t length)
+void physical_memory_dirty_bits_cleared(ram_addr_t start, ram_addr_t length)
{
if (tcg_enabled()) {
tlb_reset_dirty_range_all(start, length);
return dirty;
}
-bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr, unsigned client)
+bool physical_memory_get_dirty_flag(ram_addr_t addr, unsigned client)
{
return physical_memory_get_dirty(addr, 1, client);
}
-bool cpu_physical_memory_is_clean(ram_addr_t addr)
+bool physical_memory_is_clean(ram_addr_t addr)
{
- bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
- bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
+ bool vga = physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
+ bool code = physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
bool migration =
- cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
+ physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
return !(vga && code && migration);
}
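/*
 * Editor's note on the check above: a page reads as dirty here only when
 * every client has its bit set; as soon as any one client (e.g. migration
 * after a bitmap sync) clears its bit, the page counts as clean again and
 * the TLB re-arms the notdirty trap on the next refill.
 */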
return dirty;
}
-uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
+uint8_t physical_memory_range_includes_clean(ram_addr_t start,
ram_addr_t length,
uint8_t mask)
{
return ret;
}
-void cpu_physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client)
+void physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client)
{
unsigned long page, idx, offset;
DirtyMemoryBlocks *blocks;
set_bit_atomic(offset, blocks->blocks[idx]);
}
-void cpu_physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length,
+void physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length,
uint8_t mask)
{
DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
}
/* Note: start and end must be within the same ram block. */
-bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
+bool physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
}
if (dirty) {
- cpu_physical_memory_dirty_bits_cleared(start, length);
+ physical_memory_dirty_bits_cleared(start, length);
}
return dirty;
static void physical_memory_clear_dirty_range(ram_addr_t addr, ram_addr_t length)
{
- cpu_physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_MIGRATION);
- cpu_physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_VGA);
- cpu_physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_CODE);
+ physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_MIGRATION);
+ physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_VGA);
+ physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_CODE);
}
-DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
+DirtyBitmapSnapshot *physical_memory_snapshot_and_clear_dirty
(MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
{
DirtyMemoryBlocks *blocks;
}
}
- cpu_physical_memory_dirty_bits_cleared(start, length);
+ physical_memory_dirty_bits_cleared(start, length);
memory_region_clear_dirty_bitmap(mr, offset, length);
return snap;
}
-bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
+bool physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
ram_addr_t start,
ram_addr_t length)
{
return false;
}
-uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
+uint64_t physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
ram_addr_t start,
ram_addr_t pages)
{
page_number = (i * HOST_LONG_BITS + j) * hpratio;
addr = page_number * TARGET_PAGE_SIZE;
ram_addr = start + addr;
- cpu_physical_memory_set_dirty_range(ram_addr,
+ physical_memory_set_dirty_range(ram_addr,
TARGET_PAGE_SIZE * hpratio, clients);
} while (c != 0);
}
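/*
 * Worked example for the hpratio scaling above (hpratio is the host page
 * size divided by TARGET_PAGE_SIZE): with 64KiB host pages and 4KiB
 * target pages, hpratio == 16, so bit j of bitmap word i dirties the 16
 * consecutive target pages starting at (i * HOST_LONG_BITS + j) * 16.
 */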
physical_memory_clear_dirty_range(block->offset, block->used_length);
block->used_length = newsize;
- cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
+ physical_memory_set_dirty_range(block->offset, block->used_length,
DIRTY_CLIENTS_ALL);
memory_region_set_size(block->mr, unaligned_size);
if (block->resized) {
ram_list.version++;
qemu_mutex_unlock_ramlist();
- cpu_physical_memory_set_dirty_range(new_block->offset,
+ physical_memory_set_dirty_range(new_block->offset,
new_block->used_length,
DIRTY_CLIENTS_ALL);
addr += ramaddr;
/* No early return if dirty_log_mask is or becomes 0, because
- * cpu_physical_memory_set_dirty_range will still call
+ * physical_memory_set_dirty_range will still call
* xen_modified_memory.
*/
if (dirty_log_mask) {
dirty_log_mask =
- cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
+ physical_memory_range_includes_clean(addr, length, dirty_log_mask);
}
if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
assert(tcg_enabled());
tb_invalidate_phys_range(NULL, addr, addr + length - 1);
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
}
- cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
+ physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size)
*/
if (tag_access == MMU_DATA_STORE) {
ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
- cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
+ physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
}
return memory_region_get_ram_ptr(mr) + xlat;
# The eventual goal would be to fix these warnings.
# TSan is not happy about setting/getting of dirty bits,
-# for example, cpu_physical_memory_set_dirty_range,
-# and cpu_physical_memory_get_dirty.
+# for example, physical_memory_set_dirty_range,
+# and physical_memory_get_dirty.
src:bitops.c
src:bitmap.c