#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/target_page.h"
-#include "tcg.h"
+#include "tcg/tcg.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
+#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/ram_addr.h"
#include "exec/log.h"
+#include "qemu/pmem.h"
+
#include "migration/vmstate.h"
#include "qemu/range.h"
AddressSpace address_space_io;
AddressSpace address_space_memory;
-MemoryRegion io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
#endif
-#ifdef TARGET_PAGE_BITS_VARY
-int target_page_bits;
-bool target_page_bits_decided;
-#endif
-
CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
cpu_exec() */
__thread CPUState *current_cpu;
-/* 0 = Do not count executed instructions.
- 1 = Precise instruction counting.
- 2 = Adaptive rate instruction counting. */
-int use_icount;
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
-bool set_preferred_target_page_bits(int bits)
-{
- /* The target page size is the lowest common denominator for all
- * the CPUs in the system, so we can only make it smaller, never
- * larger. And we can't make it smaller once we've committed to
- * a particular size.
- */
-#ifdef TARGET_PAGE_BITS_VARY
- assert(bits >= TARGET_PAGE_BITS_MIN);
- if (target_page_bits == 0 || target_page_bits > bits) {
- if (target_page_bits_decided) {
- return false;
- }
- target_page_bits = bits;
- }
-#endif
- return true;
-}
-
#if !defined(CONFIG_USER_ONLY)
-
-static void finalize_target_page_bits(void)
-{
-#ifdef TARGET_PAGE_BITS_VARY
- if (target_page_bits == 0) {
- target_page_bits = TARGET_PAGE_BITS_MIN;
- }
- target_page_bits_decided = true;
-#endif
-}
+/* 0 = Do not count executed instructions.
+ 1 = Precise instruction counting.
+ 2 = Adaptive rate instruction counting. */
+int use_icount;
typedef struct PhysPageEntry PhysPageEntry;
} subpage_t;
#define PHYS_SECTION_UNASSIGNED 0
-#define PHYS_SECTION_NOTDIRTY 1
static void io_mem_init(void);
static void memory_map_init(void);
*/
MemoryRegion *mr = MEMORY_REGION(iommu_mr);
TCGIOMMUNotifier *notifier;
- int i;
+ Error *err = NULL;
+ int i, ret;
for (i = 0; i < cpu->iommu_notifiers->len; i++) {
notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
0,
HWADDR_MAX,
iommu_idx);
- memory_region_register_iommu_notifier(notifier->mr, &notifier->n);
+ ret = memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
+ &err);
+ if (ret) {
+ error_report_err(err);
+ exit(1);
+ }
}
if (!notifier->active) {
}
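/*
 * Illustrative sketch, not part of this patch: the Error-propagation
 * pattern used in the hunk above. The callee fills *errp on failure;
 * the caller checks the return value and hands the error to
 * error_report_err(), which reports and frees it. example_operation()
 * is a hypothetical placeholder.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"

static int example_operation(Error **errp)
{
    error_setg(errp, "example failure");
    return -1;
}

static void example_caller(void)
{
    Error *local_err = NULL;

    if (example_operation(&local_err)) {
        error_report_err(local_err);   /* reports and frees local_err */
        exit(1);
    }
}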
tlb_init(cpu);
+ qemu_plugin_vcpu_init_hook(cpu);
+
#ifndef CONFIG_USER_ONLY
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
void tb_invalidate_phys_addr(target_ulong addr)
{
mmap_lock();
- tb_invalidate_phys_page_range(addr, addr + 1, 0);
+ tb_invalidate_phys_page_range(addr, addr + 1);
mmap_unlock();
}
return;
}
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
mr = address_space_translate(as, addr, &addr, &l, false, attrs);
if (!(memory_region_is_ram(mr)
|| memory_region_is_romd(mr))) {
- rcu_read_unlock();
return;
}
ram_addr = memory_region_get_ram_addr(mr) + addr;
- tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
- rcu_read_unlock();
+ tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
}
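/*
 * Illustrative sketch, not part of this patch: RCU_READ_LOCK_GUARD()
 * (from "qemu/rcu.h") takes the RCU read lock and releases it
 * automatically when the enclosing scope exits, which is why the
 * early-return path above no longer needs a matching
 * rcu_read_unlock(). example_flag is a hypothetical placeholder.
 */
#include "qemu/rcu.h"

static bool example_flag;

static void rcu_guard_example(void)
{
    RCU_READ_LOCK_GUARD();
    if (!example_flag) {
        return;             /* read lock dropped automatically here */
    }
    example_flag = false;   /* ...and at the end of the scope otherwise */
}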
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
- MemTxAttrs attrs;
- hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
- int asidx = cpu_asidx_from_attrs(cpu, attrs);
- if (phys != -1) {
- /* Locks grabbed by tb_invalidate_phys_addr */
- tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
- phys | (pc & ~TARGET_PAGE_MASK), attrs);
- }
+ /*
+ * There may not be a virtual-to-physical translation for the pc
+ * right now, but a cached TB may still exist for it.
+ * Flush the whole TB cache to force re-translation of such TBs.
+ * This is heavyweight, but we're debugging anyway.
+ */
+ tb_flush(cpu);
}
#endif
fprintf(stderr, "\n");
cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
if (qemu_log_separate()) {
- qemu_log_lock();
+ FILE *logfile = qemu_log_lock();
qemu_log("qemu: fatal: ");
qemu_log_vprintf(fmt, ap2);
qemu_log("\n");
log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
qemu_log_flush();
- qemu_log_unlock();
+ qemu_log_unlock(logfile);
qemu_log_close();
}
va_end(ap2);
end = TARGET_PAGE_ALIGN(start + length);
start &= TARGET_PAGE_MASK;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
block = qemu_get_ram_block(start);
assert(block == qemu_get_ram_block(end - 1));
start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
CPU_FOREACH(cpu) {
tlb_reset_dirty(cpu, start1, length);
}
- rcu_read_unlock();
}
/* Note: start and end must be within the same ram block. */
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- rcu_read_lock();
+ WITH_RCU_READ_LOCK_GUARD() {
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ ramblock = qemu_get_ram_block(start);
+ /* Range sanity check on the ramblock */
+ assert(start >= ramblock->offset &&
+ start + length <= ramblock->offset + ramblock->used_length);
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
- ramblock = qemu_get_ram_block(start);
- /* Range sanity check on the ramblock */
- assert(start >= ramblock->offset &&
- start + length <= ramblock->offset + ramblock->used_length);
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page,
+ DIRTY_MEMORY_BLOCK_SIZE - offset);
- while (page < end) {
- unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
- unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
- unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
+ dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
+ offset, num);
+ page += num;
+ }
- dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
- offset, num);
- page += num;
+ mr_offset = (ram_addr_t)(page << TARGET_PAGE_BITS) - ramblock->offset;
+ mr_size = (end - page) << TARGET_PAGE_BITS;
+ memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
}
- mr_offset = (ram_addr_t)(page << TARGET_PAGE_BITS) - ramblock->offset;
- mr_size = (end - page) << TARGET_PAGE_BITS;
- memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
-
- rcu_read_unlock();
-
if (dirty && tcg_enabled()) {
tlb_reset_dirty_range_all(start, length);
}
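/*
 * Illustrative sketch, not part of this patch: the block form scopes
 * the critical section to the braces, which is why
 * tlb_reset_dirty_range_all() above now sits after the guard's closing
 * brace - the same position the old code gave it after
 * rcu_read_unlock().
 */
static void with_rcu_guard_example(void)
{
    WITH_RCU_READ_LOCK_GUARD() {
        /* reads of RCU-protected data go here */
    }
    /* the RCU read lock is no longer held at this point */
}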
end = last >> TARGET_PAGE_BITS;
dest = 0;
- rcu_read_lock();
+ WITH_RCU_READ_LOCK_GUARD() {
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
-
- while (page < end) {
- unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
- unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
- unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page,
+ DIRTY_MEMORY_BLOCK_SIZE - offset);
- assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
- assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
- offset >>= BITS_PER_LEVEL;
+ assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
+ assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
+ offset >>= BITS_PER_LEVEL;
- bitmap_copy_and_clear_atomic(snap->dirty + dest,
- blocks->blocks[idx] + offset,
- num);
- page += num;
- dest += num >> BITS_PER_LEVEL;
+ bitmap_copy_and_clear_atomic(snap->dirty + dest,
+ blocks->blocks[idx] + offset,
+ num);
+ page += num;
+ dest += num >> BITS_PER_LEVEL;
+ }
}
- rcu_read_unlock();
-
if (tcg_enabled()) {
tlb_reset_dirty_range_all(start, length);
}
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
- MemoryRegionSection *section,
- target_ulong vaddr,
- hwaddr paddr, hwaddr xlat,
- int prot,
- target_ulong *address)
-{
- hwaddr iotlb;
-
- if (memory_region_is_ram(section->mr)) {
- /* Normal RAM. */
- iotlb = memory_region_get_ram_addr(section->mr) + xlat;
- if (!section->readonly) {
- iotlb |= PHYS_SECTION_NOTDIRTY;
- }
- } else {
- AddressSpaceDispatch *d;
-
- d = flatview_to_dispatch(section->fv);
- iotlb = section - d->map.sections;
- iotlb += xlat;
- }
-
- return iotlb;
+ MemoryRegionSection *section)
+{
+ AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
+ return section - d->map.sections;
}
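/*
 * Illustrative note, not part of this patch: with the notdirty
 * special case gone, the value returned above is a plain index into
 * the dispatch table, so the TLB fill path can recover the section
 * with something like:
 *
 *     MemoryRegionSection *s = &d->map.sections[iotlb];
 *
 * where "d" is the AddressSpaceDispatch the section came from.
 */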
#endif /* defined(CONFIG_USER_ONLY) */
RAMBlock *block;
char *psize;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
monitor_printf(mon, "%24s %8s %18s %18s %18s\n",
"Block Name", "PSize", "Offset", "Used", "Total");
RAMBLOCK_FOREACH(block) {
(uint64_t)block->max_length);
g_free(psize);
}
- rcu_read_unlock();
}
#ifdef __linux__
long qemu_minrampagesize(void)
{
long hpsize = LONG_MAX;
- long mainrampagesize;
- Object *memdev_root;
- MachineState *ms = MACHINE(qdev_get_machine());
-
- mainrampagesize = qemu_mempath_getpagesize(mem_path);
-
- /* it's possible we have memory-backend objects with
- * hugepage-backed RAM. these may get mapped into system
- * address space via -numa parameters or memory hotplug
- * hooks. we want to take these into account, but we
- * also want to make sure these supported hugepage
- * sizes are applicable across the entire range of memory
- * we may boot from, so we take the min across all
- * backends, and assume normal pages in cases where a
- * backend isn't backed by hugepages.
- */
- memdev_root = object_resolve_path("/objects", NULL);
- if (memdev_root) {
- object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);
- }
- if (hpsize == LONG_MAX) {
- /* No additional memory regions found ==> Report main RAM page size */
- return mainrampagesize;
- }
-
- /* If NUMA is disabled or the NUMA nodes are not backed with a
- * memory-backend, then there is at least one node using "normal" RAM,
- * so if its page size is smaller we have got to report that size instead.
- */
- if (hpsize > mainrampagesize &&
- (ms->numa_state == NULL ||
- ms->numa_state->num_nodes == 0 ||
- ms->numa_state->nodes[0].node_memdev == NULL)) {
- static bool warned;
- if (!warned) {
- error_report("Huge page support disabled (n/a for main memory).");
- warned = true;
- }
- return mainrampagesize;
- }
+ Object *memdev_root = object_resolve_path("/objects", NULL);
+ object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);
return hpsize;
}
long qemu_maxrampagesize(void)
{
- long pagesize = qemu_mempath_getpagesize(mem_path);
+ long pagesize = 0;
Object *memdev_root = object_resolve_path("/objects", NULL);
- if (memdev_root) {
- object_child_foreach(memdev_root, find_max_backend_pagesize,
- &pagesize);
- }
+ object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize);
return pagesize;
}
#else
long qemu_minrampagesize(void)
{
- return getpagesize();
+ return qemu_real_host_page_size;
}
long qemu_maxrampagesize(void)
{
- return getpagesize();
+ return qemu_real_host_page_size;
}
#endif
bool truncate,
Error **errp)
{
- MachineState *ms = MACHINE(qdev_get_machine());
void *area;
block->page_size = qemu_fd_getpagesize(fd);
return NULL;
}
- if (mem_prealloc) {
- os_mem_prealloc(fd, area, memory, ms->smp.cpus, errp);
- if (errp && *errp) {
- qemu_ram_munmap(fd, area, memory);
- return NULL;
- }
- }
-
block->fd = fd;
return area;
}
RAMBlock *block;
ram_addr_t last = 0;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
RAMBLOCK_FOREACH(block) {
last = MAX(last, block->offset + block->max_length);
}
- rcu_read_unlock();
return last >> TARGET_PAGE_BITS;
}
}
pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
RAMBLOCK_FOREACH(block) {
if (block != new_block &&
!strcmp(block->idstr, new_block->idstr)) {
abort();
}
}
- rcu_read_unlock();
}
/* Called with iothread lock held. */
return 0;
}
+/*
+ * Trigger a sync of the given ram block for the range
+ * [start, start + length) with its backing store, if one is available;
+ * otherwise this is a no-op.
+ * Note: this is intended to be a synchronous operation.
+ */
+void qemu_ram_writeback(RAMBlock *block, ram_addr_t start, ram_addr_t length)
+{
+ /* The requested range must fit within the block's used length */
+ g_assert((start + length) <= block->used_length);
+
+#ifdef CONFIG_LIBPMEM
+ /* The pmem fast path is optional; lacking it must not block the sync */
+ if (ramblock_is_pmem(block)) {
+ void *addr = ramblock_ptr(block, start);
+ pmem_persist(addr, length);
+ return;
+ }
+#endif
+ if (block->fd >= 0) {
+ /*
+ * If there is no PMEM support, or the memory was not specified
+ * as persistent (or is not pmem at all), fall back to msync().
+ * Less optimal, but it achieves the same goal.
+ */
+ void *addr = ramblock_ptr(block, start);
+ if (qemu_msync(addr, length, block->fd)) {
+ warn_report("%s: failed to sync memory range: start: "
+ RAM_ADDR_FMT " length: " RAM_ADDR_FMT,
+ __func__, start, length);
+ }
+ }
+}
+
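/*
 * Usage sketch, not part of this patch ("block", "off" and "len" are
 * hypothetical locals): a caller that has dirtied part of a file- or
 * pmem-backed RAMBlock can push it to the backing store with a single
 * synchronous call:
 *
 *     qemu_ram_writeback(block, off, len);
 *
 * With libpmem on a pmem block this becomes pmem_persist(); otherwise
 * it falls back to msync() on the mapping, as implemented above.
 */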
/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
ram_addr_t new_ram_size)
if (new_block->host) {
qemu_ram_setup_dump(new_block->host, new_block->max_length);
qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
- /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
- qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
+ /*
+ * MADV_DONTFORK is also needed by KVM in the absence of a synchronous
+ * MMU. Configure it unless the machine is a qtest server, in which
+ * case KVM is not used and the process may be forked (e.g. for
+ * fuzzing purposes).
+ */
+ if (!qtest_enabled()) {
+ qemu_madvise(new_block->host, new_block->max_length,
+ QEMU_MADV_DONTFORK);
+ }
ram_block_notify_add(new_block->host, new_block->max_length);
}
}
size = HOST_PAGE_ALIGN(size);
file_size = get_file_size(fd);
if (file_size > 0 && file_size < size) {
- error_setg(errp, "backing store %s size 0x%" PRIx64
+ error_setg(errp, "backing store size 0x%" PRIx64
" does not match 'size' option 0x" RAM_ADDR_FMT,
- mem_path, file_size, size);
+ file_size, size);
return NULL;
}
new_block->max_length = max_size;
assert(max_size >= size);
new_block->fd = -1;
- new_block->page_size = getpagesize();
+ new_block->page_size = qemu_real_host_page_size;
new_block->host = host;
if (host) {
new_block->flags |= RAM_PREALLOC;
if (xen_enabled()) {
ram_addr_t ram_addr;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
ram_addr = xen_ram_addr_from_mapcache(ptr);
block = qemu_get_ram_block(ram_addr);
if (block) {
*offset = ram_addr - block->offset;
}
- rcu_read_unlock();
return block;
}
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
block = atomic_rcu_read(&ram_list.mru_block);
if (block && block->host && host - block->host < block->max_length) {
goto found;
}
}
- rcu_read_unlock();
return NULL;
found:
if (round_offset) {
*offset &= TARGET_PAGE_MASK;
}
- rcu_read_unlock();
return block;
}
return block->offset + offset;
}
-/* Called within RCU critical section. */
-void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
- CPUState *cpu,
- vaddr mem_vaddr,
- ram_addr_t ram_addr,
- unsigned size)
-{
- ndi->cpu = cpu;
- ndi->ram_addr = ram_addr;
- ndi->mem_vaddr = mem_vaddr;
- ndi->size = size;
- ndi->pages = NULL;
-
- trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
-
- assert(tcg_enabled());
- if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
- ndi->pages = page_collection_lock(ram_addr, ram_addr + size);
- tb_invalidate_phys_page_fast(ndi->pages, ram_addr, size);
- }
-}
-
-/* Called within RCU critical section. */
-void memory_notdirty_write_complete(NotDirtyInfo *ndi)
-{
- if (ndi->pages) {
- assert(tcg_enabled());
- page_collection_unlock(ndi->pages);
- ndi->pages = NULL;
- }
-
- /* Set both VGA and migration bits for simplicity and to remove
- * the notdirty callback faster.
- */
- cpu_physical_memory_set_dirty_range(ndi->ram_addr, ndi->size,
- DIRTY_CLIENTS_NOCODE);
- /* we remove the notdirty callback only if the code has been
- flushed */
- if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
- trace_memory_notdirty_set_dirty(ndi->mem_vaddr);
- tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
- }
-}
-
-/* Called within RCU critical section. */
-static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
- uint64_t val, unsigned size)
-{
- NotDirtyInfo ndi;
-
- memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
- ram_addr, size);
-
- stn_p(qemu_map_ram_ptr(NULL, ram_addr), size, val);
- memory_notdirty_write_complete(&ndi);
-}
-
-static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
- unsigned size, bool is_write,
- MemTxAttrs attrs)
-{
- return is_write;
-}
-
-static const MemoryRegionOps notdirty_mem_ops = {
- .write = notdirty_mem_write,
- .valid.accepts = notdirty_mem_accepts,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid = {
- .min_access_size = 1,
- .max_access_size = 8,
- .unaligned = false,
- },
- .impl = {
- .min_access_size = 1,
- .max_access_size = 8,
- .unaligned = false,
- },
-};
-
/* Generate a debug exception if a watchpoint has been hit. */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs attrs, int flags, uintptr_t ra)
cpu->watchpoint_hit = wp;
mmap_lock();
- tb_check_watchpoint(cpu);
+ tb_check_watchpoint(cpu, ra);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
mmap_unlock();
}
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf, hwaddr len);
+ MemTxAttrs attrs, void *buf, hwaddr len);
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
- const uint8_t *buf, hwaddr len);
+ const void *buf, hwaddr len);
static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
bool is_write, MemTxAttrs attrs);
{
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
NULL, UINT64_MAX);
-
- /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
- * which can be called without the iothread mutex.
- */
- memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
- NULL, UINT64_MAX);
- memory_region_clear_global_locking(&io_mem_notdirty);
}
AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
n = dummy_section(&d->map, fv, &io_mem_unassigned);
assert(n == PHYS_SECTION_UNASSIGNED);
- n = dummy_section(&d->map, fv, &io_mem_notdirty);
- assert(n == PHYS_SECTION_NOTDIRTY);
d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
* by pushing the migration thread's memory read after the vCPU thread has
* written the memory.
*/
- cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
- run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL);
+ if (replay_mode == REPLAY_MODE_NONE) {
+ /*
+ * VGA can call this function while updating the screen.
+ * In record/replay mode that would deadlock, because run_on_cpu
+ * waits for the rr mutex. Execution is fully serialized in that
+ * mode anyway, so no races are possible; only issue run_on_cpu
+ * when record/replay is disabled.
+ */
+ cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
+ run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL);
+ }
}
static void tcg_commit(MemoryListener *listener)
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
- uint8_t *buf, target_ulong len, int is_write)
+ void *ptr, target_ulong len, bool is_write)
{
int flags;
target_ulong l, page;
void * p;
+ uint8_t *buf = ptr;
while (len > 0) {
page = addr & TARGET_PAGE_MASK;
/* Called within RCU critical section. */
static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
MemTxAttrs attrs,
- const uint8_t *buf,
+ const void *ptr,
hwaddr len, hwaddr addr1,
hwaddr l, MemoryRegion *mr)
{
- uint8_t *ptr;
+ uint8_t *ram_ptr;
uint64_t val;
MemTxResult result = MEMTX_OK;
bool release_lock = false;
+ const uint8_t *buf = ptr;
for (;;) {
if (!memory_access_is_direct(mr, true)) {
size_memop(l), attrs);
} else {
/* RAM case */
- ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
- memcpy(ptr, buf, l);
+ ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
+ memcpy(ram_ptr, buf, l);
invalidate_and_set_dirty(mr, addr1, l);
}
/* Called from RCU critical section. */
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
- const uint8_t *buf, hwaddr len)
+ const void *buf, hwaddr len)
{
hwaddr l;
hwaddr addr1;
/* Called within RCU critical section. */
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf,
+ MemTxAttrs attrs, void *ptr,
hwaddr len, hwaddr addr1, hwaddr l,
MemoryRegion *mr)
{
- uint8_t *ptr;
+ uint8_t *ram_ptr;
uint64_t val;
MemTxResult result = MEMTX_OK;
bool release_lock = false;
+ uint8_t *buf = ptr;
for (;;) {
if (!memory_access_is_direct(mr, false)) {
stn_he_p(buf, l, val);
} else {
/* RAM case */
- ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
- memcpy(buf, ptr, l);
+ ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
+ memcpy(buf, ram_ptr, l);
}
if (release_lock) {
/* Called from RCU critical section. */
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf, hwaddr len)
+ MemTxAttrs attrs, void *buf, hwaddr len)
{
hwaddr l;
hwaddr addr1;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf, hwaddr len)
+ MemTxAttrs attrs, void *buf, hwaddr len)
{
MemTxResult result = MEMTX_OK;
FlatView *fv;
if (len > 0) {
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
fv = address_space_to_flatview(as);
result = flatview_read(fv, addr, attrs, buf, len);
- rcu_read_unlock();
}
return result;
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
MemTxAttrs attrs,
- const uint8_t *buf, hwaddr len)
+ const void *buf, hwaddr len)
{
MemTxResult result = MEMTX_OK;
FlatView *fv;
if (len > 0) {
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
fv = address_space_to_flatview(as);
result = flatview_write(fv, addr, attrs, buf, len);
- rcu_read_unlock();
}
return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
- uint8_t *buf, hwaddr len, bool is_write)
+ void *buf, hwaddr len, bool is_write)
{
if (is_write) {
return address_space_write(as, addr, attrs, buf, len);
}
}
-void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
- hwaddr len, int is_write)
+void cpu_physical_memory_rw(hwaddr addr, void *buf,
+ hwaddr len, bool is_write)
{
address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
buf, len, is_write);
static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
hwaddr addr,
MemTxAttrs attrs,
- const uint8_t *buf,
+ const void *ptr,
hwaddr len,
enum write_rom_type type)
{
hwaddr l;
- uint8_t *ptr;
+ uint8_t *ram_ptr;
hwaddr addr1;
MemoryRegion *mr;
+ const uint8_t *buf = ptr;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
while (len > 0) {
l = len;
mr = address_space_translate(as, addr, &addr1, &l, true, attrs);
l = memory_access_size(mr, l, addr1);
} else {
/* ROM/RAM case */
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
+ ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
switch (type) {
case WRITE_DATA:
- memcpy(ptr, buf, l);
+ memcpy(ram_ptr, buf, l);
invalidate_and_set_dirty(mr, addr1, l);
break;
case FLUSH_CACHE:
- flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
+ flush_icache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr + l);
break;
}
}
buf += l;
addr += l;
}
- rcu_read_unlock();
return MEMTX_OK;
}
/* used for ROM loading : can write in RAM and ROM */
MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
MemTxAttrs attrs,
- const uint8_t *buf, hwaddr len)
+ const void *buf, hwaddr len)
{
return address_space_write_rom_internal(as, addr, attrs,
buf, len, WRITE_DATA);
FlatView *fv;
bool result;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
fv = address_space_to_flatview(as);
result = flatview_access_valid(fv, addr, len, is_write, attrs);
- rcu_read_unlock();
return result;
}
}
l = len;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
fv = address_space_to_flatview(as);
mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
if (!memory_access_is_direct(mr, is_write)) {
if (atomic_xchg(&bounce.in_use, true)) {
- rcu_read_unlock();
return NULL;
}
/* Avoid unbounded allocations */
bounce.buffer, l);
}
- rcu_read_unlock();
*plen = l;
return bounce.buffer;
}
*plen = flatview_extend_translation(fv, addr, len, mr, xlat,
l, is_write, attrs);
ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
- rcu_read_unlock();
return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
- * Will also mark the memory as dirty if is_write == 1. access_len gives
+ * Will also mark the memory as dirty if is_write is true. access_len gives
* the amount of memory that was actually read or written by the caller.
*/
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
- int is_write, hwaddr access_len)
+ bool is_write, hwaddr access_len)
{
if (buffer != bounce.buffer) {
MemoryRegion *mr;
void *cpu_physical_memory_map(hwaddr addr,
hwaddr *plen,
- int is_write)
+ bool is_write)
{
return address_space_map(&address_space_memory, addr, plen, is_write,
MEMTXATTRS_UNSPECIFIED);
}
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
- int is_write, hwaddr access_len)
+ bool is_write, hwaddr access_len)
{
return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
- uint8_t *buf, target_ulong len, int is_write)
+ void *ptr, target_ulong len, bool is_write)
{
hwaddr phys_addr;
target_ulong l, page;
+ uint8_t *buf = ptr;
cpu_synchronize_state(cpu);
while (len > 0) {
address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
attrs, buf, l);
} else {
- address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
- attrs, buf, l, 0);
+ address_space_read(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf,
+ l);
}
len -= l;
buf += l;
hwaddr l = 1;
bool res;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
mr = address_space_translate(&address_space_memory,
phys_addr, &phys_addr, &l, false,
MEMTXATTRS_UNSPECIFIED);
res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
- rcu_read_unlock();
return res;
}
RAMBlock *block;
int ret = 0;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
RAMBLOCK_FOREACH(block) {
ret = func(block, opaque);
if (ret) {
break;
}
}
- rcu_read_unlock();
return ret;
}
uint8_t *host_startaddr = rb->host + start;
- if ((uintptr_t)host_startaddr & (rb->page_size - 1)) {
+ if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
error_report("ram_block_discard_range: Unaligned start address: %p",
host_startaddr);
goto err;
if ((start + length) <= rb->used_length) {
bool need_madvise, need_fallocate;
- uint8_t *host_endaddr = host_startaddr + length;
- if ((uintptr_t)host_endaddr & (rb->page_size - 1)) {
- error_report("ram_block_discard_range: Unaligned end address: %p",
- host_endaddr);
+ if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
+ error_report("ram_block_discard_range: Unaligned length: %zx",
+ length);
goto err;
}
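/*
 * Illustrative note, not part of this patch: QEMU_IS_ALIGNED(n, m)
 * checks that n is a multiple of m, and QEMU_PTR_IS_ALIGNED(p, n)
 * applies the same check to the pointer value. For the power-of-two
 * rb->page_size this matches the removed open-coded mask test:
 *
 *     (uintptr_t)host_startaddr & (rb->page_size - 1)
 */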