Leon3
M: Fabien Chouteau <chouteau@adacore.com>
+M: KONRAD Frederic <frederic.konrad@adacore.com>
S: Maintained
F: hw/sparc/leon3.c
F: hw/*/grlib*
-F: include/hw/sparc/grlib.h
+F: include/hw/*/grlib*
S390 Machines
-------------
F: tests/virtio-net-test.c
T: git https://github.com/jasowang/qemu.git net
+Parallel NOR Flash devices
+M: Philippe Mathieu-Daudé <philmd@redhat.com>
+T: git https://gitlab.com/philmd/qemu.git pflash-next
+S: Maintained
+F: hw/block/pflash_cfi*.c
+F: include/hw/block/flash.h
+
SCSI
M: Paolo Bonzini <pbonzini@redhat.com>
R: Fam Zheng <fam@euphon.net>
F: docs/interop/vhost-user.json
F: docs/interop/vhost-user.txt
F: contrib/vhost-user-*/
+F: backends/vhost-user.c
+F: include/sysemu/vhost-user-backend.h
virtio
M: Michael S. Tsirkin <mst@redhat.com>
virtio-input
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
+F: hw/input/vhost-user-input.c
F: hw/input/virtio-input*.c
F: include/hw/virtio/virtio-input.h
CURL
L: qemu-block@nongnu.org
-S: Supported
+S: Odd Fixes
F: block/curl.c
GLUSTER
L: qemu-block@nongnu.org
-S: Supported
+L: integration@gluster.org
+S: Odd Fixes
F: block/gluster.c
Null Block Driver
include $(SRC_PATH)/rules.mak
+# notempty and lor are defined in rules.mak
+CONFIG_TOOLS := $(call notempty,$(TOOLS))
+CONFIG_BLOCK := $(call lor,$(CONFIG_SOFTMMU),$(CONFIG_TOOLS))
+
# Create QEMU_PKGVERSION and FULL_VERSION strings
# If PKGVERSION is set, use that; otherwise get version and -dirty status from git
QEMU_PKGVERSION := $(if $(PKGVERSION),$(PKGVERSION),$(shell \
# Either "version (pkgversion)", or just "version" if pkgversion not set
FULL_VERSION := $(if $(QEMU_PKGVERSION),$(VERSION) ($(QEMU_PKGVERSION)),$(VERSION))
-GENERATED_FILES = qemu-version.h config-host.h qemu-options.def
+generated-files-y = qemu-version.h config-host.h qemu-options.def
GENERATED_QAPI_FILES = qapi/qapi-builtin-types.h qapi/qapi-builtin-types.c
GENERATED_QAPI_FILES += qapi/qapi-types.h qapi/qapi-types.c
GENERATED_QAPI_FILES += qapi/qapi-introspect.c qapi/qapi-introspect.h
GENERATED_QAPI_FILES += qapi/qapi-doc.texi
-GENERATED_FILES += $(GENERATED_QAPI_FILES)
+generated-files-y += $(GENERATED_QAPI_FILES)
-GENERATED_FILES += trace/generated-tcg-tracers.h
+generated-files-y += trace/generated-tcg-tracers.h
-GENERATED_FILES += trace/generated-helpers-wrappers.h
-GENERATED_FILES += trace/generated-helpers.h
-GENERATED_FILES += trace/generated-helpers.c
+generated-files-y += trace/generated-helpers-wrappers.h
+generated-files-y += trace/generated-helpers.h
+generated-files-y += trace/generated-helpers.c
-ifdef CONFIG_TRACE_UST
-GENERATED_FILES += trace-ust-all.h
-GENERATED_FILES += trace-ust-all.c
-endif
+generated-files-$(CONFIG_TRACE_UST) += trace-ust-all.h
+generated-files-$(CONFIG_TRACE_UST) += trace-ust-all.c
-GENERATED_FILES += module_block.h
+generated-files-y += module_block.h
TRACE_HEADERS = trace-root.h $(trace-events-subdirs:%=%/trace.h)
TRACE_SOURCES = trace-root.c $(trace-events-subdirs:%=%/trace.c)
TRACE_HEADERS += trace-ust-root.h $(trace-events-subdirs:%=%/trace-ust.h)
endif
-GENERATED_FILES += $(TRACE_HEADERS)
-GENERATED_FILES += $(TRACE_SOURCES)
-GENERATED_FILES += $(BUILD_DIR)/trace-events-all
-GENERATED_FILES += .git-submodule-status
+generated-files-y += $(TRACE_HEADERS)
+generated-files-y += $(TRACE_SOURCES)
+generated-files-y += $(BUILD_DIR)/trace-events-all
+generated-files-y += .git-submodule-status
trace-group-name = $(shell dirname $1 | sed -e 's/[^a-zA-Z0-9]/_/g')
ui/input-keymap-osx-to-qcode.c \
$(NULL)
-GENERATED_FILES += $(KEYCODEMAP_FILES)
+generated-files-$(CONFIG_SOFTMMU) += $(KEYCODEMAP_FILES)
ui/input-keymap-%.c: $(KEYCODEMAP_GEN) $(KEYCODEMAP_CSV) $(SRC_PATH)/ui/Makefile.objs
$(call quiet-command,\
# This has to be kept in sync with Kconfig.host.
MINIKCONF_ARGS = \
$(CONFIG_MINIKCONF_MODE) \
- $@ $*-config.devices.mak.d $< $(MINIKCONF_INPUTS) \
+ $@ $*/config-devices.mak.d $< $(MINIKCONF_INPUTS) \
CONFIG_KVM=$(CONFIG_KVM) \
CONFIG_SPICE=$(CONFIG_SPICE) \
CONFIG_IVSHMEM=$(CONFIG_IVSHMEM) \
$(SOFTMMU_SUBDIR_RULES): $(authz-obj-y)
$(SOFTMMU_SUBDIR_RULES): $(block-obj-y)
+$(SOFTMMU_SUBDIR_RULES): $(chardev-obj-y)
$(SOFTMMU_SUBDIR_RULES): $(crypto-obj-y)
$(SOFTMMU_SUBDIR_RULES): $(io-obj-y)
$(SOFTMMU_SUBDIR_RULES): config-all-devices.mak
subdir-slirp: .git-submodule-status
$(call quiet-command,$(MAKE) -C $(SRC_PATH)/slirp BUILD_DIR="$(BUILD_DIR)/slirp" CC="$(CC)" AR="$(AR)" LD="$(LD)" RANLIB="$(RANLIB)" CFLAGS="$(QEMU_CFLAGS) $(CFLAGS)" LDFLAGS="$(LDFLAGS)")
-$(SUBDIR_RULES): libqemuutil.a $(common-obj-y) $(chardev-obj-y) \
+$(SUBDIR_RULES): libqemuutil.a $(common-obj-y) \
$(qom-obj-y) $(crypto-aes-obj-$(CONFIG_USER_ONLY))
ROMSUBDIR_RULES=$(patsubst %,romsubdir-%, $(ROMS))
rm -f fsdev/*.pod scsi/*.pod
rm -f qemu-img-cmds.h
rm -f ui/shader/*-vert.h ui/shader/*-frag.h
- @# May not be present in GENERATED_FILES
+ @# May not be present in generated-files-y
rm -f trace/generated-tracers-dtrace.dtrace*
rm -f trace/generated-tracers-dtrace.h*
- rm -f $(foreach f,$(GENERATED_FILES),$(f) $(f)-timestamp)
+ rm -f $(foreach f,$(generated-files-y),$(f) $(f)-timestamp)
rm -f qapi-gen-timestamp
rm -rf qga/qapi-generated
for d in $(ALL_SUBDIRS); do \
# rebuilt before other object files
ifneq ($(wildcard config-host.mak),)
ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail))
-Makefile: $(GENERATED_FILES)
+Makefile: $(generated-files-y)
endif
endif
trace-events-subdirs =
trace-events-subdirs += accel/kvm
trace-events-subdirs += accel/tcg
-trace-events-subdirs += audio
+trace-events-subdirs += crypto
+ifeq ($(CONFIG_USER_ONLY),y)
+trace-events-subdirs += linux-user
+endif
+ifeq ($(CONFIG_BLOCK),y)
trace-events-subdirs += authz
trace-events-subdirs += block
+trace-events-subdirs += io
+trace-events-subdirs += nbd
+trace-events-subdirs += scsi
+endif
+ifeq ($(CONFIG_SOFTMMU),y)
trace-events-subdirs += chardev
-trace-events-subdirs += crypto
+trace-events-subdirs += audio
trace-events-subdirs += hw/9pfs
trace-events-subdirs += hw/acpi
trace-events-subdirs += hw/alpha
trace-events-subdirs += hw/block
trace-events-subdirs += hw/block/dataplane
trace-events-subdirs += hw/char
-trace-events-subdirs += hw/display
trace-events-subdirs += hw/dma
trace-events-subdirs += hw/hppa
trace-events-subdirs += hw/i2c
trace-events-subdirs += hw/watchdog
trace-events-subdirs += hw/xen
trace-events-subdirs += hw/gpio
-trace-events-subdirs += io
-trace-events-subdirs += linux-user
trace-events-subdirs += migration
-trace-events-subdirs += nbd
trace-events-subdirs += net
+trace-events-subdirs += ui
+endif
+trace-events-subdirs += hw/display
trace-events-subdirs += qapi
trace-events-subdirs += qom
-trace-events-subdirs += scsi
trace-events-subdirs += target/arm
trace-events-subdirs += target/hppa
trace-events-subdirs += target/i386
trace-events-subdirs += target/riscv
trace-events-subdirs += target/s390x
trace-events-subdirs += target/sparc
-trace-events-subdirs += ui
trace-events-subdirs += util
trace-events-files = $(SRC_PATH)/trace-events $(trace-events-subdirs:%=$(SRC_PATH)/%/trace-events)
obj-y += hw/$(TARGET_BASE_ARCH)/
endif
-GENERATED_FILES += hmp-commands.h hmp-commands-info.h
+generated-files-y += hmp-commands.h hmp-commands-info.h
endif # CONFIG_SOFTMMU
$(INSTALL_DATA) $(QEMU_PROG)-log.stp "$(DESTDIR)$(qemu_datadir)/../systemtap/tapset/$(QEMU_PROG)-log.stp"
endif
-GENERATED_FILES += config-target.h
-Makefile: $(GENERATED_FILES)
+generated-files-y += config-target.h
+Makefile: $(generated-files-y)
return ram_addr;
}
+/*
+ * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
+ * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
+ * be discarded and looked up again (e.g. via tlb_entry()).
+ */
+static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
+ MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ bool ok;
+
+ /*
+ * This is not a probe, so only valid return is success; failure
+ * should result in exception + longjmp to the cpu loop.
+ */
+ ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
+ assert(ok);
+}
+
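Editor's note: the resize comment above is the key contract of tlb_fill() --
any CPUTLBEntry pointer obtained before the call may be dangling afterwards. A
minimal sketch of the required re-lookup pattern, using only functions that
appear in this patch:

    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
        /* The fill may have resized and moved the table: re-fetch. */
        entry = tlb_entry(env, mmu_idx, addr);
    }

This is exactly the shape load_helper() and store_helper() follow below.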
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
- int mmu_idx,
- target_ulong addr, uintptr_t retaddr,
- bool recheck, MMUAccessType access_type, int size)
+ int mmu_idx, target_ulong addr, uintptr_t retaddr,
+ MMUAccessType access_type, int size)
{
CPUState *cpu = ENV_GET_CPU(env);
hwaddr mr_offset;
bool locked = false;
MemTxResult r;
- if (recheck) {
- /*
- * This is a TLB_RECHECK access, where the MMU protection
- * covers a smaller range than a target page, and we must
- * repeat the MMU check here. This tlb_fill() call might
- * longjump out if this access should cause a guest exception.
- */
- CPUTLBEntry *entry;
- target_ulong tlb_addr;
-
- tlb_fill(cpu, addr, size, access_type, mmu_idx, retaddr);
-
- entry = tlb_entry(env, mmu_idx, addr);
- tlb_addr = (access_type == MMU_DATA_LOAD ?
- entry->addr_read : entry->addr_code);
- if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
- /* RAM access */
- uintptr_t haddr = addr + entry->addend;
-
- return ldn_p((void *)haddr, size);
- }
- /* Fall through for handling IO accesses */
- }
-
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
mr = section->mr;
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
}
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
- int mmu_idx,
- uint64_t val, target_ulong addr,
- uintptr_t retaddr, bool recheck, int size)
+ int mmu_idx, uint64_t val, target_ulong addr,
+ uintptr_t retaddr, int size)
{
CPUState *cpu = ENV_GET_CPU(env);
hwaddr mr_offset;
bool locked = false;
MemTxResult r;
- if (recheck) {
- /*
- * This is a TLB_RECHECK access, where the MMU protection
- * covers a smaller range than a target page, and we must
- * repeat the MMU check here. This tlb_fill() call might
- * longjump out if this access should cause a guest exception.
- */
- CPUTLBEntry *entry;
- target_ulong tlb_addr;
-
- tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
-
- entry = tlb_entry(env, mmu_idx, addr);
- tlb_addr = tlb_addr_write(entry);
- if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
- /* RAM access */
- uintptr_t haddr = addr + entry->addend;
-
- stn_p((void *)haddr, size, val);
- return;
- }
- /* Fall through for handling IO accesses */
- }
-
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
mr = section->mr;
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
}
}
+static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
+{
+#if TCG_OVERSIZED_GUEST
+ return *(target_ulong *)((uintptr_t)entry + ofs);
+#else
+ /* ofs might correspond to .addr_write, so use atomic_read */
+ return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
+#endif
+}
+
/* Return true if ADDR is present in the victim tlb, and has been copied
back to the main tlb. */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
assert_cpu_is_self(ENV_GET_CPU(env));
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
- target_ulong cmp;
-
- /* elt_ofs might correspond to .addr_write, so use atomic_read */
-#if TCG_OVERSIZED_GUEST
- cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
-#else
- cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
-#endif
+ target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs);
if (cmp == page) {
/* Found entry in victim tlb, swap tlb and iotlb. */
}
}
+void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+ MMUAccessType access_type, int mmu_idx)
+{
+ CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+ uintptr_t tlb_addr, page;
+ size_t elt_ofs;
+
+ switch (access_type) {
+ case MMU_DATA_LOAD:
+ elt_ofs = offsetof(CPUTLBEntry, addr_read);
+ break;
+ case MMU_DATA_STORE:
+ elt_ofs = offsetof(CPUTLBEntry, addr_write);
+ break;
+ case MMU_INST_FETCH:
+ elt_ofs = offsetof(CPUTLBEntry, addr_code);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ page = addr & TARGET_PAGE_MASK;
+ tlb_addr = tlb_read_ofs(entry, elt_ofs);
+
+ if (!tlb_hit_page(tlb_addr, page)) {
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
+
+ if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
+ CPUState *cs = ENV_GET_CPU(env);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+
+ if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
+ /* Non-faulting page table read failed. */
+ return NULL;
+ }
+
+ /* TLB resize via tlb_fill may have moved the entry. */
+ entry = tlb_entry(env, mmu_idx, addr);
+ }
+ tlb_addr = tlb_read_ofs(entry, elt_ofs);
+ }
+
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ return NULL;
+ }
+
+ return (void *)((uintptr_t)addr + entry->addend);
+}
+
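Editor's note: a hedged usage sketch (the caller, buffer, and length here are
hypothetical, not part of this patch). The point of tlb_vaddr_to_host() is to
let helpers take a direct host-memory fast path and fall back to the full
access helpers when NULL comes back:

    void *host = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx);
    if (host) {
        memcpy(buf, host, len);   /* RAM-backed and present in the TLB */
    } else {
        /* IO access or non-faulting fill failure: use the slow path */
    }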
/* Probe for a read-modify-write atomic operation. Do not allow unaligned
* operations, or io operations to proceed. Return the host address. */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
}
#ifdef TARGET_WORDS_BIGENDIAN
-# define TGT_BE(X) (X)
-# define TGT_LE(X) BSWAP(X)
+#define NEED_BE_BSWAP 0
+#define NEED_LE_BSWAP 1
#else
-# define TGT_BE(X) BSWAP(X)
-# define TGT_LE(X) (X)
+#define NEED_BE_BSWAP 1
+#define NEED_LE_BSWAP 0
#endif
-#define MMUSUFFIX _mmu
+/*
+ * Byte Swap Helper
+ *
+ * This should all be compiled away to dead code, depending on the
+ * target endianness and the access type.
+ */
-#define DATA_SIZE 1
-#include "softmmu_template.h"
+static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
+{
+ if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
+ switch (size) {
+ case 1: return val;
+ case 2: return bswap16(val);
+ case 4: return bswap32(val);
+ case 8: return bswap64(val);
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ return val;
+ }
+}
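Editor's note: since NEED_BE_BSWAP/NEED_LE_BSWAP are fixed by
TARGET_WORDS_BIGENDIAN at compile time, each call site reduces to either a
plain return or a single bswap. For example, on a little-endian target a
big-endian access swaps and a little-endian one does not:

    handle_bswap(0x11223344, 4, true);   /* == bswap32(...) == 0x44332211 */
    handle_bswap(0x11223344, 4, false);  /* == 0x11223344, no swap needed */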
-#define DATA_SIZE 2
-#include "softmmu_template.h"
+/*
+ * Load Helpers
+ *
+ * We support two different access types. SOFTMMU_CODE_ACCESS is
+ * specifically for reading instructions from system memory. It is
+ * called by the translation loop and in some helpers where the code
+ * is disassembled. It shouldn't be called directly by guest code.
+ */
-#define DATA_SIZE 4
-#include "softmmu_template.h"
+typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
-#define DATA_SIZE 8
-#include "softmmu_template.h"
+static inline uint64_t __attribute__((always_inline))
+load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr, size_t size, bool big_endian, bool code_read,
+ FullLoadHelper *full_load)
+{
+ uintptr_t mmu_idx = get_mmuidx(oi);
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
+ CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+ target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+ const size_t tlb_off = code_read ?
+ offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
+ const MMUAccessType access_type =
+ code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
+ unsigned a_bits = get_alignment_bits(get_memop(oi));
+ void *haddr;
+ uint64_t res;
+
+ /* Handle CPU specific unaligned behaviour */
+ if (addr & ((1 << a_bits) - 1)) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, access_type,
+ mmu_idx, retaddr);
+ }
+
+ /* If the TLB entry is for a different page, reload and try again. */
+ if (!tlb_hit(tlb_addr, addr)) {
+ if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
+ addr & TARGET_PAGE_MASK)) {
+ tlb_fill(ENV_GET_CPU(env), addr, size,
+ access_type, mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ entry = tlb_entry(env, mmu_idx, addr);
+ }
+ tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+ }
+
+ /* Handle an IO access. */
+ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+ if ((addr & (size - 1)) != 0) {
+ goto do_unaligned_access;
+ }
+
+ if (tlb_addr & TLB_RECHECK) {
+ /*
+ * This is a TLB_RECHECK access, where the MMU protection
+ * covers a smaller range than a target page, and we must
+ * repeat the MMU check here. This tlb_fill() call might
+ * longjump out if this access should cause a guest exception.
+ */
+ tlb_fill(ENV_GET_CPU(env), addr, size,
+ access_type, mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ entry = tlb_entry(env, mmu_idx, addr);
+
+ tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+ tlb_addr &= ~TLB_RECHECK;
+ if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
+ /* RAM access */
+ goto do_aligned_access;
+ }
+ }
+
+ res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr,
+ retaddr, access_type, size);
+ return handle_bswap(res, size, big_endian);
+ }
+
+ /* Handle slow unaligned access (it spans two pages or IO). */
+ if (size > 1
+ && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
+ >= TARGET_PAGE_SIZE)) {
+ target_ulong addr1, addr2;
+        uint64_t r1, r2;   /* full_load returns 64 bits even on 32-bit hosts */
+ unsigned shift;
+ do_unaligned_access:
+ addr1 = addr & ~(size - 1);
+ addr2 = addr1 + size;
+ r1 = full_load(env, addr1, oi, retaddr);
+ r2 = full_load(env, addr2, oi, retaddr);
+ shift = (addr & (size - 1)) * 8;
+
+ if (big_endian) {
+ /* Big-endian combine. */
+ res = (r1 << shift) | (r2 >> ((size * 8) - shift));
+ } else {
+ /* Little-endian combine. */
+ res = (r1 >> shift) | (r2 << ((size * 8) - shift));
+ }
+ return res & MAKE_64BIT_MASK(0, size * 8);
+ }
+
+ do_aligned_access:
+ haddr = (void *)((uintptr_t)addr + entry->addend);
+ switch (size) {
+ case 1:
+ res = ldub_p(haddr);
+ break;
+ case 2:
+ if (big_endian) {
+ res = lduw_be_p(haddr);
+ } else {
+ res = lduw_le_p(haddr);
+ }
+ break;
+ case 4:
+ if (big_endian) {
+ res = (uint32_t)ldl_be_p(haddr);
+ } else {
+ res = (uint32_t)ldl_le_p(haddr);
+ }
+ break;
+ case 8:
+ if (big_endian) {
+ res = ldq_be_p(haddr);
+ } else {
+ res = ldq_le_p(haddr);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ return res;
+}
+
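Editor's note: the unaligned combine is easiest to check with concrete
numbers. Take a little-endian 4-byte load at addr with addr % 4 == 2, so
shift == 16, r1 holds bytes B0..B3 and r2 holds bytes B4..B7:

    /* r1 == 0xB3B2B1B0, r2 == 0xB7B6B5B4 as little-endian integers */
    res = (r1 >> 16) | (r2 << 16);   /* == ...B5B4B3B2: bytes B2..B5 */

i.e. exactly the four bytes starting at addr; the final
MAKE_64BIT_MASK(0, size * 8) then trims any bytes of r2 shifted above bit 31.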
+/*
+ * For the benefit of TCG generated code, we want to avoid the
+ * complication of ABI-specific return type promotion and always
+ * return a value extended to the register size of the host. This is
+ * tcg_target_long, except in the case of a 32-bit host and 64-bit
+ * data, and for that we always have uint64_t.
+ *
+ * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
+ */
+
+static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 1, false, false,
+ full_ldub_mmu);
+}
+
+tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return full_ldub_mmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 2, false, false,
+ full_le_lduw_mmu);
+}
+
+tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return full_le_lduw_mmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 2, true, false,
+ full_be_lduw_mmu);
+}
+
+tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return full_be_lduw_mmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 4, false, false,
+ full_le_ldul_mmu);
+}
+
+tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return full_le_ldul_mmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 4, true, false,
+ full_be_ldul_mmu);
+}
+
+tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return full_be_ldul_mmu(env, addr, oi, retaddr);
+}
+
+uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 8, false, false,
+ helper_le_ldq_mmu);
+}
+
+uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 8, true, false,
+ helper_be_ldq_mmu);
+}
+
+/*
+ * Provide signed versions of the load routines as well. We can of course
+ * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
+ */
+
+tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
+}
+
+/*
+ * Store Helpers
+ */
+
+static inline void __attribute__((always_inline))
+store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian)
+{
+ uintptr_t mmu_idx = get_mmuidx(oi);
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
+ CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+ target_ulong tlb_addr = tlb_addr_write(entry);
+ const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
+ unsigned a_bits = get_alignment_bits(get_memop(oi));
+ void *haddr;
+
+ /* Handle CPU specific unaligned behaviour */
+ if (addr & ((1 << a_bits) - 1)) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
+ /* If the TLB entry is for a different page, reload and try again. */
+ if (!tlb_hit(tlb_addr, addr)) {
+ if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
+ addr & TARGET_PAGE_MASK)) {
+ tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ entry = tlb_entry(env, mmu_idx, addr);
+ }
+ tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
+ }
+
+ /* Handle an IO access. */
+ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+ if ((addr & (size - 1)) != 0) {
+ goto do_unaligned_access;
+ }
+
+ if (tlb_addr & TLB_RECHECK) {
+ /*
+ * This is a TLB_RECHECK access, where the MMU protection
+ * covers a smaller range than a target page, and we must
+ * repeat the MMU check here. This tlb_fill() call might
+ * longjump out if this access should cause a guest exception.
+ */
+ tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ entry = tlb_entry(env, mmu_idx, addr);
+
+ tlb_addr = tlb_addr_write(entry);
+ tlb_addr &= ~TLB_RECHECK;
+ if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
+ /* RAM access */
+ goto do_aligned_access;
+ }
+ }
+
+ io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx,
+ handle_bswap(val, size, big_endian),
+ addr, retaddr, size);
+ return;
+ }
+
+ /* Handle slow unaligned access (it spans two pages or IO). */
+ if (size > 1
+ && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
+ >= TARGET_PAGE_SIZE)) {
+ int i;
+ uintptr_t index2;
+ CPUTLBEntry *entry2;
+ target_ulong page2, tlb_addr2;
+ do_unaligned_access:
+ /*
+ * Ensure the second page is in the TLB. Note that the first page
+ * is already guaranteed to be filled, and that the second page
+ * cannot evict the first.
+ */
+ page2 = (addr + size) & TARGET_PAGE_MASK;
+ index2 = tlb_index(env, mmu_idx, page2);
+ entry2 = tlb_entry(env, mmu_idx, page2);
+ tlb_addr2 = tlb_addr_write(entry2);
+ if (!tlb_hit_page(tlb_addr2, page2)
+ && !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
+ page2 & TARGET_PAGE_MASK)) {
+ tlb_fill(ENV_GET_CPU(env), page2, size, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
+ /*
+ * XXX: not efficient, but simple.
+ * This loop must go in the forward direction to avoid issues
+ * with self-modifying code in Windows 64-bit.
+ */
+ for (i = 0; i < size; ++i) {
+ uint8_t val8;
+ if (big_endian) {
+ /* Big-endian extract. */
+ val8 = val >> (((size - 1) * 8) - (i * 8));
+ } else {
+ /* Little-endian extract. */
+ val8 = val >> (i * 8);
+ }
+ helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
+ }
+ return;
+ }
+
+ do_aligned_access:
+ haddr = (void *)((uintptr_t)addr + entry->addend);
+ switch (size) {
+ case 1:
+ stb_p(haddr, val);
+ break;
+ case 2:
+ if (big_endian) {
+ stw_be_p(haddr, val);
+ } else {
+ stw_le_p(haddr, val);
+ }
+ break;
+ case 4:
+ if (big_endian) {
+ stl_be_p(haddr, val);
+ } else {
+ stl_le_p(haddr, val);
+ }
+ break;
+ case 8:
+ if (big_endian) {
+ stq_be_p(haddr, val);
+ } else {
+ stq_le_p(haddr, val);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ break;
+ }
+}
+
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ store_helper(env, addr, val, oi, retaddr, 1, false);
+}
+
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ store_helper(env, addr, val, oi, retaddr, 2, false);
+}
+
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ store_helper(env, addr, val, oi, retaddr, 2, true);
+}
+
+void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ store_helper(env, addr, val, oi, retaddr, 4, false);
+}
+
+void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ store_helper(env, addr, val, oi, retaddr, 4, true);
+}
+
+void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ store_helper(env, addr, val, oi, retaddr, 8, false);
+}
+
+void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ store_helper(env, addr, val, oi, retaddr, 8, true);
+}
/* First set of helpers allows passing in of OI and RETADDR. This makes
them callable from other helpers. */
/* Code access functions. */
-#undef MMUSUFFIX
-#define MMUSUFFIX _cmmu
-#undef GETPC
-#define GETPC() ((uintptr_t)0)
-#define SOFTMMU_CODE_ACCESS
+static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 1, false, true,
+ full_ldub_cmmu);
+}
+
+uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return full_ldub_cmmu(env, addr, oi, retaddr);
+}
-#define DATA_SIZE 1
-#include "softmmu_template.h"
+static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 2, false, true,
+ full_le_lduw_cmmu);
+}
-#define DATA_SIZE 2
-#include "softmmu_template.h"
+uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return full_le_lduw_cmmu(env, addr, oi, retaddr);
+}
-#define DATA_SIZE 4
-#include "softmmu_template.h"
+static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 2, true, true,
+ full_be_lduw_cmmu);
+}
-#define DATA_SIZE 8
-#include "softmmu_template.h"
+uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return full_be_lduw_cmmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 4, false, true,
+ full_le_ldul_cmmu);
+}
+
+uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return full_le_ldul_cmmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 4, true, true,
+ full_be_ldul_cmmu);
+}
+
+uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return full_be_ldul_cmmu(env, addr, oi, retaddr);
+}
+
+uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 8, false, true,
+ helper_le_ldq_cmmu);
+}
+
+uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 8, true, true,
+ helper_be_ldq_cmmu);
+}
+++ /dev/null
-/*
- * Software MMU support
- *
- * Generate helpers used by TCG for qemu_ld/st ops and code load
- * functions.
- *
- * Included from target op helpers and exec.c.
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#if DATA_SIZE == 8
-#define SUFFIX q
-#define LSUFFIX q
-#define SDATA_TYPE int64_t
-#define DATA_TYPE uint64_t
-#elif DATA_SIZE == 4
-#define SUFFIX l
-#define LSUFFIX l
-#define SDATA_TYPE int32_t
-#define DATA_TYPE uint32_t
-#elif DATA_SIZE == 2
-#define SUFFIX w
-#define LSUFFIX uw
-#define SDATA_TYPE int16_t
-#define DATA_TYPE uint16_t
-#elif DATA_SIZE == 1
-#define SUFFIX b
-#define LSUFFIX ub
-#define SDATA_TYPE int8_t
-#define DATA_TYPE uint8_t
-#else
-#error unsupported data size
-#endif
-
-
-/* For the benefit of TCG generated code, we want to avoid the complication
- of ABI-specific return type promotion and always return a value extended
- to the register size of the host. This is tcg_target_long, except in the
- case of a 32-bit host and 64-bit data, and for that we always have
- uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */
-#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
-# define WORD_TYPE DATA_TYPE
-# define USUFFIX SUFFIX
-#else
-# define WORD_TYPE tcg_target_ulong
-# define USUFFIX glue(u, SUFFIX)
-# define SSUFFIX glue(s, SUFFIX)
-#endif
-
-#ifdef SOFTMMU_CODE_ACCESS
-#define READ_ACCESS_TYPE MMU_INST_FETCH
-#define ADDR_READ addr_code
-#else
-#define READ_ACCESS_TYPE MMU_DATA_LOAD
-#define ADDR_READ addr_read
-#endif
-
-#if DATA_SIZE == 8
-# define BSWAP(X) bswap64(X)
-#elif DATA_SIZE == 4
-# define BSWAP(X) bswap32(X)
-#elif DATA_SIZE == 2
-# define BSWAP(X) bswap16(X)
-#else
-# define BSWAP(X) (X)
-#endif
-
-#if DATA_SIZE == 1
-# define helper_le_ld_name glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
-# define helper_be_ld_name helper_le_ld_name
-# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
-# define helper_be_lds_name helper_le_lds_name
-# define helper_le_st_name glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
-# define helper_be_st_name helper_le_st_name
-#else
-# define helper_le_ld_name glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
-# define helper_be_ld_name glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
-# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
-# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
-# define helper_le_st_name glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
-# define helper_be_st_name glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
-#endif
-
-#ifndef SOFTMMU_CODE_ACCESS
-static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
- size_t mmu_idx, size_t index,
- target_ulong addr,
- uintptr_t retaddr,
- bool recheck,
- MMUAccessType access_type)
-{
- CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
- return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, recheck,
- access_type, DATA_SIZE);
-}
-#endif
-
-WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- uintptr_t mmu_idx = get_mmuidx(oi);
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr = entry->ADDR_READ;
- unsigned a_bits = get_alignment_bits(get_memop(oi));
- uintptr_t haddr;
- DATA_TYPE res;
-
- if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
-
- /* If the TLB entry is for a different page, reload and try again. */
- if (!tlb_hit(tlb_addr, addr)) {
- if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
- tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- index = tlb_index(env, mmu_idx, addr);
- entry = tlb_entry(env, mmu_idx, addr);
- }
- tlb_addr = entry->ADDR_READ;
- }
-
- /* Handle an IO access. */
- if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- if ((addr & (DATA_SIZE - 1)) != 0) {
- goto do_unaligned_access;
- }
-
- /* ??? Note that the io helpers always read data in the target
- byte ordering. We should push the LE/BE request down into io. */
- res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
- tlb_addr & TLB_RECHECK,
- READ_ACCESS_TYPE);
- res = TGT_LE(res);
- return res;
- }
-
- /* Handle slow unaligned access (it spans two pages or IO). */
- if (DATA_SIZE > 1
- && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
- >= TARGET_PAGE_SIZE)) {
- target_ulong addr1, addr2;
- DATA_TYPE res1, res2;
- unsigned shift;
- do_unaligned_access:
- addr1 = addr & ~(DATA_SIZE - 1);
- addr2 = addr1 + DATA_SIZE;
- res1 = helper_le_ld_name(env, addr1, oi, retaddr);
- res2 = helper_le_ld_name(env, addr2, oi, retaddr);
- shift = (addr & (DATA_SIZE - 1)) * 8;
-
- /* Little-endian combine. */
- res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
- return res;
- }
-
- haddr = addr + entry->addend;
-#if DATA_SIZE == 1
- res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
-#else
- res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
-#endif
- return res;
-}
-
-#if DATA_SIZE > 1
-WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- uintptr_t mmu_idx = get_mmuidx(oi);
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr = entry->ADDR_READ;
- unsigned a_bits = get_alignment_bits(get_memop(oi));
- uintptr_t haddr;
- DATA_TYPE res;
-
- if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
-
- /* If the TLB entry is for a different page, reload and try again. */
- if (!tlb_hit(tlb_addr, addr)) {
- if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
- tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- index = tlb_index(env, mmu_idx, addr);
- entry = tlb_entry(env, mmu_idx, addr);
- }
- tlb_addr = entry->ADDR_READ;
- }
-
- /* Handle an IO access. */
- if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- if ((addr & (DATA_SIZE - 1)) != 0) {
- goto do_unaligned_access;
- }
-
- /* ??? Note that the io helpers always read data in the target
- byte ordering. We should push the LE/BE request down into io. */
- res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
- tlb_addr & TLB_RECHECK,
- READ_ACCESS_TYPE);
- res = TGT_BE(res);
- return res;
- }
-
- /* Handle slow unaligned access (it spans two pages or IO). */
- if (DATA_SIZE > 1
- && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
- >= TARGET_PAGE_SIZE)) {
- target_ulong addr1, addr2;
- DATA_TYPE res1, res2;
- unsigned shift;
- do_unaligned_access:
- addr1 = addr & ~(DATA_SIZE - 1);
- addr2 = addr1 + DATA_SIZE;
- res1 = helper_be_ld_name(env, addr1, oi, retaddr);
- res2 = helper_be_ld_name(env, addr2, oi, retaddr);
- shift = (addr & (DATA_SIZE - 1)) * 8;
-
- /* Big-endian combine. */
- res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
- return res;
- }
-
- haddr = addr + entry->addend;
- res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
- return res;
-}
-#endif /* DATA_SIZE > 1 */
-
-#ifndef SOFTMMU_CODE_ACCESS
-
-/* Provide signed versions of the load routines as well. We can of course
- avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
-#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
-WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
-}
-
-# if DATA_SIZE > 1
-WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
-}
-# endif
-#endif
-
-static inline void glue(io_write, SUFFIX)(CPUArchState *env,
- size_t mmu_idx, size_t index,
- DATA_TYPE val,
- target_ulong addr,
- uintptr_t retaddr,
- bool recheck)
-{
- CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
- return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
- recheck, DATA_SIZE);
-}
-
-void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- uintptr_t mmu_idx = get_mmuidx(oi);
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr = tlb_addr_write(entry);
- unsigned a_bits = get_alignment_bits(get_memop(oi));
- uintptr_t haddr;
-
- if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
- /* If the TLB entry is for a different page, reload and try again. */
- if (!tlb_hit(tlb_addr, addr)) {
- if (!VICTIM_TLB_HIT(addr_write, addr)) {
- tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
- mmu_idx, retaddr);
- index = tlb_index(env, mmu_idx, addr);
- entry = tlb_entry(env, mmu_idx, addr);
- }
- tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
- }
-
- /* Handle an IO access. */
- if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- if ((addr & (DATA_SIZE - 1)) != 0) {
- goto do_unaligned_access;
- }
-
- /* ??? Note that the io helpers always read data in the target
- byte ordering. We should push the LE/BE request down into io. */
- val = TGT_LE(val);
- glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr,
- retaddr, tlb_addr & TLB_RECHECK);
- return;
- }
-
- /* Handle slow unaligned access (it spans two pages or IO). */
- if (DATA_SIZE > 1
- && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
- >= TARGET_PAGE_SIZE)) {
- int i;
- target_ulong page2;
- CPUTLBEntry *entry2;
- do_unaligned_access:
- /* Ensure the second page is in the TLB. Note that the first page
- is already guaranteed to be filled, and that the second page
- cannot evict the first. */
- page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
- entry2 = tlb_entry(env, mmu_idx, page2);
- if (!tlb_hit_page(tlb_addr_write(entry2), page2)
- && !VICTIM_TLB_HIT(addr_write, page2)) {
- tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
- /* XXX: not efficient, but simple. */
- /* This loop must go in the forward direction to avoid issues
- with self-modifying code in Windows 64-bit. */
- for (i = 0; i < DATA_SIZE; ++i) {
- /* Little-endian extract. */
- uint8_t val8 = val >> (i * 8);
- glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
- oi, retaddr);
- }
- return;
- }
-
- haddr = addr + entry->addend;
-#if DATA_SIZE == 1
- glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
-#else
- glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
-#endif
-}
-
-#if DATA_SIZE > 1
-void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- uintptr_t mmu_idx = get_mmuidx(oi);
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr = tlb_addr_write(entry);
- unsigned a_bits = get_alignment_bits(get_memop(oi));
- uintptr_t haddr;
-
- if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
- /* If the TLB entry is for a different page, reload and try again. */
- if (!tlb_hit(tlb_addr, addr)) {
- if (!VICTIM_TLB_HIT(addr_write, addr)) {
- tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
- mmu_idx, retaddr);
- index = tlb_index(env, mmu_idx, addr);
- entry = tlb_entry(env, mmu_idx, addr);
- }
- tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
- }
-
- /* Handle an IO access. */
- if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- if ((addr & (DATA_SIZE - 1)) != 0) {
- goto do_unaligned_access;
- }
-
- /* ??? Note that the io helpers always read data in the target
- byte ordering. We should push the LE/BE request down into io. */
- val = TGT_BE(val);
- glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr,
- tlb_addr & TLB_RECHECK);
- return;
- }
-
- /* Handle slow unaligned access (it spans two pages or IO). */
- if (DATA_SIZE > 1
- && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
- >= TARGET_PAGE_SIZE)) {
- int i;
- target_ulong page2;
- CPUTLBEntry *entry2;
- do_unaligned_access:
- /* Ensure the second page is in the TLB. Note that the first page
- is already guaranteed to be filled, and that the second page
- cannot evict the first. */
- page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
- entry2 = tlb_entry(env, mmu_idx, page2);
- if (!tlb_hit_page(tlb_addr_write(entry2), page2)
- && !VICTIM_TLB_HIT(addr_write, page2)) {
- tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
- /* XXX: not efficient, but simple */
- /* This loop must go in the forward direction to avoid issues
- with self-modifying code. */
- for (i = 0; i < DATA_SIZE; ++i) {
- /* Big-endian extract. */
- uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
- glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
- oi, retaddr);
- }
- return;
- }
-
- haddr = addr + entry->addend;
- glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
-}
-#endif /* DATA_SIZE > 1 */
-#endif /* !defined(SOFTMMU_CODE_ACCESS) */
-
-#undef READ_ACCESS_TYPE
-#undef DATA_TYPE
-#undef SUFFIX
-#undef LSUFFIX
-#undef DATA_SIZE
-#undef ADDR_READ
-#undef WORD_TYPE
-#undef SDATA_TYPE
-#undef USUFFIX
-#undef SSUFFIX
-#undef BSWAP
-#undef helper_le_ld_name
-#undef helper_be_ld_name
-#undef helper_le_lds_name
-#undef helper_be_lds_name
-#undef helper_le_st_name
-#undef helper_be_st_name
clear_high(d, oprsz, desc);
}
+void HELPER(gvec_abs8)(void *d, void *a, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(int8_t)) {
+ int8_t aa = *(int8_t *)(a + i);
+ *(int8_t *)(d + i) = aa < 0 ? -aa : aa;
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_abs16)(void *d, void *a, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(int16_t)) {
+ int16_t aa = *(int16_t *)(a + i);
+ *(int16_t *)(d + i) = aa < 0 ? -aa : aa;
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_abs32)(void *d, void *a, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(int32_t)) {
+ int32_t aa = *(int32_t *)(a + i);
+ *(int32_t *)(d + i) = aa < 0 ? -aa : aa;
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_abs64)(void *d, void *a, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(int64_t)) {
+ int64_t aa = *(int64_t *)(a + i);
+ *(int64_t *)(d + i) = aa < 0 ? -aa : aa;
+ }
+ clear_high(d, oprsz, desc);
+}
+
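Editor's note: one two's-complement subtlety worth recording: for the most
negative value the negation wraps, so these helpers leave it unchanged, which
is typically also the SIMD ABS behaviour being modelled. For the 8-bit case:

    int8_t aa = INT8_MIN;            /* -128 */
    int8_t r  = aa < 0 ? -aa : aa;   /* -(-128) wraps back to -128 */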
void HELPER(gvec_mov)(void *d, void *a, uint32_t desc)
{
intptr_t oprsz = simd_oprsz(desc);
clear_high(d, oprsz, desc);
}
+void HELPER(gvec_shl8v)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+ uint8_t sh = *(uint8_t *)(b + i) & 7;
+ *(uint8_t *)(d + i) = *(uint8_t *)(a + i) << sh;
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_shl16v)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+ uint8_t sh = *(uint16_t *)(b + i) & 15;
+ *(uint16_t *)(d + i) = *(uint16_t *)(a + i) << sh;
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_shl32v)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
+ uint8_t sh = *(uint32_t *)(b + i) & 31;
+ *(uint32_t *)(d + i) = *(uint32_t *)(a + i) << sh;
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_shl64v)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+ uint8_t sh = *(uint64_t *)(b + i) & 63;
+ *(uint64_t *)(d + i) = *(uint64_t *)(a + i) << sh;
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_shr8v)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+ uint8_t sh = *(uint8_t *)(b + i) & 7;
+ *(uint8_t *)(d + i) = *(uint8_t *)(a + i) >> sh;
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_shr16v)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+ uint8_t sh = *(uint16_t *)(b + i) & 15;
+ *(uint16_t *)(d + i) = *(uint16_t *)(a + i) >> sh;
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_shr32v)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
+ uint8_t sh = *(uint32_t *)(b + i) & 31;
+ *(uint32_t *)(d + i) = *(uint32_t *)(a + i) >> sh;
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_shr64v)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+ uint8_t sh = *(uint64_t *)(b + i) & 63;
+ *(uint64_t *)(d + i) = *(uint64_t *)(a + i) >> sh;
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_sar8v)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(int8_t)) {
+ uint8_t sh = *(uint8_t *)(b + i) & 7;
+ *(int8_t *)(d + i) = *(int8_t *)(a + i) >> sh;
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_sar16v)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(int16_t)) {
+ uint8_t sh = *(uint16_t *)(b + i) & 15;
+ *(int16_t *)(d + i) = *(int16_t *)(a + i) >> sh;
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_sar32v)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(int32_t)) {
+ uint8_t sh = *(uint32_t *)(b + i) & 31;
+ *(int32_t *)(d + i) = *(int32_t *)(a + i) >> sh;
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_sar64v)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(int64_t)) {
+ uint8_t sh = *(uint64_t *)(b + i) & 63;
+ *(int64_t *)(d + i) = *(int64_t *)(a + i) >> sh;
+ }
+ clear_high(d, oprsz, desc);
+}
+
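Editor's note: a host-side sketch (not QEMU code) of the per-lane rule these
helpers implement. Each lane of b supplies its own shift count, masked to the
lane width, so out-of-range counts wrap rather than zeroing the lane:

    uint8_t a[4] = { 0x80, 0x80, 0x80, 0x80 };
    uint8_t b[4] = { 0, 1, 8, 9 };            /* 8 and 9 mask to 0 and 1 */
    uint8_t d[4];
    for (int i = 0; i < 4; i++) {
        d[i] = a[i] >> (b[i] & 7);            /* as in gvec_shr8v */
    }
    /* d == { 0x80, 0x40, 0x80, 0x40 } */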
/* If vectors are enabled, the compiler fills in -1 for true.
Otherwise, we must take care of this by hand. */
#ifdef CONFIG_VECTOR16
DEF_HELPER_FLAGS_3(gvec_neg32, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_neg64, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_abs8, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_abs16, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_abs32, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_abs64, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_3(gvec_not, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_and, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_or, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_sar32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_sar64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_shl8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_shl16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_shl32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_shl64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_shr8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_shr16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_shr32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_shr64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sar8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sar16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sar32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sar64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(gvec_eq8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_eq16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_eq32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
{
CPUState *cpu = current_cpu;
CPUClass *cc;
- int ret;
unsigned long address = (unsigned long)info->si_addr;
+ MMUAccessType access_type;
/* We must handle PC addresses from two different sources:
* a call return address and a signal frame address.
are still valid segv ones */
address = h2g_nocheck(address);
- cc = CPU_GET_CLASS(cpu);
- /* see if it is an MMU fault */
- g_assert(cc->handle_mmu_fault);
- ret = cc->handle_mmu_fault(cpu, address, 0, is_write, MMU_USER_IDX);
-
- if (ret == 0) {
- /* The MMU fault was handled without causing real CPU fault.
- * Retain helper_retaddr for a possible second fault.
- */
- return 1;
- }
-
- /* All other paths lead to cpu_exit; clear helper_retaddr
- * for next execution.
+ /*
+ * There is no way the target can handle this other than raising
+ * an exception. Undo signal and retaddr state prior to longjmp.
*/
- helper_retaddr = 0;
-
- if (ret < 0) {
- return 0; /* not an MMU fault */
- }
-
- /* Now we have a real cpu fault. */
- cpu_restore_state(cpu, pc, true);
-
sigprocmask(SIG_SETMASK, old_set, NULL);
- cpu_loop_exit(cpu);
+ helper_retaddr = 0;
- /* never comes here */
- return 1;
+ cc = CPU_GET_CLASS(cpu);
+ access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ cc->tlb_fill(cpu, address, 0, access_type, MMU_USER_IDX, false, pc);
+ g_assert_not_reached();
}
#if defined(__i386__)
#include "qemu/osdep.h"
#include "authz/base.h"
-#include "authz/trace.h"
+#include "trace.h"
bool qauthz_is_allowed(QAuthZ *authz,
const char *identity,
#include "qemu/osdep.h"
#include "authz/list.h"
-#include "authz/trace.h"
+#include "trace.h"
#include "qom/object_interfaces.h"
#include "qapi/qapi-visit-authz.h"
#include "qemu/osdep.h"
#include "authz/listfile.h"
-#include "authz/trace.h"
+#include "trace.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "qemu/osdep.h"
#include "authz/pamacct.h"
-#include "authz/trace.h"
+#include "trace.h"
#include "qom/object_interfaces.h"
#include <security/pam_appl.h>
#include "qemu/osdep.h"
#include "authz/simple.h"
-#include "authz/trace.h"
+#include "trace.h"
#include "qom/object_interfaces.h"
static bool qauthz_simple_is_allowed(QAuthZ *authz,
common-obj-$(CONFIG_VHOST_CRYPTO) += cryptodev-vhost-user.o
endif
+common-obj-$(call land,$(CONFIG_VHOST_USER),$(CONFIG_VIRTIO)) += vhost-user.o
+
common-obj-$(CONFIG_LINUX) += hostmem-memfd.o
--- /dev/null
+/*
+ * QEMU vhost-user backend
+ *
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Marc-André Lureau <marcandre.lureau@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/qdev.h"
+#include "qapi/error.h"
+#include "qapi/qmp/qerror.h"
+#include "qemu/error-report.h"
+#include "qom/object_interfaces.h"
+#include "sysemu/vhost-user-backend.h"
+#include "sysemu/kvm.h"
+#include "io/channel-command.h"
+#include "hw/virtio/virtio-bus.h"
+
+static bool
+ioeventfd_enabled(void)
+{
+ return kvm_enabled() && kvm_eventfds_enabled();
+}
+
+int
+vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev,
+ unsigned nvqs, Error **errp)
+{
+ int ret;
+
+ assert(!b->vdev && vdev);
+
+ if (!ioeventfd_enabled()) {
+ error_setg(errp, "vhost initialization failed: requires kvm");
+ return -1;
+ }
+
+ if (!vhost_user_init(&b->vhost_user, &b->chr, errp)) {
+ return -1;
+ }
+
+ b->vdev = vdev;
+ b->dev.nvqs = nvqs;
+ b->dev.vqs = g_new(struct vhost_virtqueue, nvqs);
+
+ ret = vhost_dev_init(&b->dev, &b->vhost_user, VHOST_BACKEND_TYPE_USER, 0);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "vhost initialization failed");
+ return -1;
+ }
+
+ return 0;
+}
+
+void
+vhost_user_backend_start(VhostUserBackend *b)
+{
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(b->vdev)));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+    int ret, i;
+
+ if (b->started) {
+ return;
+ }
+
+ if (!k->set_guest_notifiers) {
+ error_report("binding does not support guest notifiers");
+ return;
+ }
+
+ ret = vhost_dev_enable_notifiers(&b->dev, b->vdev);
+ if (ret < 0) {
+ return;
+ }
+
+ ret = k->set_guest_notifiers(qbus->parent, b->dev.nvqs, true);
+ if (ret < 0) {
+ error_report("Error binding guest notifier");
+ goto err_host_notifiers;
+ }
+
+ b->dev.acked_features = b->vdev->guest_features;
+ ret = vhost_dev_start(&b->dev, b->vdev);
+ if (ret < 0) {
+        error_report("Error starting vhost dev");
+ goto err_guest_notifiers;
+ }
+
+ /* guest_notifier_mask/pending not used yet, so just unmask
+ * everything here. virtio-pci will do the right thing by
+ * enabling/disabling irqfd.
+ */
+ for (i = 0; i < b->dev.nvqs; i++) {
+ vhost_virtqueue_mask(&b->dev, b->vdev,
+ b->dev.vq_index + i, false);
+ }
+
+ b->started = true;
+ return;
+
+err_guest_notifiers:
+ k->set_guest_notifiers(qbus->parent, b->dev.nvqs, false);
+err_host_notifiers:
+ vhost_dev_disable_notifiers(&b->dev, b->vdev);
+}
+
+void
+vhost_user_backend_stop(VhostUserBackend *b)
+{
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(b->vdev)));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ int ret = 0;
+
+ if (!b->started) {
+ return;
+ }
+
+ vhost_dev_stop(&b->dev, b->vdev);
+
+ if (k->set_guest_notifiers) {
+ ret = k->set_guest_notifiers(qbus->parent,
+ b->dev.nvqs, false);
+ if (ret < 0) {
+ error_report("vhost guest notifier cleanup failed: %d", ret);
+ }
+ }
+ assert(ret >= 0);
+
+ vhost_dev_disable_notifiers(&b->dev, b->vdev);
+ b->started = false;
+}
+
+static void set_chardev(Object *obj, const char *value, Error **errp)
+{
+ VhostUserBackend *b = VHOST_USER_BACKEND(obj);
+ Chardev *chr;
+
+ if (b->completed) {
+ error_setg(errp, QERR_PERMISSION_DENIED);
+ return;
+ }
+
+ g_free(b->chr_name);
+ b->chr_name = g_strdup(value);
+
+ chr = qemu_chr_find(b->chr_name);
+ if (chr == NULL) {
+ error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
+ "Chardev '%s' not found", b->chr_name);
+ return;
+ }
+
+ if (!qemu_chr_fe_init(&b->chr, chr, errp)) {
+ return;
+ }
+
+ b->completed = true;
+    /* could call vhost_dev_init() so early messages can be exchanged */
+}
+
+static char *get_chardev(Object *obj, Error **errp)
+{
+ VhostUserBackend *b = VHOST_USER_BACKEND(obj);
+ Chardev *chr = qemu_chr_fe_get_driver(&b->chr);
+
+ if (chr && chr->label) {
+ return g_strdup(chr->label);
+ }
+
+ return NULL;
+}
+
+static void vhost_user_backend_init(Object *obj)
+{
+ object_property_add_str(obj, "chardev", get_chardev, set_chardev, NULL);
+}
+
+static void vhost_user_backend_finalize(Object *obj)
+{
+ VhostUserBackend *b = VHOST_USER_BACKEND(obj);
+
+ g_free(b->dev.vqs);
+ g_free(b->chr_name);
+
+ vhost_user_cleanup(&b->vhost_user);
+ qemu_chr_fe_deinit(&b->chr, true);
+}
+
+static const TypeInfo vhost_user_backend_info = {
+ .name = TYPE_VHOST_USER_BACKEND,
+ .parent = TYPE_OBJECT,
+ .instance_size = sizeof(VhostUserBackend),
+ .instance_init = vhost_user_backend_init,
+ .instance_finalize = vhost_user_backend_finalize,
+ .class_size = sizeof(VhostUserBackendClass),
+};
+
+static void register_types(void)
+{
+ type_register_static(&vhost_user_backend_info);
+}
+
+type_init(register_types);
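For orientation, here is a minimal sketch of how a virtio device could consume this backend from its realize() hook, assuming the init helper defined at the top of this file is named vhost_user_backend_dev_init(); the device type and field names are hypothetical:

    /* Sketch only: MyDevice is a hypothetical virtio device. */
    static void my_device_realize(DeviceState *dev, Error **errp)
    {
        MyDevice *d = MY_DEVICE(dev);

        d->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
        /* (value, name) argument order, per the QOM API of this era */
        object_property_set_str(OBJECT(d->vhost), "chardev0", "chardev", errp);

        if (vhost_user_backend_dev_init(d->vhost, VIRTIO_DEVICE(d),
                                        2 /* nvqs */, errp) < 0) {
            return;
        }
        /* From the device's set_status callback:
         *   DRIVER_OK -> vhost_user_backend_start(d->vhost);
         *   reset     -> vhost_user_backend_stop(d->vhost);
         */
    }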
return 0;
}
+static bool bdrv_child_cb_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
+ GSList **ignore, Error **errp)
+{
+ BlockDriverState *bs = child->opaque;
+ return bdrv_can_set_aio_context(bs, ctx, ignore, errp);
+}
+
+static void bdrv_child_cb_set_aio_ctx(BdrvChild *child, AioContext *ctx,
+ GSList **ignore)
+{
+ BlockDriverState *bs = child->opaque;
+ return bdrv_set_aio_context_ignore(bs, ctx, ignore);
+}
+
/*
* Returns the options and flags that a temporary snapshot should get, based on
* the originally requested flags (the originally requested image will have
.attach = bdrv_child_cb_attach,
.detach = bdrv_child_cb_detach,
.inactivate = bdrv_child_cb_inactivate,
+ .can_set_aio_ctx = bdrv_child_cb_can_set_aio_ctx,
+ .set_aio_ctx = bdrv_child_cb_set_aio_ctx,
};
/*
.attach = bdrv_child_cb_attach,
.detach = bdrv_child_cb_detach,
.inactivate = bdrv_child_cb_inactivate,
+ .can_set_aio_ctx = bdrv_child_cb_can_set_aio_ctx,
+ .set_aio_ctx = bdrv_child_cb_set_aio_ctx,
};
static void bdrv_backing_attach(BdrvChild *c)
.drained_end = bdrv_child_cb_drained_end,
.inactivate = bdrv_child_cb_inactivate,
.update_filename = bdrv_backing_update_filename,
+ .can_set_aio_ctx = bdrv_child_cb_can_set_aio_ctx,
+ .set_aio_ctx = bdrv_child_cb_set_aio_ctx,
};
static int bdrv_open_flags(BlockDriverState *bs, int flags)
GSList *ignore_children, Error **errp);
static void bdrv_child_abort_perm_update(BdrvChild *c);
static void bdrv_child_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared);
+static void bdrv_get_cumulative_perm(BlockDriverState *bs, uint64_t *perm,
+ uint64_t *shared_perm);
typedef struct BlockReopenQueueEntry {
bool prepared;
if ((cumulative_perms & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) &&
!bdrv_is_writable_after_reopen(bs, q))
{
- error_setg(errp, "Block node is read-only");
+ if (!bdrv_is_writable_after_reopen(bs, NULL)) {
+ error_setg(errp, "Block node is read-only");
+ } else {
+ uint64_t current_perms, current_shared;
+ bdrv_get_cumulative_perm(bs, &current_perms, &current_shared);
+ if (current_perms & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
+ error_setg(errp, "Cannot make block node read-only, there is "
+ "a writer on it");
+ } else {
+ error_setg(errp, "Cannot make block node read-only and create "
+ "a writer on it");
+ }
+ }
+
return -EPERM;
}
assert(bdrv_op_blocker_is_empty(bs));
assert(!bs->refcnt);
- bdrv_close(bs);
-
/* remove from list, if necessary */
if (bs->node_name[0] != '\0') {
QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
}
QTAILQ_REMOVE(&all_bdrv_states, bs, bs_list);
+ bdrv_close(bs);
+
g_free(bs);
}
g_free(ban);
}
-void bdrv_detach_aio_context(BlockDriverState *bs)
+static void bdrv_detach_aio_context(BlockDriverState *bs)
{
BdrvAioNotifier *baf, *baf_tmp;
- BdrvChild *child;
assert(!bs->walking_aio_notifiers);
bs->walking_aio_notifiers = true;
if (bs->drv && bs->drv->bdrv_detach_aio_context) {
bs->drv->bdrv_detach_aio_context(bs);
}
- QLIST_FOREACH(child, &bs->children, next) {
- bdrv_detach_aio_context(child->bs);
- }
if (bs->quiesce_counter) {
aio_enable_external(bs->aio_context);
bs->aio_context = NULL;
}
-void bdrv_attach_aio_context(BlockDriverState *bs,
- AioContext *new_context)
+static void bdrv_attach_aio_context(BlockDriverState *bs,
+ AioContext *new_context)
{
BdrvAioNotifier *ban, *ban_tmp;
- BdrvChild *child;
if (bs->quiesce_counter) {
aio_disable_external(new_context);
bs->aio_context = new_context;
- QLIST_FOREACH(child, &bs->children, next) {
- bdrv_attach_aio_context(child->bs, new_context);
- }
if (bs->drv && bs->drv->bdrv_attach_aio_context) {
bs->drv->bdrv_attach_aio_context(bs, new_context);
}
bs->walking_aio_notifiers = false;
}
-/* The caller must own the AioContext lock for the old AioContext of bs, but it
- * must not own the AioContext lock for new_context (unless new_context is
- * the same as the current context of bs). */
-void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
+/* @ignore will accumulate all visited BdrvChild objects. The caller is
+ * responsible for freeing the list afterwards. */
+void bdrv_set_aio_context_ignore(BlockDriverState *bs,
+ AioContext *new_context, GSList **ignore)
{
+ BdrvChild *child;
+
if (bdrv_get_aio_context(bs) == new_context) {
return;
}
bdrv_drained_begin(bs);
+
+ QLIST_FOREACH(child, &bs->children, next) {
+ if (g_slist_find(*ignore, child)) {
+ continue;
+ }
+ *ignore = g_slist_prepend(*ignore, child);
+ bdrv_set_aio_context_ignore(child->bs, new_context, ignore);
+ }
+ QLIST_FOREACH(child, &bs->parents, next_parent) {
+ if (g_slist_find(*ignore, child)) {
+ continue;
+ }
+ if (child->role->set_aio_ctx) {
+ *ignore = g_slist_prepend(*ignore, child);
+ child->role->set_aio_ctx(child, new_context, ignore);
+ }
+ }
+
bdrv_detach_aio_context(bs);
/* This function executes in the old AioContext so acquire the new one in
aio_context_release(new_context);
}
+/* The caller must own the AioContext lock for the old AioContext of bs, but it
+ * must not own the AioContext lock for new_context (unless new_context is
+ * the same as the current context of bs). */
+void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
+{
+ GSList *ignore_list = NULL;
+ bdrv_set_aio_context_ignore(bs, new_context, &ignore_list);
+ g_slist_free(ignore_list);
+}
+
+static bool bdrv_parent_can_set_aio_context(BdrvChild *c, AioContext *ctx,
+ GSList **ignore, Error **errp)
+{
+ if (g_slist_find(*ignore, c)) {
+ return true;
+ }
+ *ignore = g_slist_prepend(*ignore, c);
+
+ /* A BdrvChildRole that doesn't provide a .can_set_aio_ctx callback
+ * cannot tolerate any AioContext change */
+ if (!c->role->can_set_aio_ctx) {
+ char *user = bdrv_child_user_desc(c);
+ error_setg(errp, "Changing iothreads is not supported by %s", user);
+ g_free(user);
+ return false;
+ }
+ if (!c->role->can_set_aio_ctx(c, ctx, ignore, errp)) {
+ assert(!errp || *errp);
+ return false;
+ }
+ return true;
+}
+
+bool bdrv_child_can_set_aio_context(BdrvChild *c, AioContext *ctx,
+ GSList **ignore, Error **errp)
+{
+ if (g_slist_find(*ignore, c)) {
+ return true;
+ }
+ *ignore = g_slist_prepend(*ignore, c);
+ return bdrv_can_set_aio_context(c->bs, ctx, ignore, errp);
+}
+
+/* @ignore will accumulate all visited BdrvChild objects. The caller is
+ * responsible for freeing the list afterwards. */
+bool bdrv_can_set_aio_context(BlockDriverState *bs, AioContext *ctx,
+ GSList **ignore, Error **errp)
+{
+ BdrvChild *c;
+
+ if (bdrv_get_aio_context(bs) == ctx) {
+ return true;
+ }
+
+ QLIST_FOREACH(c, &bs->parents, next_parent) {
+ if (!bdrv_parent_can_set_aio_context(c, ctx, ignore, errp)) {
+ return false;
+ }
+ }
+ QLIST_FOREACH(c, &bs->children, next) {
+ if (!bdrv_child_can_set_aio_context(c, ctx, ignore, errp)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+int bdrv_child_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
+ BdrvChild *ignore_child, Error **errp)
+{
+ GSList *ignore;
+ bool ret;
+
+ ignore = ignore_child ? g_slist_prepend(NULL, ignore_child) : NULL;
+ ret = bdrv_can_set_aio_context(bs, ctx, &ignore, errp);
+ g_slist_free(ignore);
+
+ if (!ret) {
+ return -EPERM;
+ }
+
+ ignore = ignore_child ? g_slist_prepend(NULL, ignore_child) : NULL;
+ bdrv_set_aio_context_ignore(bs, ctx, &ignore);
+ g_slist_free(ignore);
+
+ return 0;
+}
+
+int bdrv_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
+ Error **errp)
+{
+ return bdrv_child_try_set_aio_context(bs, ctx, NULL, errp);
+}
+
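A sketch of the intended call pattern for the new entry points, e.g. moving a node into an IOThread's AioContext, with the iothread setup assumed:

    Error *local_err = NULL;

    /* Moves bs and everything reachable from it, or fails with -EPERM
     * if any parent refuses the change. */
    if (bdrv_try_set_aio_context(bs, iothread_get_aio_context(iothread),
                                 &local_err) < 0) {
        error_reportf_err(local_err, "Cannot change iothread: ");
    }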
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
void (*attached_aio_context)(AioContext *new_context, void *opaque),
void (*detach_aio_context)(void *opaque), void *opaque)
s->target = NULL;
}
-static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
-{
- BackupBlockJob *s = container_of(job, BackupBlockJob, common);
-
- blk_set_aio_context(s->target, aio_context);
-}
-
void backup_do_checkpoint(BlockJob *job, Error **errp)
{
BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
.abort = backup_abort,
.clean = backup_clean,
},
- .attached_aio_context = backup_attached_aio_context,
.drain = backup_drain,
};
uint64_t shared_perm;
bool disable_perm;
+ bool allow_aio_context_change;
bool allow_write_beyond_eof;
NotifierList remove_bs_notifiers, insert_bs_notifiers;
static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);
+static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
+ GSList **ignore, Error **errp);
+static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
+ GSList **ignore);
+
static char *blk_root_get_parent_desc(BdrvChild *child)
{
BlockBackend *blk = child->opaque;
.attach = blk_root_attach,
.detach = blk_root_detach,
+
+ .can_set_aio_ctx = blk_root_can_set_aio_ctx,
+ .set_aio_ctx = blk_root_set_aio_ctx,
};
/*
blk->allow_write_beyond_eof = allow;
}
+void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
+{
+ blk->allow_aio_context_change = allow;
+}
+
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
size_t size)
{
return blk_get_aio_context(blk_acb->blk);
}
-void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
+static void blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
+ bool update_root_node)
{
BlockDriverState *bs = blk_bs(blk);
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
throttle_group_attach_aio_context(tgm, new_context);
bdrv_drained_end(bs);
}
- bdrv_set_aio_context(bs, new_context);
+ if (update_root_node) {
+ GSList *ignore = g_slist_prepend(NULL, blk->root);
+ bdrv_set_aio_context_ignore(bs, new_context, &ignore);
+ g_slist_free(ignore);
+ }
}
}
+void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
+{
+ blk_do_set_aio_context(blk, new_context, true);
+}
+
+static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
+ GSList **ignore, Error **errp)
+{
+ BlockBackend *blk = child->opaque;
+
+ if (blk->allow_aio_context_change) {
+ return true;
+ }
+
+ /* Only manually created BlockBackends that are not attached to anything
+ * can change their AioContext without updating their user. */
+ if (!blk->name || blk->dev) {
+ /* TODO Add BB name/QOM path */
+ error_setg(errp, "Cannot change iothread of active block backend");
+ return false;
+ }
+
+ return true;
+}
+
+static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
+ GSList **ignore)
+{
+ BlockBackend *blk = child->opaque;
+ blk_do_set_aio_context(blk, ctx, false);
+}
+
void blk_add_aio_context_notifier(BlockBackend *blk,
void (*attached_aio_context)(AioContext *new_context, void *opaque),
void (*detach_aio_context)(void *opaque), void *opaque)
*
*/
-#ifndef BLOCK_CRYPTO_H__
-#define BLOCK_CRYPTO_H__
+#ifndef BLOCK_CRYPTO_H
+#define BLOCK_CRYPTO_H
#define BLOCK_CRYPTO_OPT_DEF_KEY_SECRET(prefix, helpstr) \
{ \
QCryptoBlockOpenOptions *
block_crypto_open_opts_init(QDict *opts, Error **errp);
-#endif /* BLOCK_CRYPTO_H__ */
+#endif /* BLOCK_CRYPTO_H */
#ifdef CONFIG_XFS
static int xfs_write_zeroes(BDRVRawState *s, int64_t offset, uint64_t bytes)
{
+ int64_t len;
struct xfs_flock64 fl;
int err;
+ len = lseek(s->fd, 0, SEEK_END);
+ if (len < 0) {
+ return -errno;
+ }
+
+ if (offset + bytes > len) {
+ /* XFS_IOC_ZERO_RANGE does not increase the file length */
+ if (ftruncate(s->fd, offset + bytes) < 0) {
+ return -errno;
+ }
+ }
+
memset(&fl, 0, sizeof(fl));
fl.l_whence = SEEK_SET;
fl.l_start = offset;
off_t data = 0, hole = 0;
int ret;
+ assert(QEMU_IS_ALIGNED(offset | bytes, bs->bl.request_alignment));
+
ret = fd_open(bs);
if (ret < 0) {
return ret;
/* On a data extent, compute bytes to the end of the extent,
* possibly including a partial sector at EOF. */
*pnum = MIN(bytes, hole - offset);
+
+ /*
+ * We are not allowed to return partial sectors, though, so
+ * round up if necessary.
+ */
+ if (!QEMU_IS_ALIGNED(*pnum, bs->bl.request_alignment)) {
+ int64_t file_length = raw_getlength(bs);
+ if (file_length > 0) {
+ /* Ignore errors, this is just a safeguard */
+ assert(hole == file_length);
+ }
+ *pnum = ROUND_UP(*pnum, bs->bl.request_alignment);
+ }
+
ret = BDRV_BLOCK_DATA;
} else {
/* On a hole, compute bytes to the beginning of the next extent. */
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
size_t size)
{
- if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
+ if (size > BDRV_REQUEST_MAX_BYTES) {
return -EIO;
}
return rwco.ret;
}
-/*
- * Process a synchronous request using coroutines
- */
-static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
- int nb_sectors, bool is_write, BdrvRequestFlags flags)
-{
- QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf,
- nb_sectors * BDRV_SECTOR_SIZE);
-
- if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
- return -EINVAL;
- }
-
- return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
- &qiov, is_write, flags);
-}
-
-/* return < 0 if error. See bdrv_write() for the return codes */
-int bdrv_read(BdrvChild *child, int64_t sector_num,
- uint8_t *buf, int nb_sectors)
-{
- return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
-}
-
-/* Return < 0 if error. Important errors are:
- -EIO generic I/O error (may happen for all errors)
- -ENOMEDIUM No media inserted.
- -EINVAL Invalid sector number or nb_sectors
- -EACCES Trying to write a read-only device
-*/
-int bdrv_write(BdrvChild *child, int64_t sector_num,
- const uint8_t *buf, int nb_sectors)
-{
- return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
-}
-
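The conversions later in this series (qcow2-refcount, vdi, vvfat) all follow the same byte-based replacement pattern, roughly:

    /* Before (sector-based, now removed):
     *     ret = bdrv_read(child, sector_num, buf, nb_sectors);
     * After (byte-based; returns the byte count on success, < 0 on error):
     */
    ret = bdrv_pread(child, sector_num * BDRV_SECTOR_SIZE,
                     buf, nb_sectors * BDRV_SECTOR_SIZE);

Callers that relied on the old helpers returning 0 on success must normalize the result, as the vdi change below does with "return ret < 0 ? ret : 0;".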
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
int bytes, BdrvRequestFlags flags)
{
return qiov->size;
}
+/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
return qiov->size;
}
+/* Return no. of bytes on success or < 0 on error. Important errors are:
+ -EIO generic I/O error (may happen for all errors)
+ -ENOMEDIUM No media inserted.
+ -EINVAL Invalid offset or number of bytes
+ -EACCES Trying to write a read-only device
+*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
- assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
+ assert(bytes <= BDRV_REQUEST_MAX_BYTES);
assert(drv->bdrv_co_readv);
return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
- assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
+ assert(bytes <= BDRV_REQUEST_MAX_BYTES);
assert(drv->bdrv_co_writev);
ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
return !!s->in_flight;
}
-static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
-{
- MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
-
- blk_set_aio_context(s->target, new_context);
-}
-
static void mirror_drain(BlockJob *job)
{
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
.complete = mirror_complete,
},
.drained_poll = mirror_drained_poll,
- .attached_aio_context = mirror_attached_aio_context,
.drain = mirror_drain,
};
.complete = mirror_complete,
},
.drained_poll = mirror_drained_poll,
- .attached_aio_context = mirror_attached_aio_context,
.drain = mirror_drain,
};
* ensure that. */
blk_set_force_allow_inactivate(s->target);
}
+ blk_set_allow_aio_context_change(s->target, true);
s->replaces = g_strdup(replaces);
s->on_source_error = on_source_error;
return cluster_offset;
}
- nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
- (cluster_offset >> 9);
+ nb_csectors =
+ (cluster_offset + compressed_size - 1) / QCOW2_COMPRESSED_SECTOR_SIZE -
+ (cluster_offset / QCOW2_COMPRESSED_SECTOR_SIZE);
cluster_offset |= QCOW_OFLAG_COMPRESSED |
((uint64_t)nb_csectors << s->csize_shift);
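A worked example of this descriptor arithmetic (values illustrative): for compressed data at cluster_offset = 0x10234 with compressed_size = 1484, the stored field is (0x10234 + 1484 - 1) / 512 - 0x10234 / 512 = 131 - 129 = 2. The read side below adds 1 back, giving nb_csectors = 3, and computes csize = 3 * 512 - (0x10234 & 511) = 1536 - 52 = 1484 bytes, i.e. the span from the start offset to the end of its last 512-byte compressed sector (in general this rounds up to the sector boundary, so csize can exceed the stored data length).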
switch (ctype) {
case QCOW2_CLUSTER_COMPRESSED:
{
- int nb_csectors;
- nb_csectors = ((l2_entry >> s->csize_shift) &
- s->csize_mask) + 1;
- qcow2_free_clusters(bs,
- (l2_entry & s->cluster_offset_mask) & ~511,
- nb_csectors * 512, type);
+ int64_t offset = (l2_entry & s->cluster_offset_mask)
+ & QCOW2_COMPRESSED_SECTOR_MASK;
+ int size = QCOW2_COMPRESSED_SECTOR_SIZE *
+ (((l2_entry >> s->csize_shift) & s->csize_mask) + 1);
+ qcow2_free_clusters(bs, offset, size, type);
}
break;
case QCOW2_CLUSTER_NORMAL:
nb_csectors = ((entry >> s->csize_shift) &
s->csize_mask) + 1;
if (addend != 0) {
+ uint64_t coffset = (entry & s->cluster_offset_mask)
+ & QCOW2_COMPRESSED_SECTOR_MASK;
ret = update_refcount(
- bs, (entry & s->cluster_offset_mask) & ~511,
- nb_csectors * 512, abs(addend), addend < 0,
+ bs, coffset,
+ nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE,
+ abs(addend), addend < 0,
QCOW2_DISCARD_SNAPSHOT);
if (ret < 0) {
goto fail;
nb_csectors = ((l2_entry >> s->csize_shift) &
s->csize_mask) + 1;
l2_entry &= s->cluster_offset_mask;
- ret = qcow2_inc_refcounts_imrt(bs, res,
- refcount_table, refcount_table_size,
- l2_entry & ~511, nb_csectors * 512);
+ ret = qcow2_inc_refcounts_imrt(
+ bs, res, refcount_table, refcount_table_size,
+ l2_entry & QCOW2_COMPRESSED_SECTOR_MASK,
+ nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE);
if (ret < 0) {
goto fail;
}
on_disk_refblock = (void *)((char *) *refcount_table +
refblock_index * s->cluster_size);
- ret = bdrv_write(bs->file, refblock_offset / BDRV_SECTOR_SIZE,
- on_disk_refblock, s->cluster_sectors);
+ ret = bdrv_pwrite(bs->file, refblock_offset, on_disk_refblock,
+ s->cluster_size);
if (ret < 0) {
fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
goto fail;
s->cluster_bits = header.cluster_bits;
s->cluster_size = 1 << s->cluster_bits;
- s->cluster_sectors = 1 << (s->cluster_bits - BDRV_SECTOR_BITS);
/* Initialise version 3 header fields */
if (header.version == 2) {
coffset = file_cluster_offset & s->cluster_offset_mask;
nb_csectors = ((file_cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
- csize = nb_csectors * 512 - (coffset & 511);
+ csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE -
+ (coffset & ~QCOW2_COMPRESSED_SECTOR_MASK);
buf = g_try_malloc(csize);
if (!buf) {
#define MIN_CLUSTER_BITS 9
#define MAX_CLUSTER_BITS 21
+/* Defined in the qcow2 spec (compressed cluster descriptor) */
+#define QCOW2_COMPRESSED_SECTOR_SIZE 512U
+#define QCOW2_COMPRESSED_SECTOR_MASK (~(QCOW2_COMPRESSED_SECTOR_SIZE - 1))
+
/* Must be at least 2 to cover COW */
#define MIN_L2_CACHE_SIZE 2 /* cache entries */
typedef struct BDRVQcow2State {
int cluster_bits;
int cluster_size;
- int cluster_sectors;
int l2_slice_size;
int l2_bits;
int l2_size;
uint64_t unused2[7];
} QEMU_PACKED VdiHeader;
+QEMU_BUILD_BUG_ON(sizeof(VdiHeader) != 512);
+
typedef struct {
/* The block map entries are little endian (even in memory). */
uint32_t *bmap;
logout("\n");
- ret = bdrv_read(bs->file, 0, (uint8_t *)&header, 1);
+ ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
if (ret < 0) {
goto fail;
}
goto fail;
}
- ret = bdrv_read(bs->file, s->bmap_sector, (uint8_t *)s->bmap,
- bmap_size);
+ ret = bdrv_pread(bs->file, header.offset_bmap, s->bmap,
+ bmap_size * SECTOR_SIZE);
if (ret < 0) {
goto fail_free_bmap;
}
assert(VDI_IS_ALLOCATED(bmap_first));
*header = s->header;
vdi_header_to_le(header);
- ret = bdrv_write(bs->file, 0, block, 1);
+ ret = bdrv_pwrite(bs->file, 0, block, sizeof(VdiHeader));
g_free(block);
block = NULL;
base = ((uint8_t *)&s->bmap[0]) + bmap_first * SECTOR_SIZE;
logout("will write %u block map sectors starting from entry %u\n",
n_sectors, bmap_first);
- ret = bdrv_write(bs->file, offset, base, n_sectors);
+ ret = bdrv_pwrite(bs->file, offset * SECTOR_SIZE, base,
+ n_sectors * SECTOR_SIZE);
}
- return ret;
+ return ret < 0 ? ret : 0;
}
static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options,
DLOG(fprintf(stderr, "sectors %" PRId64 "+%" PRId64
" allocated\n", sector_num,
n >> BDRV_SECTOR_BITS));
- if (bdrv_read(s->qcow, sector_num, buf + i * 0x200,
- n >> BDRV_SECTOR_BITS)) {
+ if (bdrv_pread(s->qcow, sector_num * BDRV_SECTOR_SIZE,
+ buf + i * 0x200, n) < 0) {
return -1;
}
i += (n >> BDRV_SECTOR_BITS) - 1;
if (res) {
return -1;
}
- res = bdrv_write(s->qcow, offset, s->cluster_buffer, 1);
- if (res) {
+ res = bdrv_pwrite(s->qcow, offset * BDRV_SECTOR_SIZE,
+ s->cluster_buffer, BDRV_SECTOR_SIZE);
+ if (res < 0) {
return -2;
}
}
* Use qcow backend. Commit later.
*/
DLOG(fprintf(stderr, "Write to qcow backend: %d + %d\n", (int)sector_num, nb_sectors));
- ret = bdrv_write(s->qcow, sector_num, buf, nb_sectors);
+ ret = bdrv_pwrite(s->qcow, sector_num * BDRV_SECTOR_SIZE, buf,
+ nb_sectors * BDRV_SECTOR_SIZE);
if (ret < 0) {
fprintf(stderr, "Error writing to qcow backend\n");
return ret;
}
}
-static void block_job_attached_aio_context(AioContext *new_context,
- void *opaque);
-static void block_job_detach_aio_context(void *opaque);
-
void block_job_free(Job *job)
{
BlockJob *bjob = container_of(job, BlockJob, job);
bs->job = NULL;
block_job_remove_all_bdrv(bjob);
- blk_remove_aio_context_notifier(bjob->blk,
- block_job_attached_aio_context,
- block_job_detach_aio_context, bjob);
blk_unref(bjob->blk);
error_free(bjob->blocker);
}
-static void block_job_attached_aio_context(AioContext *new_context,
- void *opaque)
-{
- BlockJob *job = opaque;
- const JobDriver *drv = job->job.driver;
- BlockJobDriver *bjdrv = container_of(drv, BlockJobDriver, job_driver);
-
- job->job.aio_context = new_context;
- if (bjdrv->attached_aio_context) {
- bjdrv->attached_aio_context(job, new_context);
- }
-
- job_resume(&job->job);
-}
-
void block_job_drain(Job *job)
{
BlockJob *bjob = container_of(job, BlockJob, job);
}
}
-static void block_job_detach_aio_context(void *opaque)
-{
- BlockJob *job = opaque;
-
- /* In case the job terminates during aio_poll()... */
- job_ref(&job->job);
-
- job_pause(&job->job);
-
- while (!job->job.paused && !job_is_completed(&job->job)) {
- job_drain(&job->job);
- }
-
- job->job.aio_context = NULL;
- job_unref(&job->job);
-}
-
static char *child_job_get_parent_desc(BdrvChild *c)
{
BlockJob *job = c->opaque;
job_resume(&job->job);
}
+static bool child_job_can_set_aio_ctx(BdrvChild *c, AioContext *ctx,
+ GSList **ignore, Error **errp)
+{
+ BlockJob *job = c->opaque;
+ GSList *l;
+
+ for (l = job->nodes; l; l = l->next) {
+ BdrvChild *sibling = l->data;
+ if (!bdrv_child_can_set_aio_context(sibling, ctx, ignore, errp)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static void child_job_set_aio_ctx(BdrvChild *c, AioContext *ctx,
+ GSList **ignore)
+{
+ BlockJob *job = c->opaque;
+ GSList *l;
+
+ for (l = job->nodes; l; l = l->next) {
+ BdrvChild *sibling = l->data;
+ if (g_slist_find(*ignore, sibling)) {
+ continue;
+ }
+ *ignore = g_slist_prepend(*ignore, sibling);
+ bdrv_set_aio_context_ignore(sibling->bs, ctx, ignore);
+ }
+
+ job->job.aio_context = ctx;
+}
+
static const BdrvChildRole child_job = {
.get_parent_desc = child_job_get_parent_desc,
.drained_begin = child_job_drained_begin,
.drained_poll = child_job_drained_poll,
.drained_end = child_job_drained_end,
+ .can_set_aio_ctx = child_job_can_set_aio_ctx,
+ .set_aio_ctx = child_job_set_aio_ctx,
.stay_at_node = true,
};
bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
- blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
- block_job_detach_aio_context, job);
+ blk_set_allow_aio_context_change(blk, true);
/* Only set speed when necessary to avoid NotSupported error */
if (speed != 0) {
fi
# Remove old dependency files to make sure that they get properly regenerated
-rm -f *-config-devices.mak.d
+rm -f */config-devices.mak.d
if test -z "$python"
then
# Probe for guest agent support/options
if [ "$guest_agent" != "no" ]; then
- if [ "$linux" = "yes" -o "$bsd" = "yes" -o "$solaris" = "yes" -o "$mingw32" = "yes" ] ; then
+ if [ "$softmmu" = no -a "$want_tools" = no ] ; then
+ guest_agent=no
+ elif [ "$linux" = "yes" -o "$bsd" = "yes" -o "$solaris" = "yes" -o "$mingw32" = "yes" ] ; then
tools="qemu-ga $tools"
guest_agent=yes
elif [ "$guest_agent" != yes ]; then
*
*/
-#include <inttypes.h>
-
#include "qemu/osdep.h"
+
#include "err.h"
#include "addrspace.h"
#include "pe.h"
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
-#include <inttypes.h>
-
#include "qemu/osdep.h"
+
#include "pdb.h"
#include "err.h"
*
*/
-#ifndef ELF2DMP_ELF_H
-#define ELF2DMP_ELF_H
+#ifndef ELF2DMP_QEMU_ELF_H
+#define ELF2DMP_QEMU_ELF_H
#include "elf.h"
Elf64_Phdr *elf64_getphdr(void *map);
Elf64_Half elf_getphdrnum(void *map);
-#endif /* ELF2DMP_ELF_H */
+#endif /* ELF2DMP_QEMU_ELF_H */
vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
{
int i;
- VhostUserMemory *memory = &vmsg->payload.memory;
+ VhostUserMemory m = vmsg->payload.memory, *memory = &m;
dev->nregions = memory->nregions;
DPRINT("Nregions: %d\n", memory->nregions);
vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
{
int i;
- VhostUserMemory *memory = &vmsg->payload.memory;
+ VhostUserMemory m = vmsg->payload.memory, *memory = &m;
for (i = 0; i < dev->nregions; i++) {
VuDevRegion *r = &dev->regions[i];
static bool
vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
{
- struct vhost_vring_addr *vra = &vmsg->payload.addr;
+ struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr;
unsigned int index = vra->index;
VuVirtq *vq = &dev->vq[index];
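The recurring pattern in these handlers: copy the payload out of the message into an aligned local before taking a pointer, so nothing dereferences through the address of a packed-struct member (the vhost-user message is a packed wire format). In isolation, with a hypothetical packed type:

    typedef struct __attribute__((packed)) Msg {
        uint8_t tag;
        uint64_t value;      /* misaligned inside the packed struct */
    } Msg;

    static uint64_t msg_value(Msg *msg)
    {
        Msg copy = *msg;     /* aligned local copy */
        return copy.value;   /* no address-of-packed-member access */
    }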
features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
}
+ if (dev->iface->get_config && dev->iface->set_config) {
+ features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
+ }
+
if (dev->iface->get_protocol_features) {
features |= dev->iface->get_protocol_features(dev);
}
*/
#include "qemu/osdep.h"
-#include "sys/poll.h"
-#include "sys/ioctl.h"
-#include "pthread.h"
-#include "syslog.h"
-
-#include "infiniband/verbs.h"
-#include "infiniband/umad.h"
-#include "infiniband/umad_types.h"
-#include "infiniband/umad_sa.h"
-#include "infiniband/umad_cm.h"
+#include <sys/poll.h>
+#include <sys/ioctl.h>
+#include <pthread.h>
+#include <syslog.h>
+
+#include <infiniband/verbs.h>
+#include <infiniband/umad.h>
+#include <infiniband/umad_types.h>
+#include <infiniband/umad_sa.h>
+#include <infiniband/umad_cm.h>
#include "rdmacm-mux.h"
#define RDMACM_MUX_H
#include "linux/if.h"
-#include "infiniband/verbs.h"
-#include "infiniband/umad.h"
-#include "rdma/rdma_user_cm.h"
+#include <infiniband/verbs.h>
+#include <infiniband/umad.h>
+#include <rdma/rdma_user_cm.h>
typedef enum RdmaCmMuxMsgType {
RDMACM_MUX_MSG_TYPE_REQ = 0,
# We support all the 32 bit boards so need all their config
include arm-softmmu.mak
-CONFIG_AUX=y
-CONFIG_DDC=y
-CONFIG_DPCD=y
-CONFIG_XLNX_ZYNQMP=y
CONFIG_XLNX_ZYNQMP_ARM=y
CONFIG_XLNX_VERSAL=y
-CONFIG_ARM_SMMUV3=y
# Default configuration for arm-softmmu
-CONFIG_PCI=y
-CONFIG_PCI_DEVICES=y
-CONFIG_PCI_TESTDEV=y
-CONFIG_VGA=y
-CONFIG_NAND=y
-CONFIG_ECC=y
-CONFIG_SERIAL=y
-CONFIG_MAX7310=y
-CONFIG_WM8750=y
-CONFIG_TWL92230=y
-CONFIG_TSC2005=y
-CONFIG_LM832X=y
-CONFIG_TMP105=y
-CONFIG_TMP421=y
-CONFIG_PCA9552=y
-CONFIG_STELLARIS=y
-CONFIG_STELLARIS_INPUT=y
-CONFIG_STELLARIS_ENET=y
-CONFIG_SSD0303=y
-CONFIG_SSD0323=y
-CONFIG_DDC=y
-CONFIG_SII9022=y
-CONFIG_ADS7846=y
-CONFIG_MAX111X=y
-CONFIG_SSI_SD=y
-CONFIG_SSI_M25P80=y
-CONFIG_LAN9118=y
-CONFIG_SMC91C111=y
-CONFIG_ALLWINNER_EMAC=y
-CONFIG_IMX_FEC=y
-CONFIG_FTGMAC100=y
-CONFIG_DS1338=y
-CONFIG_PFLASH_CFI01=y
-CONFIG_PFLASH_CFI02=y
-CONFIG_MICRODRIVE=y
-CONFIG_USB_MUSB=y
-CONFIG_USB_EHCI_SYSBUS=y
-CONFIG_PLATFORM_BUS=y
-CONFIG_VIRTIO_MMIO=y
-
-CONFIG_ARM11MPCORE=y
-CONFIG_A9MPCORE=y
-CONFIG_A15MPCORE=y
-
+# TODO: ARM_V7M is currently always required - make this more flexible!
CONFIG_ARM_V7M=y
-CONFIG_NETDUINO2=y
-CONFIG_ARM_GIC=y
-CONFIG_ARM_TIMER=y
-CONFIG_ARM_MPTIMER=y
-CONFIG_A9_GTIMER=y
-CONFIG_PL011=y
-CONFIG_PL022=y
-CONFIG_PL031=y
-CONFIG_PL041=y
-CONFIG_PL050=y
-CONFIG_PL061=y
-CONFIG_PL080=y
-CONFIG_PL110=y
-CONFIG_PL181=y
-CONFIG_PL190=y
-CONFIG_PL310=y
-CONFIG_PL330=y
-CONFIG_CADENCE=y
-CONFIG_XGMAC=y
-CONFIG_EXYNOS4=y
-CONFIG_PXA2XX=y
-CONFIG_BITBANG_I2C=y
-CONFIG_FRAMEBUFFER=y
-CONFIG_XILINX_SPIPS=y
-CONFIG_ZYNQ_DEVCFG=y
+# CONFIG_PCI_DEVICES=n
+# CONFIG_TEST_DEVICES=n
-CONFIG_ARM11SCU=y
-CONFIG_A9SCU=y
-CONFIG_DIGIC=y
-CONFIG_MARVELL_88W8618=y
-CONFIG_OMAP=y
-CONFIG_TSC210X=y
-CONFIG_BLIZZARD=y
-CONFIG_ONENAND=y
-CONFIG_TUSB6010=y
-CONFIG_IMX=y
-CONFIG_MAINSTONE=y
-CONFIG_MPS2=y
+CONFIG_ARM_VIRT=y
+CONFIG_CUBIEBOARD=y
+CONFIG_EXYNOS4=y
+CONFIG_HIGHBANK=y
+CONFIG_INTEGRATOR=y
+CONFIG_FSL_IMX31=y
+CONFIG_MUSICPAL=y
CONFIG_MUSCA=y
+CONFIG_CHEETAH=y
+CONFIG_SX1=y
CONFIG_NSERIES=y
-CONFIG_RASPI=y
+CONFIG_STELLARIS=y
CONFIG_REALVIEW=y
-CONFIG_ZAURUS=y
-CONFIG_ZYNQ=y
-CONFIG_STM32F2XX_TIMER=y
-CONFIG_STM32F2XX_USART=y
-CONFIG_STM32F2XX_SYSCFG=y
-CONFIG_STM32F2XX_ADC=y
-CONFIG_STM32F2XX_SPI=y
-CONFIG_STM32F205_SOC=y
-CONFIG_NRF51_SOC=y
-
-CONFIG_CMSDK_APB_TIMER=y
-CONFIG_CMSDK_APB_DUALTIMER=y
-CONFIG_CMSDK_APB_UART=y
-CONFIG_CMSDK_APB_WATCHDOG=y
-
-CONFIG_MPS2_FPGAIO=y
-CONFIG_MPS2_SCC=y
-
-CONFIG_TZ_MPC=y
-CONFIG_TZ_MSC=y
-CONFIG_TZ_PPC=y
-CONFIG_ARMSSE=y
-CONFIG_IOTKIT_SECCTL=y
-CONFIG_IOTKIT_SYSCTL=y
-CONFIG_IOTKIT_SYSINFO=y
-CONFIG_ARMSSE_CPUID=y
-CONFIG_ARMSSE_MHU=y
-
CONFIG_VERSATILE=y
-CONFIG_VERSATILE_PCI=y
-CONFIG_VERSATILE_I2C=y
-
-CONFIG_PCI_EXPRESS=y
-CONFIG_PCI_EXPRESS_GENERIC_BRIDGE=y
-
-CONFIG_SDHCI=y
-CONFIG_INTEGRATOR=y
-CONFIG_INTEGRATOR_DEBUG=y
-
-CONFIG_ALLWINNER_A10_PIT=y
-CONFIG_ALLWINNER_A10_PIC=y
-CONFIG_ALLWINNER_A10=y
-
-CONFIG_FSL_IMX6=y
-CONFIG_FSL_IMX31=y
+CONFIG_VEXPRESS=y
+CONFIG_ZYNQ=y
+CONFIG_MAINSTONE=y
+CONFIG_GUMSTIX=y
+CONFIG_SPITZ=y
+CONFIG_TOSA=y
+CONFIG_Z2=y
+CONFIG_COLLIE=y
+CONFIG_ASPEED_SOC=y
+CONFIG_NETDUINO2=y
+CONFIG_MPS2=y
+CONFIG_RASPI=y
+CONFIG_DIGIC=y
+CONFIG_SABRELITE=y
+CONFIG_EMCRAFT_SF2=y
+CONFIG_MICROBIT=y
CONFIG_FSL_IMX25=y
CONFIG_FSL_IMX7=y
CONFIG_FSL_IMX6UL=y
-
-CONFIG_IMX_I2C=y
-
-CONFIG_PCIE_PORT=y
-CONFIG_XIO3130=y
-CONFIG_IOH3420=y
-CONFIG_I82801B11=y
-CONFIG_ACPI=y
-CONFIG_ARM_VIRT=y
-CONFIG_SMBIOS=y
-CONFIG_ASPEED_SOC=y
-CONFIG_SMBUS_EEPROM=y
-CONFIG_GPIO_KEY=y
-CONFIG_MSF2=y
-CONFIG_FW_CFG_DMA=y
-CONFIG_XILINX_AXI=y
-CONFIG_PCI_EXPRESS_DESIGNWARE=y
-
-CONFIG_STRONGARM=y
-CONFIG_HIGHBANK=y
-CONFIG_MUSICPAL=y
-
-# for realview and versatilepb
-CONFIG_LSI_SCSI_PCI=y
*
*/
-#ifndef NANOMIPS_DISASSEMBLER_H
-#define NANOMIPS_DISASSEMBLER_H
+#ifndef DISAS_NANOMIPS_H
+#define DISAS_NANOMIPS_H
#include <string>
variable::
MINIKCONF_ARGS = \
- $@ $*-config.devices.mak.d $< $(MINIKCONF_INPUTS) \
+ $@ $*/config-devices.mak.d $< $(MINIKCONF_INPUTS) \
CONFIG_KVM=$(CONFIG_KVM) \
CONFIG_SPICE=$(CONFIG_SPICE) \
CONFIG_TPM=$(CONFIG_TPM) \
int (*renameat)(FsContext *ctx, V9fsPath *olddir, const char *old_name,
V9fsPath *newdir, const char *new_name);
int (*unlinkat)(FsContext *ctx, V9fsPath *dir, const char *name, int flags);
- void *opaque;
};
#endif
*
*/
-#ifndef _FSDEV_THROTTLE_H
-#define _FSDEV_THROTTLE_H
+#ifndef QEMU_FSDEV_THROTTLE_H
+#define QEMU_FSDEV_THROTTLE_H
#include "block/aio.h"
#include "qemu/main-loop.h"
struct iovec *, int);
void fsdev_throttle_cleanup(FsThrottle *);
-#endif /* _FSDEV_THROTTLE_H */
+
+#endif /* QEMU_FSDEV_THROTTLE_H */
#include "qemu/error-report.h"
#include "qemu/option.h"
+/*
+ * A table to store the various file systems and their callback operations.
+ * -----------------
+ * fstype | ops
+ * -----------------
+ * local | local_ops
+ * . |
+ * . |
+ * . |
+ * . |
+ * -----------------
+ * etc
+ */
+typedef struct FsDriverTable {
+ const char *name;
+ FileOperations *ops;
+ const char **opts;
+} FsDriverTable;
+
+typedef struct FsDriverListEntry {
+ FsDriverEntry fse;
+ QTAILQ_ENTRY(FsDriverListEntry) next;
+} FsDriverListEntry;
+
static QTAILQ_HEAD(, FsDriverListEntry) fsdriver_entries =
QTAILQ_HEAD_INITIALIZER(fsdriver_entries);
+#define COMMON_FS_DRIVER_OPTIONS "id", "fsdriver", "readonly"
+
static FsDriverTable FsDrivers[] = {
- { .name = "local", .ops = &local_ops},
- { .name = "synth", .ops = &synth_ops},
- { .name = "proxy", .ops = &proxy_ops},
+ {
+ .name = "local",
+ .ops = &local_ops,
+ .opts = (const char * []) {
+ COMMON_FS_DRIVER_OPTIONS,
+ "security_model",
+ "path",
+ "writeout",
+ "fmode",
+ "dmode",
+ "throttling.bps-total",
+ "throttling.bps-read",
+ "throttling.bps-write",
+ "throttling.iops-total",
+ "throttling.iops-read",
+ "throttling.iops-write",
+ "throttling.bps-total-max",
+ "throttling.bps-read-max",
+ "throttling.bps-write-max",
+ "throttling.iops-total-max",
+ "throttling.iops-read-max",
+ "throttling.iops-write-max",
+ "throttling.bps-total-max-length",
+ "throttling.bps-read-max-length",
+ "throttling.bps-write-max-length",
+ "throttling.iops-total-max-length",
+ "throttling.iops-read-max-length",
+ "throttling.iops-write-max-length",
+ "throttling.iops-size",
+ },
+ },
+ {
+ .name = "synth",
+ .ops = &synth_ops,
+ .opts = (const char * []) {
+ COMMON_FS_DRIVER_OPTIONS,
+ },
+ },
+ {
+ .name = "proxy",
+ .ops = &proxy_ops,
+ .opts = (const char * []) {
+ COMMON_FS_DRIVER_OPTIONS,
+ "socket",
+ "sock_fd",
+ "writeout",
+ },
+ },
};
+static int validate_opt(void *opaque, const char *name, const char *value,
+ Error **errp)
+{
+ FsDriverTable *drv = opaque;
+ const char **opt;
+
+ for (opt = drv->opts; *opt; opt++) {
+ if (!strcmp(*opt, name)) {
+ return 0;
+ }
+ }
+
+ error_setg(errp, "'%s' is invalid for fsdriver '%s'", name, drv->name);
+ return -1;
+}
+
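With this validation in place, a stray option now fails up front instead of being silently ignored, e.g. (command lines illustrative, error text from validate_opt() above):

    -fsdev local,id=fs0,path=/tmp,security_model=none    accepted
    -fsdev local,id=fs0,path=/tmp,bogus=on               rejected with
                       "'bogus' is invalid for fsdriver 'local'"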
int qemu_fsdev_add(QemuOpts *opts, Error **errp)
{
int i;
return -1;
}
+ if (qemu_opt_foreach(opts, validate_opt, &FsDrivers[i], errp)) {
+ return -1;
+ }
+
fsle = g_malloc0(sizeof(*fsle));
fsle->fse.fsdev_id = g_strdup(fsdev_id);
fsle->fse.ops = FsDrivers[i].ops;
#define QEMU_FSDEV_H
#include "file-op-9p.h"
-
-/*
- * A table to store the various file systems and their callback operations.
- * -----------------
- * fstype | ops
- * -----------------
- * local | local_ops
- * . |
- * . |
- * . |
- * . |
- * -----------------
- * etc
- */
-typedef struct FsDriverTable {
- const char *name;
- FileOperations *ops;
-} FsDriverTable;
-
-typedef struct FsDriverListEntry {
- FsDriverEntry fse;
- QTAILQ_ENTRY(FsDriverListEntry) next;
-} FsDriverListEntry;
-
int qemu_fsdev_add(QemuOpts *opts, Error **errp);
FsDriverEntry *get_fsdev_fsentry(char *id);
extern FileOperations local_ops;
-extern FileOperations handle_ops;
extern FileOperations synth_ops;
extern FileOperations proxy_ops;
#endif
MemHotplugState acpi_memory_hotplug;
} PIIX4PMState;
-#define TYPE_PIIX4_PM "PIIX4_PM"
-
#define PIIX4_PM(obj) \
OBJECT_CHECK(PIIX4PMState, (obj), TYPE_PIIX4_PM)
piix4_pm_add_propeties(s);
}
-Object *piix4_pm_find(void)
-{
- bool ambig;
- Object *o = object_resolve_path_type("", TYPE_PIIX4_PM, &ambig);
-
- if (ambig || !o) {
- return NULL;
- }
- return o;
-}
-
I2CBus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base,
qemu_irq sci_irq, qemu_irq smi_irq,
int smm_enabled, DeviceState **piix4_pm)
config ARM_VIRT
bool
+ imply PCI_DEVICES
+ imply TEST_DEVICES
+ imply VFIO_AMD_XGBE
imply VFIO_PLATFORM
+ imply VFIO_XGMAC
+ select A15MPCORE
+ select ACPI
+ select ARM_SMMUV3
+ select GPIO_KEY
+ select FW_CFG_DMA
+ select PCI_EXPRESS
+ select PCI_EXPRESS_GENERIC_BRIDGE
+ select PFLASH_CFI01
+ select PL011 # UART
+ select PL031 # RTC
+ select PL061 # GPIO
+ select PLATFORM_BUS
+ select SMBIOS
+ select VIRTIO_MMIO
+
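Note the two relationship kinds used throughout these entries: "select" forces a dependency on whenever the board is enabled, while "imply" only makes it default-on, leaving the user free to disable it. A hypothetical entry combining both:

    config EXAMPLE_BOARD
        bool
        imply PCI_DEVICES   # default-on, user may still disable
        select PL011        # hard dependency, always enabled with the board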
+config CHEETAH
+ bool
+ select OMAP
+ select TSC210X
+
+config CUBIEBOARD
+ bool
+ select ALLWINNER_A10
config DIGIC
bool
select PTIMER
+ select PFLASH_CFI02
config EXYNOS4
bool
+ select A9MPCORE
+ select I2C
+ select LAN9118
+ select PL310 # cache controller
select PTIMER
+ select SDHCI
+ select USB_EHCI_SYSBUS
config HIGHBANK
bool
+ select A9MPCORE
+ select A15MPCORE
+ select AHCI
+ select ARM_TIMER # sp804
+ select ARM_V7M
+ select PL011 # UART
+ select PL022 # SPI
+ select PL031 # RTC
+ select PL061 # GPIO
+ select PL310 # cache controller
+ select XGMAC # ethernet
config INTEGRATOR
bool
+ select ARM_TIMER
+ select INTEGRATOR_DEBUG
+ select PL011 # UART
+ select PL031 # RTC
+ select PL050 # keyboard/mouse
+ select PL110 # pl111 LCD controller
+ select PL181 # MMC/SD card interface
+ select SMC91C111
config MAINSTONE
bool
+ select PXA2XX
+ select PFLASH_CFI01
+ select SMC91C111
+
+config MUSCA
+ bool
+ select ARMSSE
+ select PL011
+ select PL031
config MUSICPAL
bool
+ select BITBANG_I2C
+ select MARVELL_88W8618
select PTIMER
+ select PFLASH_CFI02
+ select SERIAL
+ select WM8750
config NETDUINO2
bool
+ select STM32F205_SOC
config NSERIES
bool
+ select OMAP
+ select TMP105 # temperature sensor
+ select BLIZZARD # LCD/TV controller
+ select ONENAND
+ select TSC210X # touchscreen/sensors/audio
+ select TSC2005 # touchscreen/sensors/keypad
+ select LM832X # GPIO keyboard chip
+ select TWL92230 # energy-management
+ select TUSB6010
config OMAP
bool
+ select FRAMEBUFFER
+ select I2C
+ select ECC
+ select NAND
+ select PFLASH_CFI01
+ select SD
+ select SERIAL
config PXA2XX
bool
+ select FRAMEBUFFER
+ select I2C
+ select SERIAL
+ select SD
+ select SSI
+ select USB_OHCI
+
+config GUMSTIX
+ bool
+ select PFLASH_CFI01
+ select SMC91C111
+ select PXA2XX
+
+config TOSA
+ bool
+ select ZAURUS # scoop
+ select MICRODRIVE
+ select PXA2XX
+
+config SPITZ
+ bool
+ select ADS7846 # touchscreen controller
+ select MAX111X # A/D converter
+ select WM8750 # audio codec
+ select MAX7310 # GPIO expander
+ select ZAURUS # scoop
+ select NAND # memory
+ select ECC # Error-correcting for NAND
+ select MICRODRIVE
+ select PXA2XX
+
+config Z2
+ bool
+ select PFLASH_CFI01
+ select WM8750
+ select PL011 # UART
+ select PXA2XX
config REALVIEW
bool
+ imply PCI_DEVICES
+ imply PCI_TESTDEV
+ select SMC91C111
+ select LAN9118
+ select A9MPCORE
+ select A15MPCORE
+ select ARM11MPCORE
+ select ARM_TIMER
+ select VERSATILE_PCI
+ select WM8750 # audio codec
+ select LSI_SCSI_PCI
+ select PCI
+ select PL011 # UART
+ select PL031 # RTC
+ select PL041 # audio codec
+ select PL050 # keyboard/mouse
+ select PL061 # GPIO
+ select PL080 # DMA controller
+ select PL110
+ select PL181 # MMC/SD card interface
+ select PL310 # cache controller
+ select VERSATILE_I2C
+ select DS1338 # I2C RTC+NVRAM
+ select USB_OHCI
+
+config SABRELITE
+ bool
+ select FSL_IMX6
+ select SSI_M25P80
config STELLARIS
bool
+ select ARM_V7M
+ select CMSDK_APB_WATCHDOG
+ select I2C
+ select PL011 # UART
+ select PL022 # SPI
+ select PL061 # GPIO
+ select SSD0303 # OLED display
+ select SSD0323 # OLED display
+ select SSI_SD
+ select STELLARIS_INPUT
+ select STELLARIS_ENET # ethernet
config STRONGARM
bool
+ select PXA2XX
+
+config COLLIE
+ bool
+ select PFLASH_CFI01
+ select ZAURUS # scoop
+ select STRONGARM
+
+config SX1
+ bool
+ select OMAP
config VERSATILE
bool
+ select ARM_TIMER # sp804
+ select PFLASH_CFI01
+ select LSI_SCSI_PCI
+ select PL050 # keyboard/mouse
+ select PL080 # DMA controller
+ select PL190 # Vector PIC
+ select REALVIEW
+ select USB_OHCI
+
+config VEXPRESS
+ bool
+ select A9MPCORE
+ select A15MPCORE
+ select ARM_MPTIMER
+ select ARM_TIMER # sp804
+ select LAN9118
+ select PFLASH_CFI01
+ select PL011 # UART
+ select PL041 # audio codec
+ select PL181 # MMC/SD card interface
+ select REALVIEW
+ select SII9022
+ select VIRTIO_MMIO
config ZYNQ
bool
+ select A9MPCORE
+ select CADENCE # UART
+ select PFLASH_CFI02
+ select PL330
+ select SDHCI
+ select SSI_M25P80
+ select USB_EHCI_SYSBUS
+ select XILINX # UART
+ select XILINX_AXI
+ select XILINX_SPI
+ select XILINX_SPIPS
+ select ZYNQ_DEVCFG
config ARM_V7M
bool
config ALLWINNER_A10
bool
+ select AHCI
+ select ALLWINNER_A10_PIT
+ select ALLWINNER_A10_PIC
+ select ALLWINNER_EMAC
+ select SERIAL
config RASPI
bool
+ select FRAMEBUFFER
+ select PL011 # UART
+ select SDHCI
config STM32F205_SOC
bool
+ select ARM_V7M
+ select STM32F2XX_TIMER
+ select STM32F2XX_USART
+ select STM32F2XX_SYSCFG
+ select STM32F2XX_ADC
+ select STM32F2XX_SPI
config XLNX_ZYNQMP_ARM
bool
+ select AHCI
+ select ARM_GIC
+ select CADENCE
+ select DDC
+ select DPCD
+ select SDHCI
+ select SSI
+ select SSI_M25P80
+ select XILINX_AXI
+ select XILINX_SPIPS
+ select XLNX_ZYNQMP
config XLNX_VERSAL
bool
+ select ARM_GIC
+ select PL011
+ select CADENCE
+ select VIRTIO_MMIO
config FSL_IMX25
bool
+ select IMX
+ select IMX_FEC
+ select IMX_I2C
+ select DS1338
config FSL_IMX31
bool
+ select SERIAL
+ select IMX
+ select IMX_I2C
+ select LAN9118
config FSL_IMX6
bool
+ select A9MPCORE
+ select IMX
+ select IMX_FEC
+ select IMX_I2C
+ select SDHCI
config ASPEED_SOC
bool
+ select DS1338
+ select FTGMAC100
+ select I2C
+ select PCA9552
+ select SERIAL
+ select SMBUS_EEPROM
+ select SSI
+ select SSI_M25P80
+ select TMP105
+ select TMP421
config MPS2
bool
+ select ARMSSE
+ select LAN9118
+ select MPS2_FPGAIO
+ select MPS2_SCC
+ select PL022 # SPI
+ select PL080 # DMA controller
config FSL_IMX7
bool
+ imply PCI_DEVICES
+ imply TEST_DEVICES
+ select A15MPCORE
+ select PCI
+ select IMX
+ select IMX_FEC
+ select IMX_I2C
+ select PCI_EXPRESS_DESIGNWARE
+ select SDHCI
config ARM_SMMUV3
bool
config FSL_IMX6UL
bool
+ select A15MPCORE
+ select IMX
+ select IMX_FEC
+ select IMX_I2C
+ select SDHCI
+
+config MICROBIT
+ bool
+ select NRF51_SOC
config NRF51_SOC
bool
+ select I2C
+ select ARM_V7M
+
+config EMCRAFT_SF2
+ bool
+ select MSF2
+ select SSI_M25P80
config MSF2
bool
+ select ARM_V7M
select PTIMER
+ select SERIAL
+ select SSI
config ZAURUS
bool
+ select NAND
+ select ECC
config A9MPCORE
bool
+ select A9_GTIMER
+ select A9SCU # snoop control unit
+ select ARM_GIC
+ select ARM_MPTIMER
config A15MPCORE
bool
+ select ARM_GIC
config ARM11MPCORE
bool
+ select ARM11SCU
config ARMSSE
bool
+ select ARM_V7M
+ select ARMSSE_CPUID
+ select ARMSSE_MHU
+ select CMSDK_APB_TIMER
+ select CMSDK_APB_DUALTIMER
+ select CMSDK_APB_UART
+ select CMSDK_APB_WATCHDOG
+ select IOTKIT_SECCTL
+ select IOTKIT_SYSCTL
+ select IOTKIT_SYSINFO
+ select TZ_MPC
+ select TZ_MSC
+ select TZ_PPC
config ARMSSE_CPUID
bool
config ARMSSE_MHU
bool
-
-config MUSCA
- bool
-obj-y += boot.o sysbus-fdt.o
+obj-y += boot.o
+obj-$(CONFIG_PLATFORM_BUS) += sysbus-fdt.o
obj-$(CONFIG_ARM_VIRT) += virt.o
obj-$(CONFIG_ACPI) += virt-acpi-build.o
obj-$(CONFIG_DIGIC) += digic_boards.o
obj-$(CONFIG_EXYNOS4) += exynos4_boards.o
+obj-$(CONFIG_EMCRAFT_SF2) += msf2-som.o
obj-$(CONFIG_HIGHBANK) += highbank.o
obj-$(CONFIG_INTEGRATOR) += integratorcp.o
obj-$(CONFIG_MAINSTONE) += mainstone.o
+obj-$(CONFIG_MICROBIT) += microbit.o
obj-$(CONFIG_MUSICPAL) += musicpal.o
obj-$(CONFIG_NETDUINO2) += netduino2.o
obj-$(CONFIG_NSERIES) += nseries.o
-obj-$(CONFIG_OMAP) += omap_sx1.o palm.o
-obj-$(CONFIG_PXA2XX) += gumstix.o spitz.o tosa.o z2.o
+obj-$(CONFIG_SX1) += omap_sx1.o
+obj-$(CONFIG_CHEETAH) += palm.o
+obj-$(CONFIG_GUMSTIX) += gumstix.o
+obj-$(CONFIG_SPITZ) += spitz.o
+obj-$(CONFIG_TOSA) += tosa.o
+obj-$(CONFIG_Z2) += z2.o
obj-$(CONFIG_REALVIEW) += realview.o
obj-$(CONFIG_STELLARIS) += stellaris.o
-obj-$(CONFIG_STRONGARM) += collie.o
-obj-$(CONFIG_VERSATILE) += vexpress.o versatilepb.o
+obj-$(CONFIG_COLLIE) += collie.o
+obj-$(CONFIG_VERSATILE) += versatilepb.o
+obj-$(CONFIG_VEXPRESS) += vexpress.o
obj-$(CONFIG_ZYNQ) += xilinx_zynq.o
+obj-$(CONFIG_SABRELITE) += sabrelite.o
obj-$(CONFIG_ARM_V7M) += armv7m.o
obj-$(CONFIG_EXYNOS4) += exynos4210.o
obj-$(CONFIG_XLNX_VERSAL) += xlnx-versal.o xlnx-versal-virt.o
obj-$(CONFIG_FSL_IMX25) += fsl-imx25.o imx25_pdk.o
obj-$(CONFIG_FSL_IMX31) += fsl-imx31.o kzm.o
-obj-$(CONFIG_FSL_IMX6) += fsl-imx6.o sabrelite.o
+obj-$(CONFIG_FSL_IMX6) += fsl-imx6.o
obj-$(CONFIG_ASPEED_SOC) += aspeed_soc.o aspeed.o
obj-$(CONFIG_MPS2) += mps2.o
obj-$(CONFIG_MPS2) += mps2-tz.o
-obj-$(CONFIG_MSF2) += msf2-soc.o msf2-som.o
+obj-$(CONFIG_MSF2) += msf2-soc.o
obj-$(CONFIG_MUSCA) += musca.o
obj-$(CONFIG_ARMSSE) += armsse.o
obj-$(CONFIG_FSL_IMX7) += fsl-imx7.o mcimx7d-sabre.o
obj-$(CONFIG_ARM_SMMUV3) += smmu-common.o smmuv3.o
obj-$(CONFIG_FSL_IMX6UL) += fsl-imx6ul.o mcimx6ul-evk.o
-obj-$(CONFIG_NRF51_SOC) += nrf51_soc.o microbit.o
+obj-$(CONFIG_NRF51_SOC) += nrf51_soc.o
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef HW_ARM_SMMU_V3_INTERNAL_H
-#define HW_ARM_SMMU_V3_INTERNAL_H
+#ifndef HW_ARM_SMMUV3_INTERNAL_H
+#define HW_ARM_SMMUV3_INTERNAL_H
#include "hw/arm/smmu-common.h"
}
qemu_sglist_destroy(&qsg);
} else {
- if (unlikely(qemu_iovec_to_buf(&iov, 0, ptr, len) != len)) {
+ if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
trace_nvme_err_invalid_dma();
status = NVME_INVALID_FIELD | NVME_DNR;
}
depends on PCI
select SERIAL
+config SERIAL_PCI_MULTI
+ bool
+ default y if PCI_DEVICES
+ depends on PCI
+ select SERIAL
+
config VIRTIO_SERIAL
bool
default y
common-obj-$(CONFIG_SERIAL) += serial.o
common-obj-$(CONFIG_SERIAL_ISA) += serial-isa.o
common-obj-$(CONFIG_SERIAL_PCI) += serial-pci.o
+common-obj-$(CONFIG_SERIAL_PCI_MULTI) += serial-pci-multi.o
common-obj-$(CONFIG_VIRTIO_SERIAL) += virtio-console.o
common-obj-$(CONFIG_XILINX) += xilinx_uartlite.o
common-obj-$(CONFIG_XEN) += xen_console.o
break;
case SERIAL_DATA:
trace_escc_mem_writeb_data(CHN_C(s), val);
+ /*
+ * Lower the irq when data is written to the Tx buffer and no other
+ * interrupts are currently pending. The irq will be raised again once
+ * the Tx buffer becomes empty below.
+ */
+ s->txint = 0;
+ escc_update_irq(s);
s->tx = val;
if (s->wregs[W_TXCTRL2] & TXCTRL2_TXEN) { // tx enabled
if (qemu_chr_fe_backend_connected(&s->chr)) {
/*
* QEMU GRLIB APB UART Emulator
*
- * Copyright (c) 2010-2011 AdaCore
+ * Copyright (c) 2010-2019 AdaCore
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
*/
#include "qemu/osdep.h"
+#include "hw/sparc/grlib.h"
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
#define FIFO_LENGTH 1024
-#define TYPE_GRLIB_APB_UART "grlib,apbuart"
#define GRLIB_APB_UART(obj) \
OBJECT_CHECK(UART, (obj), TYPE_GRLIB_APB_UART)
--- /dev/null
+/*
+ * QEMU 16550A multi UART emulation
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ * Copyright (c) 2008 Citrix Systems, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/* see docs/specs/pci-serial.txt */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/char/serial.h"
+#include "hw/pci/pci.h"
+
+#define PCI_SERIAL_MAX_PORTS 4
+
+typedef struct PCIMultiSerialState {
+ PCIDevice dev;
+ MemoryRegion iobar;
+ uint32_t ports;
+ char *name[PCI_SERIAL_MAX_PORTS];
+ SerialState state[PCI_SERIAL_MAX_PORTS];
+ uint32_t level[PCI_SERIAL_MAX_PORTS];
+ qemu_irq *irqs;
+ uint8_t prog_if;
+} PCIMultiSerialState;
+
+static void multi_serial_pci_exit(PCIDevice *dev)
+{
+ PCIMultiSerialState *pci = DO_UPCAST(PCIMultiSerialState, dev, dev);
+ SerialState *s;
+ int i;
+
+ for (i = 0; i < pci->ports; i++) {
+ s = pci->state + i;
+ serial_exit_core(s);
+ memory_region_del_subregion(&pci->iobar, &s->io);
+ g_free(pci->name[i]);
+ }
+ qemu_free_irqs(pci->irqs, pci->ports);
+}
+
+static void multi_serial_irq_mux(void *opaque, int n, int level)
+{
+ PCIMultiSerialState *pci = opaque;
+ int i, pending = 0;
+
+ pci->level[n] = level;
+ for (i = 0; i < pci->ports; i++) {
+ if (pci->level[i]) {
+ pending = 1;
+ }
+ }
+ pci_set_irq(&pci->dev, pending);
+}
+
+static void multi_serial_pci_realize(PCIDevice *dev, Error **errp)
+{
+ PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev);
+ PCIMultiSerialState *pci = DO_UPCAST(PCIMultiSerialState, dev, dev);
+ SerialState *s;
+ Error *err = NULL;
+ int i, nr_ports = 0;
+
+ switch (pc->device_id) {
+ case 0x0003:
+ nr_ports = 2;
+ break;
+ case 0x0004:
+ nr_ports = 4;
+ break;
+ }
+ assert(nr_ports > 0);
+ assert(nr_ports <= PCI_SERIAL_MAX_PORTS);
+
+ pci->dev.config[PCI_CLASS_PROG] = pci->prog_if;
+ pci->dev.config[PCI_INTERRUPT_PIN] = 0x01;
+ memory_region_init(&pci->iobar, OBJECT(pci), "multiserial", 8 * nr_ports);
+ pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->iobar);
+ pci->irqs = qemu_allocate_irqs(multi_serial_irq_mux, pci,
+ nr_ports);
+
+ for (i = 0; i < nr_ports; i++) {
+ s = pci->state + i;
+ s->baudbase = 115200;
+ serial_realize_core(s, &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ multi_serial_pci_exit(dev);
+ return;
+ }
+ s->irq = pci->irqs[i];
+ pci->name[i] = g_strdup_printf("uart #%d", i + 1);
+ memory_region_init_io(&s->io, OBJECT(pci), &serial_io_ops, s,
+ pci->name[i], 8);
+ memory_region_add_subregion(&pci->iobar, 8 * i, &s->io);
+ pci->ports++;
+ }
+}
+
+static const VMStateDescription vmstate_pci_multi_serial = {
+ .name = "pci-serial-multi",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(dev, PCIMultiSerialState),
+ VMSTATE_STRUCT_ARRAY(state, PCIMultiSerialState, PCI_SERIAL_MAX_PORTS,
+ 0, vmstate_serial, SerialState),
+ VMSTATE_UINT32_ARRAY(level, PCIMultiSerialState, PCI_SERIAL_MAX_PORTS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property multi_2x_serial_pci_properties[] = {
+ DEFINE_PROP_CHR("chardev1", PCIMultiSerialState, state[0].chr),
+ DEFINE_PROP_CHR("chardev2", PCIMultiSerialState, state[1].chr),
+ DEFINE_PROP_UINT8("prog_if", PCIMultiSerialState, prog_if, 0x02),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static Property multi_4x_serial_pci_properties[] = {
+ DEFINE_PROP_CHR("chardev1", PCIMultiSerialState, state[0].chr),
+ DEFINE_PROP_CHR("chardev2", PCIMultiSerialState, state[1].chr),
+ DEFINE_PROP_CHR("chardev3", PCIMultiSerialState, state[2].chr),
+ DEFINE_PROP_CHR("chardev4", PCIMultiSerialState, state[3].chr),
+ DEFINE_PROP_UINT8("prog_if", PCIMultiSerialState, prog_if, 0x02),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void multi_2x_serial_pci_class_initfn(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass);
+ pc->realize = multi_serial_pci_realize;
+ pc->exit = multi_serial_pci_exit;
+ pc->vendor_id = PCI_VENDOR_ID_REDHAT;
+ pc->device_id = PCI_DEVICE_ID_REDHAT_SERIAL2;
+ pc->revision = 1;
+ pc->class_id = PCI_CLASS_COMMUNICATION_SERIAL;
+ dc->vmsd = &vmstate_pci_multi_serial;
+ dc->props = multi_2x_serial_pci_properties;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+}
+
+static void multi_4x_serial_pci_class_initfn(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass);
+ pc->realize = multi_serial_pci_realize;
+ pc->exit = multi_serial_pci_exit;
+ pc->vendor_id = PCI_VENDOR_ID_REDHAT;
+ pc->device_id = PCI_DEVICE_ID_REDHAT_SERIAL4;
+ pc->revision = 1;
+ pc->class_id = PCI_CLASS_COMMUNICATION_SERIAL;
+ dc->vmsd = &vmstate_pci_multi_serial;
+ dc->props = multi_4x_serial_pci_properties;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+}
+
+static const TypeInfo multi_2x_serial_pci_info = {
+ .name = "pci-serial-2x",
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(PCIMultiSerialState),
+ .class_init = multi_2x_serial_pci_class_initfn,
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { },
+ },
+};
+
+static const TypeInfo multi_4x_serial_pci_info = {
+ .name = "pci-serial-4x",
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(PCIMultiSerialState),
+ .class_init = multi_4x_serial_pci_class_initfn,
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { },
+ },
+};
+
+static void multi_serial_pci_register_types(void)
+{
+ type_register_static(&multi_2x_serial_pci_info);
+ type_register_static(&multi_4x_serial_pci_info);
+}
+
+type_init(multi_serial_pci_register_types)
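The code is moved verbatim apart from minor style cleanup (spacing in "i + 1"), and the user-visible type names are unchanged, so existing command lines keep working, e.g. (chardev definitions assumed):

    -device pci-serial-2x,chardev1=ser0,chardev2=ser1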
#include "hw/char/serial.h"
#include "hw/pci/pci.h"
-#define PCI_SERIAL_MAX_PORTS 4
-
typedef struct PCISerialState {
PCIDevice dev;
SerialState state;
uint8_t prog_if;
} PCISerialState;
-typedef struct PCIMultiSerialState {
- PCIDevice dev;
- MemoryRegion iobar;
- uint32_t ports;
- char *name[PCI_SERIAL_MAX_PORTS];
- SerialState state[PCI_SERIAL_MAX_PORTS];
- uint32_t level[PCI_SERIAL_MAX_PORTS];
- qemu_irq *irqs;
- uint8_t prog_if;
-} PCIMultiSerialState;
-
-static void multi_serial_pci_exit(PCIDevice *dev);
static void serial_pci_realize(PCIDevice *dev, Error **errp)
{
pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io);
}
-static void multi_serial_irq_mux(void *opaque, int n, int level)
-{
- PCIMultiSerialState *pci = opaque;
- int i, pending = 0;
-
- pci->level[n] = level;
- for (i = 0; i < pci->ports; i++) {
- if (pci->level[i]) {
- pending = 1;
- }
- }
- pci_set_irq(&pci->dev, pending);
-}
-
-static void multi_serial_pci_realize(PCIDevice *dev, Error **errp)
-{
- PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev);
- PCIMultiSerialState *pci = DO_UPCAST(PCIMultiSerialState, dev, dev);
- SerialState *s;
- Error *err = NULL;
- int i, nr_ports = 0;
-
- switch (pc->device_id) {
- case 0x0003:
- nr_ports = 2;
- break;
- case 0x0004:
- nr_ports = 4;
- break;
- }
- assert(nr_ports > 0);
- assert(nr_ports <= PCI_SERIAL_MAX_PORTS);
-
- pci->dev.config[PCI_CLASS_PROG] = pci->prog_if;
- pci->dev.config[PCI_INTERRUPT_PIN] = 0x01;
- memory_region_init(&pci->iobar, OBJECT(pci), "multiserial", 8 * nr_ports);
- pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->iobar);
- pci->irqs = qemu_allocate_irqs(multi_serial_irq_mux, pci,
- nr_ports);
-
- for (i = 0; i < nr_ports; i++) {
- s = pci->state + i;
- s->baudbase = 115200;
- serial_realize_core(s, &err);
- if (err != NULL) {
- error_propagate(errp, err);
- multi_serial_pci_exit(dev);
- return;
- }
- s->irq = pci->irqs[i];
- pci->name[i] = g_strdup_printf("uart #%d", i+1);
- memory_region_init_io(&s->io, OBJECT(pci), &serial_io_ops, s,
- pci->name[i], 8);
- memory_region_add_subregion(&pci->iobar, 8 * i, &s->io);
- pci->ports++;
- }
-}
-
static void serial_pci_exit(PCIDevice *dev)
{
PCISerialState *pci = DO_UPCAST(PCISerialState, dev, dev);
qemu_free_irq(s->irq);
}
-static void multi_serial_pci_exit(PCIDevice *dev)
-{
- PCIMultiSerialState *pci = DO_UPCAST(PCIMultiSerialState, dev, dev);
- SerialState *s;
- int i;
-
- for (i = 0; i < pci->ports; i++) {
- s = pci->state + i;
- serial_exit_core(s);
- memory_region_del_subregion(&pci->iobar, &s->io);
- g_free(pci->name[i]);
- }
- qemu_free_irqs(pci->irqs, pci->ports);
-}
-
static const VMStateDescription vmstate_pci_serial = {
.name = "pci-serial",
.version_id = 1,
}
};
-static const VMStateDescription vmstate_pci_multi_serial = {
- .name = "pci-serial-multi",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (VMStateField[]) {
- VMSTATE_PCI_DEVICE(dev, PCIMultiSerialState),
- VMSTATE_STRUCT_ARRAY(state, PCIMultiSerialState, PCI_SERIAL_MAX_PORTS,
- 0, vmstate_serial, SerialState),
- VMSTATE_UINT32_ARRAY(level, PCIMultiSerialState, PCI_SERIAL_MAX_PORTS),
- VMSTATE_END_OF_LIST()
- }
-};
-
static Property serial_pci_properties[] = {
DEFINE_PROP_CHR("chardev", PCISerialState, state.chr),
DEFINE_PROP_UINT8("prog_if", PCISerialState, prog_if, 0x02),
DEFINE_PROP_END_OF_LIST(),
};
-static Property multi_2x_serial_pci_properties[] = {
- DEFINE_PROP_CHR("chardev1", PCIMultiSerialState, state[0].chr),
- DEFINE_PROP_CHR("chardev2", PCIMultiSerialState, state[1].chr),
- DEFINE_PROP_UINT8("prog_if", PCIMultiSerialState, prog_if, 0x02),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static Property multi_4x_serial_pci_properties[] = {
- DEFINE_PROP_CHR("chardev1", PCIMultiSerialState, state[0].chr),
- DEFINE_PROP_CHR("chardev2", PCIMultiSerialState, state[1].chr),
- DEFINE_PROP_CHR("chardev3", PCIMultiSerialState, state[2].chr),
- DEFINE_PROP_CHR("chardev4", PCIMultiSerialState, state[3].chr),
- DEFINE_PROP_UINT8("prog_if", PCIMultiSerialState, prog_if, 0x02),
- DEFINE_PROP_END_OF_LIST(),
-};
-
static void serial_pci_class_initfn(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
}
-static void multi_2x_serial_pci_class_initfn(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass);
- pc->realize = multi_serial_pci_realize;
- pc->exit = multi_serial_pci_exit;
- pc->vendor_id = PCI_VENDOR_ID_REDHAT;
- pc->device_id = PCI_DEVICE_ID_REDHAT_SERIAL2;
- pc->revision = 1;
- pc->class_id = PCI_CLASS_COMMUNICATION_SERIAL;
- dc->vmsd = &vmstate_pci_multi_serial;
- dc->props = multi_2x_serial_pci_properties;
- set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
-}
-
-static void multi_4x_serial_pci_class_initfn(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass);
- pc->realize = multi_serial_pci_realize;
- pc->exit = multi_serial_pci_exit;
- pc->vendor_id = PCI_VENDOR_ID_REDHAT;
- pc->device_id = PCI_DEVICE_ID_REDHAT_SERIAL4;
- pc->revision = 1;
- pc->class_id = PCI_CLASS_COMMUNICATION_SERIAL;
- dc->vmsd = &vmstate_pci_multi_serial;
- dc->props = multi_4x_serial_pci_properties;
- set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
-}
-
static const TypeInfo serial_pci_info = {
.name = "pci-serial",
.parent = TYPE_PCI_DEVICE,
},
};
-static const TypeInfo multi_2x_serial_pci_info = {
- .name = "pci-serial-2x",
- .parent = TYPE_PCI_DEVICE,
- .instance_size = sizeof(PCIMultiSerialState),
- .class_init = multi_2x_serial_pci_class_initfn,
- .interfaces = (InterfaceInfo[]) {
- { INTERFACE_CONVENTIONAL_PCI_DEVICE },
- { },
- },
-};
-
-static const TypeInfo multi_4x_serial_pci_info = {
- .name = "pci-serial-4x",
- .parent = TYPE_PCI_DEVICE,
- .instance_size = sizeof(PCIMultiSerialState),
- .class_init = multi_4x_serial_pci_class_initfn,
- .interfaces = (InterfaceInfo[]) {
- { INTERFACE_CONVENTIONAL_PCI_DEVICE },
- { },
- },
-};
-
static void serial_pci_register_types(void)
{
type_register_static(&serial_pci_info);
- type_register_static(&multi_2x_serial_pci_info);
- type_register_static(&multi_4x_serial_pci_info);
}
type_init(serial_pci_register_types)
rom->addr = addr;
rom->romsize = max_len ? max_len : len;
rom->datasize = len;
+ g_assert(rom->romsize >= rom->datasize);
rom->data = g_malloc0(rom->datasize);
memcpy(rom->data, blob, len);
rom_insert(rom);
config PL110
bool
+ select FRAMEBUFFER
config SII9022
bool
depends on I2C
+ select DDC
config SSD0303
bool
config DPCD
bool
+ select AUX
config ATI_VGA
bool
* No 3D at all yet (maybe after 2D works, but feel free to improve it)
*/
+#include "qemu/osdep.h"
#include "ati_int.h"
#include "ati_regs.h"
#include "vga_regs.h"
* This work is licensed under the GNU GPL license version 2 or later.
*/
+#include "qemu/osdep.h"
#include "ati_int.h"
#include "ati_regs.h"
#include "qemu/log.h"
+#include "qemu/osdep.h"
#include "ati_int.h"
#ifdef DEBUG_ATI
#ifndef ATI_INT_H
#define ATI_INT_H
-#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "vga_int.h"
*
*/
-#ifndef LINUX_VIDEO_VGA_H
-#define LINUX_VIDEO_VGA_H
+#ifndef HW_VGA_REGS_H
+#define HW_VGA_REGS_H
/* Some of the code below is taken from SVGAlib. The original,
unmodified copyright notice for that code is below. */
/* VGA graphics controller bit masks */
#define VGA_GR06_GRAPHICS_MODE 0x01
-#endif /* LINUX_VIDEO_VGA_H */
+#endif /* HW_VGA_REGS_H */
config VERSATILE_I2C
bool
- select I2C
+ select BITBANG_I2C
config ACPI_SMBUS
bool
select PC_ACPI
select PCI_EXPRESS_Q35
select LPC_ICH9
- select AHCI
+ select AHCI_ICH9
select DIMM
select SMBIOS
select VMPORT
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/cpu.h"
+#include "hw/acpi/piix4.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/loader.h"
*data = fadt;
}
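+/*
+ * Resolve a QOM type name to its single instance; return NULL when no
+ * instance exists or the lookup is ambiguous (more than one instance).
+ */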
+static Object *object_resolve_type_unambiguous(const char *typename)
+{
+ bool ambig;
+ Object *o = object_resolve_path_type("", typename, &ambig);
+
+ if (ambig || !o) {
+ return NULL;
+ }
+ return o;
+}
+
static void acpi_get_pm_info(AcpiPmInfo *pm)
{
- Object *piix = piix4_pm_find();
- Object *lpc = ich9_lpc_find();
+ Object *piix = object_resolve_type_unambiguous(TYPE_PIIX4_PM);
+ Object *lpc = object_resolve_type_unambiguous(TYPE_ICH9_LPC_DEVICE);
Object *obj = piix ? piix : lpc;
QObject *o;
pm->cpu_hp_io_base = 0;
pm->pcihp_io_base = 0;
pm->pcihp_io_len = 0;
+ assert(obj);
init_common_fadt_data(obj, &pm->fadt);
if (piix) {
/* w2k requires FADT(rev1) or it won't boot, keep PC compatible */
pm->fadt.flags |= 1 << ACPI_FADT_F_RESET_REG_SUP;
pm->cpu_hp_io_base = ICH9_CPU_HOTPLUG_IO_BASE;
}
- assert(obj);
/* The above need not be conditional on machine type because the reset port
* happens to be the same on PIIX (pc) and ICH9 (q35). */
static void acpi_get_misc_info(AcpiMiscInfo *info)
{
- Object *piix = piix4_pm_find();
- Object *lpc = ich9_lpc_find();
+ Object *piix = object_resolve_type_unambiguous(TYPE_PIIX4_PM);
+ Object *lpc = object_resolve_type_unambiguous(TYPE_ICH9_LPC_DEVICE);
assert(!!piix != !!lpc);
if (piix) {
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef AMD_IOMMU_H_
-#define AMD_IOMMU_H_
+#ifndef AMD_IOMMU_H
+#define AMD_IOMMU_H
#include "hw/hw.h"
#include "hw/pci/pci.h"
select IDE_QDEV
config AHCI
+ bool
+ select IDE_QDEV
+
+config AHCI_ICH9
bool
default y if PCI_DEVICES
depends on PCI
- select IDE_QDEV
+ select AHCI
config IDE_SII3112
bool
common-obj-$(CONFIG_IDE_VIA) += via.o
common-obj-$(CONFIG_MICRODRIVE) += microdrive.o
common-obj-$(CONFIG_AHCI) += ahci.o
-common-obj-$(CONFIG_AHCI) += ich.o
+common-obj-$(CONFIG_AHCI_ICH9) += ich.o
common-obj-$(CONFIG_ALLWINNER_A10) += ahci-allwinner.o
common-obj-$(CONFIG_IDE_SII3112) += sii3112.o
#define SYSBUS_AHCI(obj) OBJECT_CHECK(SysbusAHCIState, (obj), TYPE_SYSBUS_AHCI)
-#endif /* HW_IDE_AHCI_H */
+#endif /* HW_IDE_AHCI_INTERNAL_H */
config PCKBD
bool
default y
+ select PS2
depends on ISA_BUS
config PL050
bool
+ select PS2
+
+config PS2
+ bool
config STELLARIS_INPUT
bool
config VIRTIO_INPUT_HOST
bool
default y
- depends on VIRTIO && LINUX
+ depends on VIRTIO_INPUT && LINUX
+
+config VHOST_USER_INPUT
+ bool
+ default y
+ depends on VIRTIO_INPUT && VHOST_USER
config TSC210X
bool
common-obj-$(CONFIG_LM832X) += lm832x.o
common-obj-$(CONFIG_PCKBD) += pckbd.o
common-obj-$(CONFIG_PL050) += pl050.o
-common-obj-y += ps2.o
+common-obj-$(CONFIG_PS2) += ps2.o
common-obj-$(CONFIG_STELLARIS_INPUT) += stellaris_input.o
common-obj-$(CONFIG_TSC2005) += tsc2005.o
common-obj-$(CONFIG_VIRTIO_INPUT) += virtio-input.o
common-obj-$(CONFIG_VIRTIO_INPUT) += virtio-input-hid.o
-ifeq ($(CONFIG_LINUX),y)
-common-obj-$(CONFIG_VIRTIO_INPUT) += virtio-input-host.o
-endif
+common-obj-$(CONFIG_VIRTIO_INPUT_HOST) += virtio-input-host.o
+common-obj-$(CONFIG_VHOST_USER_INPUT) += vhost-user-input.o
obj-$(CONFIG_MILKYMIST) += milkymist-softusb.o
obj-$(CONFIG_PXA2XX) += pxa2xx_keypad.o
--- /dev/null
+/*
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+
+#include "hw/qdev.h"
+#include "hw/virtio/virtio-input.h"
+
+static int vhost_input_config_change(struct vhost_dev *dev)
+{
+ error_report("vhost-user-input: unhandled backend config change");
+ return -1;
+}
+
+static const VhostDevConfigOps config_ops = {
+ .vhost_dev_config_notifier = vhost_input_config_change,
+};
+
+static void vhost_input_realize(DeviceState *dev, Error **errp)
+{
+ VHostUserInput *vhi = VHOST_USER_INPUT(dev);
+ VirtIOInput *vinput = VIRTIO_INPUT(dev);
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+
+ vhost_dev_set_config_notifier(&vhi->vhost->dev, &config_ops);
+ vinput->cfg_size = sizeof_field(virtio_input_config, u);
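+    /* virtio-input devices use two virtqueues: event and status. */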
+ if (vhost_user_backend_dev_init(vhi->vhost, vdev, 2, errp) == -1) {
+ return;
+ }
+}
+
+static void vhost_input_change_active(VirtIOInput *vinput)
+{
+ VHostUserInput *vhi = VHOST_USER_INPUT(vinput);
+
+ if (vinput->active) {
+ vhost_user_backend_start(vhi->vhost);
+ } else {
+ vhost_user_backend_stop(vhi->vhost);
+ }
+}
+
+static void vhost_input_get_config(VirtIODevice *vdev, uint8_t *config_data)
+{
+ VirtIOInput *vinput = VIRTIO_INPUT(vdev);
+ VHostUserInput *vhi = VHOST_USER_INPUT(vdev);
+ int ret;
+
+ memset(config_data, 0, vinput->cfg_size);
+
+ ret = vhost_dev_get_config(&vhi->vhost->dev, config_data, vinput->cfg_size);
+ if (ret) {
+ error_report("vhost-user-input: get device config space failed");
+ return;
+ }
+}
+
+static void vhost_input_set_config(VirtIODevice *vdev,
+ const uint8_t *config_data)
+{
+ VHostUserInput *vhi = VHOST_USER_INPUT(vdev);
+ int ret;
+
+ ret = vhost_dev_set_config(&vhi->vhost->dev, config_data,
+ 0, sizeof(virtio_input_config),
+ VHOST_SET_CONFIG_TYPE_MASTER);
+ if (ret) {
+ error_report("vhost-user-input: set device config space failed");
+ return;
+ }
+
+ virtio_notify_config(vdev);
+}
+
+static const VMStateDescription vmstate_vhost_input = {
+ .name = "vhost-user-input",
+ .unmigratable = 1,
+};
+
+static void vhost_input_class_init(ObjectClass *klass, void *data)
+{
+ VirtIOInputClass *vic = VIRTIO_INPUT_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &vmstate_vhost_input;
+ vdc->get_config = vhost_input_get_config;
+ vdc->set_config = vhost_input_set_config;
+ vic->realize = vhost_input_realize;
+ vic->change_active = vhost_input_change_active;
+}
+
+static void vhost_input_init(Object *obj)
+{
+ VHostUserInput *vhi = VHOST_USER_INPUT(obj);
+
+ vhi->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
+ object_property_add_alias(obj, "chardev",
+ OBJECT(vhi->vhost), "chardev", &error_abort);
+}
+
+static void vhost_input_finalize(Object *obj)
+{
+ VHostUserInput *vhi = VHOST_USER_INPUT(obj);
+
+ object_unref(OBJECT(vhi->vhost));
+}
+
+static const TypeInfo vhost_input_info = {
+ .name = TYPE_VHOST_USER_INPUT,
+ .parent = TYPE_VIRTIO_INPUT,
+ .instance_size = sizeof(VHostUserInput),
+ .instance_init = vhost_input_init,
+ .instance_finalize = vhost_input_finalize,
+ .class_init = vhost_input_class_init,
+};
+
+static void vhost_input_register_types(void)
+{
+ type_register_static(&vhost_input_info);
+}
+
+type_init(vhost_input_register_types)
*
* (Multiprocessor and extended interrupt not supported)
*
- * Copyright (c) 2010-2011 AdaCore
+ * Copyright (c) 2010-2019 AdaCore
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
#define FORCE_OFFSET 0x80
#define EXTENDED_OFFSET 0xC0
-#define TYPE_GRLIB_IRQMP "grlib,irqmp"
#define GRLIB_IRQMP(obj) OBJECT_CHECK(IRQMP, (obj), TYPE_GRLIB_IRQMP)
typedef struct IRQMPState IRQMPState;
}
}
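+/*
+ * Number of consecutive EOI-triggered re-deliveries of the same
+ * level-triggered interrupt after which the pin is considered stuck in
+ * an interrupt storm, and the next delivery is deferred to a short
+ * timer instead of being re-asserted immediately.
+ */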
+#define SUCCESSIVE_IRQ_MAX_COUNT 10000
+
+static void delayed_ioapic_service_cb(void *opaque)
+{
+ IOAPICCommonState *s = opaque;
+
+ ioapic_service(s);
+}
+
static void ioapic_set_irq(void *opaque, int vector, int level)
{
IOAPICCommonState *s = opaque;
}
for (n = 0; n < IOAPIC_NUM_PINS; n++) {
entry = s->ioredtbl[n];
- if ((entry & IOAPIC_LVT_REMOTE_IRR)
- && (entry & IOAPIC_VECTOR_MASK) == vector) {
- trace_ioapic_clear_remote_irr(n, vector);
- s->ioredtbl[n] = entry & ~IOAPIC_LVT_REMOTE_IRR;
- if (!(entry & IOAPIC_LVT_MASKED) && (s->irr & (1 << n))) {
+
+ if ((entry & IOAPIC_VECTOR_MASK) != vector ||
+ ((entry >> IOAPIC_LVT_TRIGGER_MODE_SHIFT) & 1) != IOAPIC_TRIGGER_LEVEL) {
+ continue;
+ }
+
+ if (!(entry & IOAPIC_LVT_REMOTE_IRR)) {
+ continue;
+ }
+
+ trace_ioapic_clear_remote_irr(n, vector);
+ s->ioredtbl[n] = entry & ~IOAPIC_LVT_REMOTE_IRR;
+
+ if (!(entry & IOAPIC_LVT_MASKED) && (s->irr & (1 << n))) {
+ ++s->irq_eoi[n];
+ if (s->irq_eoi[n] >= SUCCESSIVE_IRQ_MAX_COUNT) {
+ /*
+ * Real hardware does not deliver the interrupt immediately
+ * during eoi broadcast, and this lets a buggy guest make
+ * slow progress even if it does not correctly handle a
+ * level-triggered interrupt. Emulate this behavior if we
+ * detect an interrupt storm.
+ */
+ s->irq_eoi[n] = 0;
+ timer_mod_anticipate(s->delayed_ioapic_service_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ NANOSECONDS_PER_SECOND / 100);
+ trace_ioapic_eoi_delayed_reassert(vector);
+ } else {
ioapic_service(s);
}
+ } else {
+ s->irq_eoi[n] = 0;
}
}
}
memory_region_init_io(&s->io_memory, OBJECT(s), &ioapic_io_ops, s,
"ioapic", 0x1000);
+ s->delayed_ioapic_service_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, delayed_ioapic_service_cb, s);
+
qdev_init_gpio_in(dev, ioapic_set_irq, IOAPIC_NUM_PINS);
ioapics[ioapic_no] = s;
qemu_add_machine_init_done_notifier(&s->machine_done);
}
+static void ioapic_unrealize(DeviceState *dev, Error **errp)
+{
+ IOAPICCommonState *s = IOAPIC_COMMON(dev);
+
+ timer_del(s->delayed_ioapic_service_timer);
+ timer_free(s->delayed_ioapic_service_timer);
+}
+
static Property ioapic_properties[] = {
DEFINE_PROP_UINT8("version", IOAPICCommonState, version, IOAPIC_VER_DEF),
DEFINE_PROP_END_OF_LIST(),
DeviceClass *dc = DEVICE_CLASS(klass);
k->realize = ioapic_realize;
+ k->unrealize = ioapic_unrealize;
/*
* If APIC is in kernel, we need to update the kernel cache after
* migration, otherwise first 24 gsi routes will be invalid.
ioapic_set_remote_irr(int n) "set remote irr for pin %d"
ioapic_clear_remote_irr(int n, int vector) "clear remote irr for pin %d vector %d"
ioapic_eoi_broadcast(int vector) "EOI broadcast for vector %d"
+ioapic_eoi_delayed_reassert(int vector) "delayed reassert on EOI broadcast for vector %d"
ioapic_mem_read(uint8_t addr, uint8_t regsel, uint8_t size, uint32_t val) "ioapic mem read addr 0x%"PRIx8" regsel: 0x%"PRIx8" size 0x%"PRIx8" retval 0x%"PRIx32
ioapic_mem_write(uint8_t addr, uint8_t regsel, uint8_t size, uint32_t val) "ioapic mem write addr 0x%"PRIx8" regsel: 0x%"PRIx8" size 0x%"PRIx8" val 0x%"PRIx32
ioapic_set_irq(int vector, int level) "vector: %d level: %d"
.endianness = DEVICE_LITTLE_ENDIAN
};
-Object *ich9_lpc_find(void)
-{
- bool ambig;
- Object *o = object_resolve_path_type("", TYPE_ICH9_LPC_DEVICE, &ambig);
-
- if (ambig) {
- return NULL;
- }
- return o;
-}
-
static void ich9_lpc_get_sci_int(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
&smbus, &isa_bus);
/* GPU */
- dev = DEVICE(pci_create(pci_bus, -1, "ati-vga"));
- qdev_prop_set_uint32(dev, "vgamem_mb", 16);
- qdev_prop_set_uint16(dev, "x-device-id", 0x5159);
- qdev_init_nofail(dev);
+ if (vga_interface_type != VGA_NONE) {
+ dev = DEVICE(pci_create(pci_bus, -1, "ati-vga"));
+ qdev_prop_set_uint32(dev, "vgamem_mb", 16);
+ qdev_prop_set_uint16(dev, "x-device-id", 0x5159);
+ qdev_init_nofail(dev);
+ }
/* Populate SPD eeprom data */
spd_data = spd_data_generate(DDR, ram_size, &err);
config IMX
bool
select PTIMER
+ select SSI
+ select USB_EHCI_SYSBUS
config STM32F2XX_SYSCFG
bool
obj-$(CONFIG_ASPEED_SOC) += aspeed_scu.o aspeed_sdmc.o
obj-$(CONFIG_MSF2) += msf2-sysreg.o
obj-$(CONFIG_NRF51_SOC) += nrf51_rng.o
+
+obj-$(CONFIG_GRLIB) += grlib_ahb_apb_pnp.o
--- /dev/null
+/*
+ * GRLIB AHB APB PNP
+ *
+ * Copyright (C) 2019 AdaCore
+ *
+ * Developed by :
+ * Frederic Konrad <frederic.konrad@adacore.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "hw/misc/grlib_ahb_apb_pnp.h"
+
+#define GRLIB_PNP_VENDOR_SHIFT (24)
+#define GRLIB_PNP_VENDOR_SIZE (8)
+#define GRLIB_PNP_DEV_SHIFT (12)
+#define GRLIB_PNP_DEV_SIZE (12)
+#define GRLIB_PNP_VER_SHIFT (5)
+#define GRLIB_PNP_VER_SIZE (5)
+#define GRLIB_PNP_IRQ_SHIFT (0)
+#define GRLIB_PNP_IRQ_SIZE (5)
+#define GRLIB_PNP_ADDR_SHIFT (20)
+#define GRLIB_PNP_ADDR_SIZE (12)
+#define GRLIB_PNP_MASK_SHIFT (4)
+#define GRLIB_PNP_MASK_SIZE (12)
+
+#define GRLIB_AHB_DEV_ADDR_SHIFT (20)
+#define GRLIB_AHB_DEV_ADDR_SIZE (12)
+#define GRLIB_AHB_ENTRY_SIZE (0x20)
+#define GRLIB_AHB_MAX_DEV (64)
+#define GRLIB_AHB_SLAVE_OFFSET (0x800)
+
+#define GRLIB_APB_DEV_ADDR_SHIFT (8)
+#define GRLIB_APB_DEV_ADDR_SIZE (12)
+#define GRLIB_APB_ENTRY_SIZE (0x08)
+#define GRLIB_APB_MAX_DEV (512)
+
+#define GRLIB_PNP_MAX_REGS (0x1000)
+
+typedef struct AHBPnp {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+
+ uint32_t regs[GRLIB_PNP_MAX_REGS >> 2];
+ uint8_t master_count;
+ uint8_t slave_count;
+} AHBPnp;
+
+void grlib_ahb_pnp_add_entry(AHBPnp *dev, uint32_t address, uint32_t mask,
+ uint8_t vendor, uint16_t device, int slave,
+ int type)
+{
+ unsigned int reg_start;
+
+ /*
+ * AHB entries are 0x20 bytes long: one identification word, three
+ * user-defined words, and four bank address registers (BARs):
+ *
+ * 31 -------- 23 -------- 11 ----- 9 -------- 4 --- 0
+ * | VENDOR ID | DEVICE ID | IRQ ? | VERSION | IRQ |
+ * --------------------------------------------------
+ * | USER |
+ * --------------------------------------------------
+ * | USER |
+ * --------------------------------------------------
+ * | USER |
+ * --------------------------------------------------
+ * 31 ----------- 20 --- 15 ----------------- 3 ---- 0
+ * | ADDR[31..20] | 00PC | MASK | TYPE |
+ * --------------------------------------------------
+ * 31 ----------- 20 --- 15 ----------------- 3 ---- 0
+ * | ADDR[31..20] | 00PC | MASK | TYPE |
+ * --------------------------------------------------
+ * 31 ----------- 20 --- 15 ----------------- 3 ---- 0
+ * | ADDR[31..20] | 00PC | MASK | TYPE |
+ * --------------------------------------------------
+ * 31 ----------- 20 --- 15 ----------------- 3 ---- 0
+ * | ADDR[31..20] | 00PC | MASK | TYPE |
+ * --------------------------------------------------
+ */
+
+ if (slave) {
+ assert(dev->slave_count < GRLIB_AHB_MAX_DEV);
+ reg_start = (GRLIB_AHB_SLAVE_OFFSET
+ + (dev->slave_count * GRLIB_AHB_ENTRY_SIZE)) >> 2;
+ dev->slave_count++;
+ } else {
+ assert(dev->master_count < GRLIB_AHB_MAX_DEV);
+ reg_start = (dev->master_count * GRLIB_AHB_ENTRY_SIZE) >> 2;
+ dev->master_count++;
+ }
+
+ dev->regs[reg_start] = deposit32(dev->regs[reg_start],
+ GRLIB_PNP_VENDOR_SHIFT,
+ GRLIB_PNP_VENDOR_SIZE,
+ vendor);
+ dev->regs[reg_start] = deposit32(dev->regs[reg_start],
+ GRLIB_PNP_DEV_SHIFT,
+ GRLIB_PNP_DEV_SIZE,
+ device);
+ reg_start += 4;
+ /* AHB Memory Space */
+ dev->regs[reg_start] = type;
+ dev->regs[reg_start] = deposit32(dev->regs[reg_start],
+ GRLIB_PNP_ADDR_SHIFT,
+ GRLIB_PNP_ADDR_SIZE,
+ extract32(address,
+ GRLIB_AHB_DEV_ADDR_SHIFT,
+ GRLIB_AHB_DEV_ADDR_SIZE));
+ dev->regs[reg_start] = deposit32(dev->regs[reg_start],
+ GRLIB_PNP_MASK_SHIFT,
+ GRLIB_PNP_MASK_SIZE,
+ mask);
+}
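+
+/*
+ * For instance, the LEON3 machine (see the leon3.c changes below)
+ * registers its AHB/APB bridge with address 0x800FF000, mask 0xFFF,
+ * vendor GAISLER and device APBMST as the first slave: the
+ * identification word 0x01006000 lands at offset 0x800 and the
+ * memory-area BAR 0x8000FFF2 at offset 0x810.
+ */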
+
+static uint64_t grlib_ahb_pnp_read(void *opaque, hwaddr offset, unsigned size)
+{
+ AHBPnp *ahb_pnp = GRLIB_AHB_PNP(opaque);
+
+ return ahb_pnp->regs[offset >> 2];
+}
+
+static const MemoryRegionOps grlib_ahb_pnp_ops = {
+ .read = grlib_ahb_pnp_read,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static void grlib_ahb_pnp_realize(DeviceState *dev, Error **errp)
+{
+ AHBPnp *ahb_pnp = GRLIB_AHB_PNP(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+
+ memory_region_init_io(&ahb_pnp->iomem, OBJECT(dev), &grlib_ahb_pnp_ops,
+ ahb_pnp, TYPE_GRLIB_AHB_PNP, GRLIB_PNP_MAX_REGS);
+ sysbus_init_mmio(sbd, &ahb_pnp->iomem);
+}
+
+static void grlib_ahb_pnp_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = grlib_ahb_pnp_realize;
+}
+
+static const TypeInfo grlib_ahb_pnp_info = {
+ .name = TYPE_GRLIB_AHB_PNP,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AHBPnp),
+ .class_init = grlib_ahb_pnp_class_init,
+};
+
+/* APBPnp */
+
+typedef struct APBPnp {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+
+ uint32_t regs[GRLIB_PNP_MAX_REGS >> 2];
+ uint32_t entry_count;
+} APBPnp;
+
+void grlib_apb_pnp_add_entry(APBPnp *dev, uint32_t address, uint32_t mask,
+ uint8_t vendor, uint16_t device, uint8_t version,
+ uint8_t irq, int type)
+{
+ unsigned int reg_start;
+
+ /*
+ * APB entries look like this:
+ *
+ * 31 -------- 23 -------- 11 ----- 9 ------- 4 --- 0
+ * | VENDOR ID | DEVICE ID | IRQ ? | VERSION | IRQ |
+ *
+ * 31 ---------- 20 --- 15 ----------------- 3 ---- 0
+ * | ADDR[19..8] | 0000 | MASK | TYPE |
+ */
+
+ assert(dev->entry_count < GRLIB_APB_MAX_DEV);
+ reg_start = (dev->entry_count * GRLIB_APB_ENTRY_SIZE) >> 2;
+ dev->entry_count++;
+
+ dev->regs[reg_start] = deposit32(dev->regs[reg_start],
+ GRLIB_PNP_VENDOR_SHIFT,
+ GRLIB_PNP_VENDOR_SIZE,
+ vendor);
+ dev->regs[reg_start] = deposit32(dev->regs[reg_start],
+ GRLIB_PNP_DEV_SHIFT,
+ GRLIB_PNP_DEV_SIZE,
+ device);
+ dev->regs[reg_start] = deposit32(dev->regs[reg_start],
+ GRLIB_PNP_VER_SHIFT,
+ GRLIB_PNP_VER_SIZE,
+ version);
+ dev->regs[reg_start] = deposit32(dev->regs[reg_start],
+ GRLIB_PNP_IRQ_SHIFT,
+ GRLIB_PNP_IRQ_SIZE,
+ irq);
+ reg_start += 1;
+ dev->regs[reg_start] = type;
+ dev->regs[reg_start] = deposit32(dev->regs[reg_start],
+ GRLIB_PNP_ADDR_SHIFT,
+ GRLIB_PNP_ADDR_SIZE,
+ extract32(address,
+ GRLIB_APB_DEV_ADDR_SHIFT,
+ GRLIB_APB_DEV_ADDR_SIZE));
+ dev->regs[reg_start] = deposit32(dev->regs[reg_start],
+ GRLIB_PNP_MASK_SHIFT,
+ GRLIB_PNP_MASK_SIZE,
+ mask);
+}
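+
+/*
+ * For instance, the LEON3 UART entry (address 0x80000100, mask 0xFFF,
+ * vendor GAISLER, device APBUART, version 1, IRQ 3, APB I/O area)
+ * encodes as the word pair 0x0100C023 / 0x0010FFF1.
+ */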
+
+static uint64_t grlib_apb_pnp_read(void *opaque, hwaddr offset, unsigned size)
+{
+ APBPnp *apb_pnp = GRLIB_APB_PNP(opaque);
+
+ return apb_pnp->regs[offset >> 2];
+}
+
+static const MemoryRegionOps grlib_apb_pnp_ops = {
+ .read = grlib_apb_pnp_read,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static void grlib_apb_pnp_realize(DeviceState *dev, Error **errp)
+{
+ APBPnp *apb_pnp = GRLIB_APB_PNP(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+
+ memory_region_init_io(&apb_pnp->iomem, OBJECT(dev), &grlib_apb_pnp_ops,
+ apb_pnp, TYPE_GRLIB_APB_PNP, GRLIB_PNP_MAX_REGS);
+ sysbus_init_mmio(sbd, &apb_pnp->iomem);
+}
+
+static void grlib_apb_pnp_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = grlib_apb_pnp_realize;
+}
+
+static const TypeInfo grlib_apb_pnp_info = {
+ .name = TYPE_GRLIB_APB_PNP,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(APBPnp),
+ .class_init = grlib_apb_pnp_class_init,
+};
+
+static void grlib_ahb_apb_pnp_register_types(void)
+{
+ type_register_static(&grlib_ahb_pnp_info);
+ type_register_static(&grlib_apb_pnp_info);
+}
+
+type_init(grlib_ahb_apb_pnp_register_types)
config DP8393X
bool
+config NE2000_COMMON
+ bool
+
config NE2000_PCI
bool
default y if PCI_DEVICES
depends on PCI
+ select NE2000_COMMON
config EEPRO100_PCI
bool
bool
default y
depends on ISA_BUS
- depends on PCI # for NE2000State
- select NE2000_PCI
+ select NE2000_COMMON
config OPENCORES_ETH
bool
common-obj-$(CONFIG_DP8393X) += dp8393x.o
common-obj-$(CONFIG_XEN) += xen_nic.o
+common-obj-$(CONFIG_NE2000_COMMON) += ne2000.o
# PCI network cards
-common-obj-$(CONFIG_NE2000_PCI) += ne2000.o
+common-obj-$(CONFIG_NE2000_PCI) += ne2000-pci.o
common-obj-$(CONFIG_EEPRO100_PCI) += eepro100.o
common-obj-$(CONFIG_PCNET_PCI) += pcnet-pci.o
common-obj-$(CONFIG_PCNET_COMMON) += pcnet.o
if (size < sizeof(min_buf)) {
iov_to_buf(iov, iovcnt, 0, min_buf, size);
memset(&min_buf[size], 0, sizeof(min_buf) - size);
- e1000x_inc_reg_if_not_full(s->mac_reg, RUC);
min_iov.iov_base = filter_buf = min_buf;
min_iov.iov_len = size = sizeof(min_buf);
iovcnt = 1;
--- /dev/null
+/*
+ * QEMU NE2000 emulation (PCI bus)
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "hw/pci/pci.h"
+#include "ne2000.h"
+#include "sysemu/sysemu.h"
+
+typedef struct PCINE2000State {
+ PCIDevice dev;
+ NE2000State ne2000;
+} PCINE2000State;
+
+static const VMStateDescription vmstate_pci_ne2000 = {
+ .name = "ne2000",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(dev, PCINE2000State),
+ VMSTATE_STRUCT(ne2000, PCINE2000State, 0, vmstate_ne2000, NE2000State),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static NetClientInfo net_ne2000_info = {
+ .type = NET_CLIENT_DRIVER_NIC,
+ .size = sizeof(NICState),
+ .receive = ne2000_receive,
+};
+
+static void pci_ne2000_realize(PCIDevice *pci_dev, Error **errp)
+{
+ PCINE2000State *d = DO_UPCAST(PCINE2000State, dev, pci_dev);
+ NE2000State *s;
+ uint8_t *pci_conf;
+
+ pci_conf = d->dev.config;
+ pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
+
+ s = &d->ne2000;
+ ne2000_setup_io(s, DEVICE(pci_dev), 0x100);
+ pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io);
+ s->irq = pci_allocate_irq(&d->dev);
+
+ qemu_macaddr_default_if_unset(&s->c.macaddr);
+ ne2000_reset(s);
+
+ s->nic = qemu_new_nic(&net_ne2000_info, &s->c,
+ object_get_typename(OBJECT(pci_dev)),
+ pci_dev->qdev.id, s);
+ qemu_format_nic_info_str(qemu_get_queue(s->nic), s->c.macaddr.a);
+}
+
+static void pci_ne2000_exit(PCIDevice *pci_dev)
+{
+ PCINE2000State *d = DO_UPCAST(PCINE2000State, dev, pci_dev);
+ NE2000State *s = &d->ne2000;
+
+ qemu_del_nic(s->nic);
+ qemu_free_irq(s->irq);
+}
+
+static void ne2000_instance_init(Object *obj)
+{
+ PCIDevice *pci_dev = PCI_DEVICE(obj);
+ PCINE2000State *d = DO_UPCAST(PCINE2000State, dev, pci_dev);
+ NE2000State *s = &d->ne2000;
+
+ device_add_bootindex_property(obj, &s->c.bootindex,
+ "bootindex", "/ethernet-phy@0",
+ &pci_dev->qdev, NULL);
+}
+
+static Property ne2000_properties[] = {
+ DEFINE_NIC_PROPERTIES(PCINE2000State, ne2000.c),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void ne2000_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->realize = pci_ne2000_realize;
+ k->exit = pci_ne2000_exit;
+ k->romfile = "efi-ne2k_pci.rom";
+ k->vendor_id = PCI_VENDOR_ID_REALTEK;
+ k->device_id = PCI_DEVICE_ID_REALTEK_8029;
+ k->class_id = PCI_CLASS_NETWORK_ETHERNET;
+ dc->vmsd = &vmstate_pci_ne2000;
+ dc->props = ne2000_properties;
+ set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
+}
+
+static const TypeInfo ne2000_info = {
+ .name = "ne2k_pci",
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(PCINE2000State),
+ .class_init = ne2000_class_init,
+ .instance_init = ne2000_instance_init,
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { },
+ },
+};
+
+static void ne2000_register_types(void)
+{
+ type_register_static(&ne2000_info);
+}
+
+type_init(ne2000_register_types)
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
-#include "hw/pci/pci.h"
#include "net/eth.h"
#include "ne2000.h"
#include "sysemu/sysemu.h"
#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */
#define ENTSR_OWC 0x80 /* There was an out-of-window collision. */
-typedef struct PCINE2000State {
- PCIDevice dev;
- NE2000State ne2000;
-} PCINE2000State;
-
void ne2000_reset(NE2000State *s)
{
int i;
}
};
-static const VMStateDescription vmstate_pci_ne2000 = {
- .name = "ne2000",
- .version_id = 3,
- .minimum_version_id = 3,
- .fields = (VMStateField[]) {
- VMSTATE_PCI_DEVICE(dev, PCINE2000State),
- VMSTATE_STRUCT(ne2000, PCINE2000State, 0, vmstate_ne2000, NE2000State),
- VMSTATE_END_OF_LIST()
- }
-};
-
static uint64_t ne2000_read(void *opaque, hwaddr addr,
unsigned size)
{
{
memory_region_init_io(&s->io, OBJECT(dev), &ne2000_ops, s, "ne2000", size);
}
-
-static NetClientInfo net_ne2000_info = {
- .type = NET_CLIENT_DRIVER_NIC,
- .size = sizeof(NICState),
- .receive = ne2000_receive,
-};
-
-static void pci_ne2000_realize(PCIDevice *pci_dev, Error **errp)
-{
- PCINE2000State *d = DO_UPCAST(PCINE2000State, dev, pci_dev);
- NE2000State *s;
- uint8_t *pci_conf;
-
- pci_conf = d->dev.config;
- pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
-
- s = &d->ne2000;
- ne2000_setup_io(s, DEVICE(pci_dev), 0x100);
- pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io);
- s->irq = pci_allocate_irq(&d->dev);
-
- qemu_macaddr_default_if_unset(&s->c.macaddr);
- ne2000_reset(s);
-
- s->nic = qemu_new_nic(&net_ne2000_info, &s->c,
- object_get_typename(OBJECT(pci_dev)), pci_dev->qdev.id, s);
- qemu_format_nic_info_str(qemu_get_queue(s->nic), s->c.macaddr.a);
-}
-
-static void pci_ne2000_exit(PCIDevice *pci_dev)
-{
- PCINE2000State *d = DO_UPCAST(PCINE2000State, dev, pci_dev);
- NE2000State *s = &d->ne2000;
-
- qemu_del_nic(s->nic);
- qemu_free_irq(s->irq);
-}
-
-static void ne2000_instance_init(Object *obj)
-{
- PCIDevice *pci_dev = PCI_DEVICE(obj);
- PCINE2000State *d = DO_UPCAST(PCINE2000State, dev, pci_dev);
- NE2000State *s = &d->ne2000;
-
- device_add_bootindex_property(obj, &s->c.bootindex,
- "bootindex", "/ethernet-phy@0",
- &pci_dev->qdev, NULL);
-}
-
-static Property ne2000_properties[] = {
- DEFINE_NIC_PROPERTIES(PCINE2000State, ne2000.c),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void ne2000_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
-
- k->realize = pci_ne2000_realize;
- k->exit = pci_ne2000_exit;
- k->romfile = "efi-ne2k_pci.rom",
- k->vendor_id = PCI_VENDOR_ID_REALTEK;
- k->device_id = PCI_DEVICE_ID_REALTEK_8029;
- k->class_id = PCI_CLASS_NETWORK_ETHERNET;
- dc->vmsd = &vmstate_pci_ne2000;
- dc->props = ne2000_properties;
- set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
-}
-
-static const TypeInfo ne2000_info = {
- .name = "ne2k_pci",
- .parent = TYPE_PCI_DEVICE,
- .instance_size = sizeof(PCINE2000State),
- .class_init = ne2000_class_init,
- .instance_init = ne2000_instance_init,
- .interfaces = (InterfaceInfo[]) {
- { INTERFACE_CONVENTIONAL_PCI_DEVICE },
- { },
- },
-};
-
-static void ne2000_register_types(void)
-{
- type_register_static(&ne2000_info);
-}
-
-type_init(ne2000_register_types)
qemu_set_fd_handler(net->backend, NULL, NULL, NULL);
file.fd = net->backend;
for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
+ if (!virtio_queue_enabled(dev, net->dev.vq_index +
+ file.index)) {
+ /* Queue might not be ready for start */
+ continue;
+ }
r = vhost_net_set_backend(&net->dev, &file);
if (r < 0) {
r = -errno;
file.fd = -1;
if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
while (file.index-- > 0) {
+ if (!virtio_queue_enabled(dev, net->dev.vq_index +
+ file.index)) {
+ /* Queue might not be ready for start */
+ continue;
+ }
int r = vhost_net_set_backend(&net->dev, &file);
assert(r >= 0);
}
g_assert(false);
return 0;
}
+
+/* Required by ahci.c */
+bool msi_enabled(const PCIDevice *dev)
+{
+ return false;
+}
+
+void msi_notify(PCIDevice *dev, unsigned int vector)
+{
+ g_assert_not_reached();
+}
#include "qemu/error-report.h"
#include "hw/pci/pci.h"
#include "sysemu/dma.h"
-#include "stdio.h"
#define rdma_error_report(fmt, ...) \
error_report("%s: " fmt, "rdma", ## __VA_ARGS__)
*
*/
-#ifndef PVRDMA_QP_H
-#define PVRDMA_QP_H
+#ifndef PVRDMA_QP_OPS_H
+#define PVRDMA_QP_OPS_H
#include "pvrdma.h"
{
PCIDevice *pcid = PCI_DEVICE(s);
MegasasCmd *cmd = NULL;
- int frame_size = MFI_FRAME_SIZE * 16;
+ int frame_size = MEGASAS_MAX_SGE * sizeof(union mfi_sgl);
hwaddr frame_size_p = frame_size;
unsigned long index;
* See the COPYING file in the top-level directory.
* SPDX-License-Identifier: GPL-2.0-or-later
*/
-#ifndef SD_INTERNAL_H
-#define SD_INTERNAL_H
+
+#ifndef SDMMC_INTERNAL_H
+#define SDMMC_INTERNAL_H
#define SDMMC_CMD_MAX 64
/*
* QEMU Leon3 System Emulator
*
- * Copyright (c) 2010-2011 AdaCore
+ * Copyright (c) 2010-2019 AdaCore
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
#include "exec/address-spaces.h"
#include "hw/sparc/grlib.h"
+#include "hw/misc/grlib_ahb_apb_pnp.h"
/* Default system clock. */
#define CPU_CLK (40 * 1000 * 1000)
-#define PROM_FILENAME "u-boot.bin"
+#define LEON3_PROM_FILENAME "u-boot.bin"
+#define LEON3_PROM_OFFSET (0x00000000)
+#define LEON3_RAM_OFFSET (0x40000000)
#define MAX_PILS 16
+#define LEON3_UART_OFFSET (0x80000100)
+#define LEON3_UART_IRQ (3)
+
+#define LEON3_IRQMP_OFFSET (0x80000200)
+
+#define LEON3_TIMER_OFFSET (0x80000300)
+#define LEON3_TIMER_IRQ (6)
+#define LEON3_TIMER_COUNT (2)
+
+#define LEON3_APB_PNP_OFFSET (0x800FF000)
+#define LEON3_AHB_PNP_OFFSET (0xFFFFF000)
+
typedef struct ResetData {
SPARCCPU *cpu;
uint32_t entry; /* save kernel entry in case of reset */
target_ulong sp; /* initial stack pointer */
} ResetData;
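+/*
+ * Emit a seven-instruction SPARC sequence that stores the 32-bit value
+ * @val at the absolute address @addr, returning the next code slot.
+ */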
+static uint32_t *gen_store_u32(uint32_t *code, hwaddr addr, uint32_t val)
+{
+ stl_p(code++, 0x82100000); /* mov %g0, %g1 */
+ stl_p(code++, 0x84100000); /* mov %g0, %g2 */
+ stl_p(code++, 0x03000000 +
+ extract32(addr, 10, 22));
+ /* sethi %hi(addr), %g1 */
+ stl_p(code++, 0x82106000 +
+ extract32(addr, 0, 10));
+ /* or %g1, addr, %g1 */
+ stl_p(code++, 0x05000000 +
+ extract32(val, 10, 22));
+ /* sethi %hi(val), %g2 */
+ stl_p(code++, 0x8410a000 +
+ extract32(val, 0, 10));
+ /* or %g2, val, %g2 */
+ stl_p(code++, 0xc4204000); /* st %g2, [ %g1 ] */
+
+ return code;
+}
+
+/*
+ * When loading a kernel directly into RAM, the machine is expected to
+ * be in the state a bootloader would leave it in (e.g. UART and timer
+ * already initialized). This small code sequence reproduces that
+ * behavior.
+ */
+static void write_bootloader(CPUSPARCState *env, uint8_t *base,
+ hwaddr kernel_addr)
+{
+ uint32_t *p = (uint32_t *) base;
+
+ /* Initialize the UARTs */
+ /* *UART_CONTROL = UART_RECEIVE_ENABLE | UART_TRANSMIT_ENABLE; */
+ p = gen_store_u32(p, 0x80000108, 3);
+
+ /* Initialize the TIMER 0 */
+ /* *GPTIMER_SCALER_RELOAD = 40 - 1; */
+ p = gen_store_u32(p, 0x80000304, 39);
+ /* *GPTIMER0_COUNTER_RELOAD = 0xFFFFFFFE; */
+ p = gen_store_u32(p, 0x80000314, 0xFFFFFFFE);
+ /* *GPTIMER0_CONFIG = GPTIMER_ENABLE | GPTIMER_RESTART; */
+ p = gen_store_u32(p, 0x80000318, 3);
+
+ /* JUMP to the entry point */
+ stl_p(p++, 0x82100000); /* mov %g0, %g1 */
+ stl_p(p++, 0x03000000 + extract32(kernel_addr, 10, 22));
+ /* sethi %hi(kernel_addr), %g1 */
+ stl_p(p++, 0x82106000 + extract32(kernel_addr, 0, 10));
+ /* or %g1, kernel_addr, %g1 */
+ stl_p(p++, 0x81c04000); /* jmp %g1 */
+ stl_p(p++, 0x01000000); /* nop */
+}
+
static void main_cpu_reset(void *opaque)
{
ResetData *s = (ResetData *)opaque;
int bios_size;
int prom_size;
ResetData *reset_info;
+ DeviceState *dev;
+ int i;
+ AHBPnp *ahb_pnp;
+ APBPnp *apb_pnp;
/* Init CPU */
cpu = SPARC_CPU(cpu_create(machine->cpu_type));
/* Reset data */
reset_info = g_malloc0(sizeof(ResetData));
reset_info->cpu = cpu;
- reset_info->sp = 0x40000000 + ram_size;
+ reset_info->sp = LEON3_RAM_OFFSET + ram_size;
qemu_register_reset(main_cpu_reset, reset_info);
- /* Allocate IRQ manager */
- grlib_irqmp_create(0x80000200, env, &cpu_irqs, MAX_PILS, &leon3_set_pil_in);
+ ahb_pnp = GRLIB_AHB_PNP(object_new(TYPE_GRLIB_AHB_PNP));
+ object_property_set_bool(OBJECT(ahb_pnp), true, "realized", &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(ahb_pnp), 0, LEON3_AHB_PNP_OFFSET);
+ grlib_ahb_pnp_add_entry(ahb_pnp, 0, 0, GRLIB_VENDOR_GAISLER,
+ GRLIB_LEON3_DEV, GRLIB_AHB_MASTER,
+ GRLIB_CPU_AREA);
+ apb_pnp = GRLIB_APB_PNP(object_new(TYPE_GRLIB_APB_PNP));
+ object_property_set_bool(OBJECT(apb_pnp), true, "realized", &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(apb_pnp), 0, LEON3_APB_PNP_OFFSET);
+ grlib_ahb_pnp_add_entry(ahb_pnp, LEON3_APB_PNP_OFFSET, 0xFFF,
+ GRLIB_VENDOR_GAISLER, GRLIB_APBMST_DEV,
+ GRLIB_AHB_SLAVE, GRLIB_AHBMEM_AREA);
+
+ /* Allocate IRQ manager */
+ dev = qdev_create(NULL, TYPE_GRLIB_IRQMP);
+ qdev_prop_set_ptr(dev, "set_pil_in", leon3_set_pil_in);
+ qdev_prop_set_ptr(dev, "set_pil_in_opaque", env);
+ qdev_init_nofail(dev);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, LEON3_IRQMP_OFFSET);
+ env->irq_manager = dev;
env->qemu_irq_ack = leon3_irq_manager;
+ cpu_irqs = qemu_allocate_irqs(grlib_irqmp_set_irq, dev, MAX_PILS);
+ grlib_apb_pnp_add_entry(apb_pnp, LEON3_IRQMP_OFFSET, 0xFFF,
+ GRLIB_VENDOR_GAISLER, GRLIB_IRQMP_DEV,
+ 2, 0, GRLIB_APBIO_AREA);
/* Allocate RAM */
if (ram_size > 1 * GiB) {
}
memory_region_allocate_system_memory(ram, NULL, "leon3.ram", ram_size);
- memory_region_add_subregion(address_space_mem, 0x40000000, ram);
+ memory_region_add_subregion(address_space_mem, LEON3_RAM_OFFSET, ram);
/* Allocate BIOS */
prom_size = 8 * MiB;
memory_region_init_ram(prom, NULL, "Leon3.bios", prom_size, &error_fatal);
memory_region_set_readonly(prom, true);
- memory_region_add_subregion(address_space_mem, 0x00000000, prom);
+ memory_region_add_subregion(address_space_mem, LEON3_PROM_OFFSET, prom);
/* Load boot prom */
if (bios_name == NULL) {
- bios_name = PROM_FILENAME;
+ bios_name = LEON3_PROM_FILENAME;
}
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
}
if (bios_size > 0) {
- ret = load_image_targphys(filename, 0x00000000, bios_size);
+ ret = load_image_targphys(filename, LEON3_PROM_OFFSET, bios_size);
if (ret < 0 || ret > prom_size) {
error_report("could not load prom '%s'", filename);
exit(1);
}
} else if (kernel_filename == NULL && !qtest_enabled()) {
- error_report("Can't read bios image %s", filename);
+ error_report("Can't read bios image '%s'", filename
+ ? filename
+ : LEON3_PROM_FILENAME);
exit(1);
}
g_free(filename);
exit(1);
}
if (bios_size <= 0) {
- /* If there is no bios/monitor, start the application. */
- env->pc = entry;
- env->npc = entry + 4;
- reset_info->entry = entry;
+ /*
+ * If there is no bios/monitor just start the application but put
+ * the machine in an initialized state through a little
+ * bootloader.
+ */
+ uint8_t *bootloader_entry;
+
+ bootloader_entry = memory_region_get_ram_ptr(prom);
+ write_bootloader(env, bootloader_entry, entry);
+ env->pc = LEON3_PROM_OFFSET;
+ env->npc = LEON3_PROM_OFFSET + 4;
+ reset_info->entry = LEON3_PROM_OFFSET;
}
}
/* Allocate timers */
- grlib_gptimer_create(0x80000300, 2, CPU_CLK, cpu_irqs, 6);
+ dev = qdev_create(NULL, TYPE_GRLIB_GPTIMER);
+ qdev_prop_set_uint32(dev, "nr-timers", LEON3_TIMER_COUNT);
+ qdev_prop_set_uint32(dev, "frequency", CPU_CLK);
+ qdev_prop_set_uint32(dev, "irq-line", LEON3_TIMER_IRQ);
+ qdev_init_nofail(dev);
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, LEON3_TIMER_OFFSET);
+ for (i = 0; i < LEON3_TIMER_COUNT; i++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
+ cpu_irqs[LEON3_TIMER_IRQ + i]);
+ }
+
+ grlib_apb_pnp_add_entry(apb_pnp, LEON3_TIMER_OFFSET, 0xFFF,
+ GRLIB_VENDOR_GAISLER, GRLIB_GPTIMER_DEV,
+ 0, LEON3_TIMER_IRQ, GRLIB_APBIO_AREA);
/* Allocate uart */
if (serial_hd(0)) {
- grlib_apbuart_create(0x80000100, serial_hd(0), cpu_irqs[3]);
+ dev = qdev_create(NULL, TYPE_GRLIB_APB_UART);
+ qdev_prop_set_chr(dev, "chrdev", serial_hd(0));
+ qdev_init_nofail(dev);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, LEON3_UART_OFFSET);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irqs[LEON3_UART_IRQ]);
+ grlib_apb_pnp_add_entry(apb_pnp, LEON3_UART_OFFSET, 0xFFF,
+ GRLIB_VENDOR_GAISLER, GRLIB_APBUART_DEV, 1,
+ LEON3_UART_IRQ, GRLIB_APBIO_AREA);
}
}
uint32_t initrd_size;
DriveInfo *fd[MAX_FD];
FWCfgState *fw_cfg;
- unsigned int num_vsimms;
DeviceState *dev;
SysBusDevice *s;
error_report("Unsupported depth: %d", graphic_depth);
exit (1);
}
- num_vsimms = 0;
- if (num_vsimms == 0) {
+ if (vga_interface_type != VGA_NONE) {
if (vga_interface_type == VGA_CG3) {
if (graphic_depth != 8) {
error_report("Unsupported depth: %d", graphic_depth);
}
}
- for (i = num_vsimms; i < MAX_VSIMMS; i++) {
+ for (i = 0; i < MAX_VSIMMS; i++) {
/* vsimm registers probed by OBP */
if (hwdef->vsimm[i].reg_base) {
empty_slot_init(hwdef->vsimm[i].reg_base, 0x2000);
/*
* QEMU GRLIB GPTimer Emulator
*
- * Copyright (c) 2010-2011 AdaCore
+ * Copyright (c) 2010-2019 AdaCore
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
*/
#include "qemu/osdep.h"
+#include "hw/sparc/grlib.h"
#include "hw/sysbus.h"
#include "qemu/timer.h"
#include "hw/ptimer.h"
#define COUNTER_RELOAD_OFFSET 0x04
#define TIMER_BASE 0x10
-#define TYPE_GRLIB_GPTIMER "grlib,gptimer"
#define GRLIB_GPTIMER(obj) \
OBJECT_CHECK(GPTimerUnit, (obj), TYPE_GRLIB_GPTIMER)
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
+
#ifndef HW_M48T59_INTERNAL_H
-#define HW_M48T59_INTERNAL_H 1
+#define HW_M48T59_INTERNAL_H
#define M48T59_DEBUG 0
*
* This file is licensed under the terms of the 3-clause BSD license
*/
-#ifndef _TPM_IOCTL_H_
-#define _TPM_IOCTL_H_
+
+#ifndef TPM_IOCTL_H
+#define TPM_IOCTL_H
#include <sys/uio.h>
#include <sys/ioctl.h>
CMD_SET_BUFFERSIZE,
};
-#endif /* _TPM_IOCTL_H */
+#endif /* TPM_IOCTL_H */
ifeq ($(CONFIG_VIRTIO_PCI),y)
obj-$(CONFIG_VHOST_VSOCK) += vhost-vsock-pci.o
obj-$(CONFIG_VHOST_USER_BLK) += vhost-user-blk-pci.o
+obj-$(CONFIG_VHOST_USER_INPUT) += vhost-user-input-pci.o
obj-$(CONFIG_VHOST_USER_SCSI) += vhost-user-scsi-pci.o
obj-$(CONFIG_VHOST_SCSI) += vhost-scsi-pci.o
obj-$(CONFIG_VIRTIO_INPUT_HOST) += virtio-input-host-pci.o
--- /dev/null
+/*
+ * This work is licensed under the terms of the GNU LGPL, version 2 or
+ * later. See the COPYING.LIB file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-input.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "virtio-pci.h"
+
+typedef struct VHostUserInputPCI VHostUserInputPCI;
+
+#define TYPE_VHOST_USER_INPUT_PCI "vhost-user-input-pci"
+
+#define VHOST_USER_INPUT_PCI(obj) \
+ OBJECT_CHECK(VHostUserInputPCI, (obj), TYPE_VHOST_USER_INPUT_PCI)
+
+struct VHostUserInputPCI {
+ VirtIOPCIProxy parent_obj;
+ VHostUserInput vhi;
+};
+
+static void vhost_user_input_pci_instance_init(Object *obj)
+{
+ VHostUserInputPCI *dev = VHOST_USER_INPUT_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vhi, sizeof(dev->vhi),
+ TYPE_VHOST_USER_INPUT);
+
+ object_property_add_alias(obj, "chardev",
+ OBJECT(&dev->vhi), "chardev",
+ &error_abort);
+}
+
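+/*
+ * With this proxy, a vhost-user input backend can be wired up from the
+ * command line, e.g. (the socket path is only an example):
+ *
+ *   -chardev socket,id=vuinput,path=/tmp/input.sock
+ *   -device vhost-user-input-pci,chardev=vuinput
+ */
+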
+static const VirtioPCIDeviceTypeInfo vhost_user_input_pci_info = {
+ .generic_name = TYPE_VHOST_USER_INPUT_PCI,
+ .parent = TYPE_VIRTIO_INPUT_PCI,
+ .instance_size = sizeof(VHostUserInputPCI),
+ .instance_init = vhost_user_input_pci_instance_init,
+};
+
+static void vhost_user_input_pci_register(void)
+{
+ virtio_pci_types_register(&vhost_user_input_pci_info);
+}
+
+type_init(vhost_user_input_pci_register)
typedef struct VirtIOInputHostPCI VirtIOInputHostPCI;
-#define TYPE_VIRTIO_INPUT_HOST_PCI "virtio-input-host-pci-base"
+#define TYPE_VIRTIO_INPUT_HOST_PCI "virtio-input-host-pci"
#define VIRTIO_INPUT_HOST_PCI(obj) \
OBJECT_CHECK(VirtIOInputHostPCI, (obj), TYPE_VIRTIO_INPUT_HOST_PCI)
}
static const VirtioPCIDeviceTypeInfo virtio_input_host_pci_info = {
- .base_name = TYPE_VIRTIO_INPUT_HOST_PCI,
- .generic_name = "virtio-input-host-pci",
- .transitional_name = "virtio-input-host-pci-transitional",
- .non_transitional_name = "virtio-input-host-pci-non-transitional",
+ .generic_name = TYPE_VIRTIO_INPUT_HOST_PCI,
.parent = TYPE_VIRTIO_INPUT_PCI,
.instance_size = sizeof(VirtIOInputHostPCI),
.instance_init = virtio_host_initfn,
return vdev->vq[n].vring.desc;
}
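+/*
+ * A virtqueue is considered enabled once the guest has programmed a
+ * non-zero descriptor ring address for it.
+ */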
+bool virtio_queue_enabled(VirtIODevice *vdev, int n)
+{
+ return virtio_queue_get_desc_addr(vdev, n) != 0;
+}
+
hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
return vdev->vq[n].vring.avail;
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _XTENSA_MEMORY_H
-#define _XTENSA_MEMORY_H
+#ifndef XTENSA_MEMORY_H
+#define XTENSA_MEMORY_H
#include "qemu-common.h"
#include "cpu.h"
*
*/
-#ifndef QAUTHZ_BASE_H__
-#define QAUTHZ_BASE_H__
+#ifndef QAUTHZ_BASE_H
+#define QAUTHZ_BASE_H
#include "qemu-common.h"
#include "qapi/error.h"
const char *identity,
Error **errp);
-#endif /* QAUTHZ_BASE_H__ */
-
+#endif /* QAUTHZ_BASE_H */
*
*/
-#ifndef QAUTHZ_LIST_H__
-#define QAUTHZ_LIST_H__
+#ifndef QAUTHZ_LIST_H
+#define QAUTHZ_LIST_H
#include "authz/base.h"
#include "qapi/qapi-types-authz.h"
const char *match);
-#endif /* QAUTHZ_LIST_H__ */
-
+#endif /* QAUTHZ_LIST_H */
*
*/
-#ifndef QAUTHZ_LIST_FILE_H__
-#define QAUTHZ_LIST_FILE_H__
+#ifndef QAUTHZ_LISTFILE_H
+#define QAUTHZ_LISTFILE_H
#include "authz/list.h"
#include "qapi/qapi-types-authz.h"
bool refresh,
Error **errp);
-
-#endif /* QAUTHZ_LIST_FILE_H__ */
-
+#endif /* QAUTHZ_LISTFILE_H */
*
*/
-#ifndef QAUTHZ_PAM_H__
-#define QAUTHZ_PAM_H__
+#ifndef QAUTHZ_PAMACCT_H
+#define QAUTHZ_PAMACCT_H
#include "authz/base.h"
const char *service,
Error **errp);
-
-#endif /* QAUTHZ_PAM_H__ */
+#endif /* QAUTHZ_PAMACCT_H */
*
*/
-#ifndef QAUTHZ_SIMPLE_H__
-#define QAUTHZ_SIMPLE_H__
+#ifndef QAUTHZ_SIMPLE_H
+#define QAUTHZ_SIMPLE_H
#include "authz/base.h"
Error **errp);
-#endif /* QAUTHZ_SIMPLE_H__ */
-
+#endif /* QAUTHZ_SIMPLE_H */
*/
void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
-#endif /* QEMU_AIO_WAIT */
+#endif /* QEMU_AIO_WAIT_H */
BlockReopenQueue *queue, Error **errp);
void bdrv_reopen_commit(BDRVReopenState *reopen_state);
void bdrv_reopen_abort(BDRVReopenState *reopen_state);
-int bdrv_read(BdrvChild *child, int64_t sector_num,
- uint8_t *buf, int nb_sectors);
-int bdrv_write(BdrvChild *child, int64_t sector_num,
- const uint8_t *buf, int nb_sectors);
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
int bytes, BdrvRequestFlags flags);
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
* This function must be called with iothread lock held.
*/
void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context);
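+/*
+ * Graph-aware variants: the "can"/"try" versions first check with all
+ * attached parents and children whether moving to @ctx is allowed;
+ * @ignore tracks the BdrvChild edges already visited so that recursion
+ * over the block graph terminates.
+ */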
+void bdrv_set_aio_context_ignore(BlockDriverState *bs,
+ AioContext *new_context, GSList **ignore);
+int bdrv_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
+ Error **errp);
+int bdrv_child_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
+ BdrvChild *ignore_child, Error **errp);
+bool bdrv_child_can_set_aio_context(BdrvChild *c, AioContext *ctx,
+ GSList **ignore, Error **errp);
+bool bdrv_can_set_aio_context(BlockDriverState *bs, AioContext *ctx,
+ GSList **ignore, Error **errp);
int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz);
int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);
* can update its reference. */
int (*update_filename)(BdrvChild *child, BlockDriverState *new_base,
const char *filename, Error **errp);
+
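+ /*
+ * Propagate AioContext changes across this edge of the block graph:
+ * can_set_aio_ctx checks whether the change is allowed, set_aio_ctx
+ * applies it; @ignore lists the edges already visited.
+ */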
+ bool (*can_set_aio_ctx)(BdrvChild *child, AioContext *ctx,
+ GSList **ignore, Error **errp);
+ void (*set_aio_ctx)(BdrvChild *child, AioContext *ctx, GSList **ignore);
};
extern const BdrvChildRole child_file;
void bdrv_add_before_write_notifier(BlockDriverState *bs,
NotifierWithReturn *notifier);
-/**
- * bdrv_detach_aio_context:
- *
- * May be called from .bdrv_detach_aio_context() to detach children from the
- * current #AioContext. This is only needed by block drivers that manage their
- * own children. Both ->file and ->backing are automatically handled and
- * block drivers should not call this function on them explicitly.
- */
-void bdrv_detach_aio_context(BlockDriverState *bs);
-
-/**
- * bdrv_attach_aio_context:
- *
- * May be called from .bdrv_attach_aio_context() to attach children to the new
- * #AioContext. This is only needed by block drivers that manage their own
- * children. Both ->file and ->backing are automatically handled and block
- * drivers should not call this function on them explicitly.
- */
-void bdrv_attach_aio_context(BlockDriverState *bs,
- AioContext *new_context);
-
/**
* bdrv_add_aio_context_notifier:
*
-#ifndef CHARDEV_SPICE_H_
-#define CHARDEV_SPICE_H_
+#ifndef CHARDEV_SPICE_H
+#define CHARDEV_SPICE_H
#include <spice.h>
#include "chardev/char-fe.h"
#ifndef QEMU_CAPSTONE_H
-#define QEMU_CAPSTONE_H 1
+#define QEMU_CAPSTONE_H
#ifdef CONFIG_CAPSTONE
* @mmu_idx: MMU index to use for lookup
*
* Look up the specified guest virtual index in the TCG softmmu TLB.
- * If the TLB contains a host virtual address suitable for direct RAM
- * access, then return it. Otherwise (TLB miss, TLB entry is for an
- * I/O access, etc) return NULL.
- *
- * This is the equivalent of the initial fast-path code used by
- * TCG backends for guest load and store accesses.
+ * If we can translate a host virtual address suitable for direct RAM
+ * access, without causing a guest exception, then return it.
+ * Otherwise (TLB entry is for an I/O access, guest software
+ * TLB fill required, etc) return NULL.
*/
+#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
- int access_type, int mmu_idx)
+ MMUAccessType access_type, int mmu_idx)
{
-#if defined(CONFIG_USER_ONLY)
return g2h(addr);
-#else
- CPUTLBEntry *tlbentry = tlb_entry(env, mmu_idx, addr);
- abi_ptr tlb_addr;
- uintptr_t haddr;
-
- switch (access_type) {
- case 0:
- tlb_addr = tlbentry->addr_read;
- break;
- case 1:
- tlb_addr = tlb_addr_write(tlbentry);
- break;
- case 2:
- tlb_addr = tlbentry->addr_code;
- break;
- default:
- g_assert_not_reached();
- }
-
- if (!tlb_hit(tlb_addr, addr)) {
- /* TLB entry is for a different page */
- return NULL;
- }
-
- if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- return NULL;
- }
-
- haddr = addr + tlbentry->addend;
- return (void *)haddr;
-#endif /* defined(CONFIG_USER_ONLY) */
}
+#else
+void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+ MMUAccessType access_type, int mmu_idx);
+#endif
#endif /* CPU_LDST_H */
*/
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
hwaddr index, MemTxAttrs attrs);
-
-/*
- * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
- * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
- * be discarded and looked up again (e.g. via tlb_entry()).
- */
-void tlb_fill(CPUState *cpu, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
-
#endif
#if defined(CONFIG_USER_ONLY)
void translator_loop_temp_check(DisasContextBase *db);
-#endif /* EXEC__TRANSLATOR_H */
+#endif /* EXEC__TRANSLATOR_H */
#ifndef HW_ACPI_PIIX4_H
#define HW_ACPI_PIIX4_H
-Object *piix4_pm_find(void);
+#define TYPE_PIIX4_PM "PIIX4_PM"
#endif
/* Unmap the range of all the notifiers registered to @mr */
void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr);
-#endif /* HW_ARM_SMMU_COMMON */
+#endif /* HW_ARM_SMMU_COMMON_H */
-#ifndef HW_AUDIO_H
-#define HW_AUDIO_H
+#ifndef HW_SOUNDHW_H
+#define HW_SOUNDHW_H
void isa_register_soundhw(const char *name, const char *descr,
int (*init_isa)(ISABus *bus));
#ifndef HW_CPU_CLUSTER_H
#define HW_CPU_CLUSTER_H
-#include "qemu/osdep.h"
#include "hw/qdev.h"
/*
qemu_irq gsi[GSI_NUM_PINS];
} ICH9LPCState;
-Object *ich9_lpc_find(void);
-
#define Q35_MASK(bit, ms_bit, ls_bit) \
((uint##bit##_t)(((1ULL << ((ms_bit) + 1)) - 1) & ~((1ULL << ls_bit) - 1)))
SysBusDeviceClass parent_class;
DeviceRealize realize;
+ DeviceUnrealize unrealize;
void (*pre_save)(IOAPICCommonState *s);
void (*post_load)(IOAPICCommonState *s);
} IOAPICCommonClass;
uint8_t version;
uint64_t irq_count[IOAPIC_NUM_PINS];
int irq_level[IOAPIC_NUM_PINS];
+ int irq_eoi[IOAPIC_NUM_PINS];
+ QEMUTimer *delayed_ioapic_service_timer;
};
void ioapic_reset_common(DeviceState *dev);
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef IOMMU_COMMON_H
-#define IOMMU_COMMON_H
+#ifndef HW_I386_X86_IOMMU_H
+#define HW_I386_X86_IOMMU_H
#include "hw/sysbus.h"
#include "hw/pci/pci.h"
* THE SOFTWARE.
*/
-#ifndef HEATHROW_H
-#define HEATHROW_H
+#ifndef HW_INTC_HEATHROW_PIC_H
+#define HW_INTC_HEATHROW_PIC_H
#define TYPE_HEATHROW "heathrow"
#define HEATHROW(obj) OBJECT_CHECK(HeathrowState, (obj), TYPE_HEATHROW)
#define HEATHROW_NUM_IRQS 64
-#endif /* HEATHROW_H */
+#endif /* HW_INTC_HEATHROW_PIC_H */
* THE SOFTWARE.
*/
-#ifndef XLNX_PMU_IO_INTC_H
-#define XLNX_PMU_IO_INTC_H
+#ifndef HW_INTC_XLNX_PMU_IOMOD_INTC_H
+#define HW_INTC_XLNX_PMU_IOMOD_INTC_H
#include "hw/sysbus.h"
#include "hw/register.h"
RegisterInfo regs_info[XLNXPMUIOINTC_R_MAX];
} XlnxPMUIOIntc;
-#endif /* XLNX_PMU_IO_INTC_H */
+#endif /* HW_INTC_XLNX_PMU_IOMOD_INTC_H */
* + sysbus IRQ 1: interrupt for CPU 1
*/
-#ifndef HW_MISC_SSE_MHU_H
-#define HW_MISC_SSE_MHU_H
+#ifndef HW_MISC_ARMSSE_MHU_H
+#define HW_MISC_ARMSSE_MHU_H
#include "hw/sysbus.h"
--- /dev/null
+/*
+ * GRLIB AHB APB PNP
+ *
+ * Copyright (C) 2019 AdaCore
+ *
+ * Developed by :
+ * Frederic Konrad <frederic.konrad@adacore.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef GRLIB_AHB_APB_PNP_H
+#define GRLIB_AHB_APB_PNP_H
+
+#define TYPE_GRLIB_AHB_PNP "grlib,ahbpnp"
+#define GRLIB_AHB_PNP(obj) \
+ OBJECT_CHECK(AHBPnp, (obj), TYPE_GRLIB_AHB_PNP)
+typedef struct AHBPnp AHBPnp;
+
+#define TYPE_GRLIB_APB_PNP "grlib,apbpnp"
+#define GRLIB_APB_PNP(obj) \
+ OBJECT_CHECK(APBPnp, (obj), TYPE_GRLIB_APB_PNP)
+typedef struct APBPnp APBPnp;
+
+void grlib_ahb_pnp_add_entry(AHBPnp *dev, uint32_t address, uint32_t mask,
+ uint8_t vendor, uint16_t device, int slave,
+ int type);
+void grlib_apb_pnp_add_entry(APBPnp *dev, uint32_t address, uint32_t mask,
+ uint8_t vendor, uint16_t device, uint8_t version,
+ uint8_t irq, int type);
+
+/* VENDORS */
+#define GRLIB_VENDOR_GAISLER (0x01)
+/* DEVICES */
+#define GRLIB_LEON3_DEV (0x03)
+#define GRLIB_APBMST_DEV (0x06)
+#define GRLIB_APBUART_DEV (0x0C)
+#define GRLIB_IRQMP_DEV (0x0D)
+#define GRLIB_GPTIMER_DEV (0x11)
+/* TYPE */
+#define GRLIB_CPU_AREA (0x00)
+#define GRLIB_APBIO_AREA (0x01)
+#define GRLIB_AHBMEM_AREA (0x02)
+
+#define GRLIB_AHB_MASTER (0x00)
+#define GRLIB_AHB_SLAVE (0x01)
+
+#endif /* GRLIB_AHB_APB_PNP_H */
MemoryRegion mmio;
} IMX2WdtState;
-#endif /* IMX7_SNVS_H */
+#endif /* IMX2_WDT_H */
* the COPYING file in the top-level directory.
*
*/
+
#ifndef NRF51_RNG_H
#define NRF51_RNG_H
} NRF51RNGState;
-#endif /* NRF51_RNG_H_ */
+#endif /* NRF51_RNG_H */
* See the COPYING file in the top-level directory.
*/
-#ifndef HW_NET_NE2K_ISA_H
-#define HW_NET_NE2K_ISA_H
+#ifndef HW_NET_NE2000_ISA_H
+#define HW_NET_NE2000_ISA_H
#include "hw/hw.h"
#include "hw/qdev.h"
MemoryRegion mmio;
} DesignwarePCIEHost;
-#endif /* DESIGNWARE_H */
+#endif /* DESIGNWARE_H */
-#ifndef PCI_HOST_APB_H
-#define PCI_HOST_APB_H
+#ifndef HW_PCI_HOST_SABRE_H
+#define HW_PCI_HOST_SABRE_H
#include "hw/sparc/sun4u_iommu.h"
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _PPC_PNV_H
-#define _PPC_PNV_H
+
+#ifndef PPC_PNV_H
+#define PPC_PNV_H
#include "hw/boards.h"
#include "hw/sysbus.h"
#define PNV9_PSIHB_ESB_SIZE 0x0000000000010000ull
#define PNV9_PSIHB_ESB_BASE(chip) PNV9_CHIP_BASE(chip, 0x00060302031c0000ull)
-#endif /* _PPC_PNV_H */
+#endif /* PPC_PNV_H */
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _PPC_PNV_CORE_H
-#define _PPC_PNV_CORE_H
+
+#ifndef PPC_PNV_CORE_H
+#define PPC_PNV_CORE_H
#include "hw/cpu/core.h"
uint32_t id;
MemoryRegion xscom_regs;
} PnvQuad;
-#endif /* _PPC_PNV_CORE_H */
+#endif /* PPC_PNV_CORE_H */
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _PPC_PNV_LPC_H
-#define _PPC_PNV_LPC_H
+
+#ifndef PPC_PNV_LPC_H
+#define PPC_PNV_LPC_H
#include "hw/ppc/pnv_psi.h"
ISABus *pnv_lpc_isa_create(PnvLpcController *lpc, bool use_cpld, Error **errp);
int pnv_dt_lpc(struct PnvChip *chip, void *fdt, int root_offset);
-#endif /* _PPC_PNV_LPC_H */
+#endif /* PPC_PNV_LPC_H */
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _PPC_PNV_OCC_H
-#define _PPC_PNV_OCC_H
+
+#ifndef PPC_PNV_OCC_H
+#define PPC_PNV_OCC_H
#include "hw/ppc/pnv_psi.h"
int psi_irq;
} PnvOCCClass;
-#endif /* _PPC_PNV_OCC_H */
+#endif /* PPC_PNV_OCC_H */
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _PPC_PNV_PSI_H
-#define _PPC_PNV_PSI_H
+
+#ifndef PPC_PNV_PSI_H
+#define PPC_PNV_PSI_H
#include "hw/sysbus.h"
#include "hw/ppc/xics.h"
void pnv_psi_pic_print_info(Pnv9Psi *psi, Monitor *mon);
-#endif /* _PPC_PNV_PSI_H */
+#endif /* PPC_PNV_PSI_H */
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _PPC_PNV_XSCOM_H
-#define _PPC_PNV_XSCOM_H
+
+#ifndef PPC_PNV_XSCOM_H
+#define PPC_PNV_XSCOM_H
#include "qom/object.h"
const char *name,
uint64_t size);
-#endif /* _PPC_PNV_XSCOM_H */
+#endif /* PPC_PNV_XSCOM_H */
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
-#ifndef _SPAPR_OVEC_H
-#define _SPAPR_OVEC_H
+
+#ifndef SPAPR_OVEC_H
+#define SPAPR_OVEC_H
#include "cpu.h"
#include "migration/vmstate.h"
/* migration */
extern const VMStateDescription vmstate_spapr_ovec;
-#endif /* !defined (_SPAPR_OVEC_H) */
+#endif /* SPAPR_OVEC_H */
uint32_t aperture_size);
#endif
-
#ifndef HW_SCSI_EMULATION_H
-#define HW_SCSI_EMULATION_H 1
+#define HW_SCSI_EMULATION_H
typedef struct SCSIBlockLimits {
bool wsnz;
/*
* QEMU GRLIB Components
*
- * Copyright (c) 2010-2011 AdaCore
+ * Copyright (c) 2010-2019 AdaCore
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
*/
/* IRQMP */
+#define TYPE_GRLIB_IRQMP "grlib,irqmp"
typedef void (*set_pil_in_fn) (void *opaque, uint32_t pil_in);
void grlib_irqmp_ack(DeviceState *dev, int intno);
-static inline
-DeviceState *grlib_irqmp_create(hwaddr base,
- CPUSPARCState *env,
- qemu_irq **cpu_irqs,
- uint32_t nr_irqs,
- set_pil_in_fn set_pil_in)
-{
- DeviceState *dev;
-
- assert(cpu_irqs != NULL);
-
- dev = qdev_create(NULL, "grlib,irqmp");
- qdev_prop_set_ptr(dev, "set_pil_in", set_pil_in);
- qdev_prop_set_ptr(dev, "set_pil_in_opaque", env);
-
- qdev_init_nofail(dev);
-
- env->irq_manager = dev;
-
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
-
- *cpu_irqs = qemu_allocate_irqs(grlib_irqmp_set_irq,
- dev,
- nr_irqs);
-
- return dev;
-}
-
/* GPTimer */
-
-static inline
-DeviceState *grlib_gptimer_create(hwaddr base,
- uint32_t nr_timers,
- uint32_t freq,
- qemu_irq *cpu_irqs,
- int base_irq)
-{
- DeviceState *dev;
- int i;
-
- dev = qdev_create(NULL, "grlib,gptimer");
- qdev_prop_set_uint32(dev, "nr-timers", nr_timers);
- qdev_prop_set_uint32(dev, "frequency", freq);
- qdev_prop_set_uint32(dev, "irq-line", base_irq);
-
- qdev_init_nofail(dev);
-
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
-
- for (i = 0; i < nr_timers; i++) {
- sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, cpu_irqs[base_irq + i]);
- }
-
- return dev;
-}
+#define TYPE_GRLIB_GPTIMER "grlib,gptimer"
/* APB UART */
-
-static inline
-DeviceState *grlib_apbuart_create(hwaddr base,
- Chardev *serial,
- qemu_irq irq)
-{
- DeviceState *dev;
-
- dev = qdev_create(NULL, "grlib,apbuart");
- qdev_prop_set_chr(dev, "chrdev", serial);
-
- qdev_init_nofail(dev);
-
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
-
- sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
-
- return dev;
-}
+#define TYPE_GRLIB_APB_UART "grlib,apbuart"
#endif /* GRLIB_H */
* GNU GPL, version 2 or (at your option) any later version.
*/
-#ifndef HW_TIMER_PL031
-#define HW_TIMER_PL031
+#ifndef HW_TIMER_PL031_H
+#define HW_TIMER_PL031_H
#include "hw/sysbus.h"
* top-level directory.
*/
-#ifndef _QEMU_VHOST_VSOCK_H
-#define _QEMU_VHOST_VSOCK_H
+#ifndef QEMU_VHOST_VSOCK_H
+#define QEMU_VHOST_VSOCK_H
#include "hw/virtio/virtio.h"
#include "hw/virtio/vhost.h"
/*< public >*/
} VHostVSock;
-#endif /* _QEMU_VHOST_VSOCK_H */
+#endif /* QEMU_VHOST_VSOCK_H */
* top-level directory.
*/
-#ifndef _QEMU_VIRTIO_CRYPTO_H
-#define _QEMU_VIRTIO_CRYPTO_H
+#ifndef QEMU_VIRTIO_CRYPTO_H
+#define QEMU_VIRTIO_CRYPTO_H
#include "standard-headers/linux/virtio_crypto.h"
#include "hw/virtio/virtio.h"
uint8_t vhost_started;
} VirtIOCrypto;
-#endif /* _QEMU_VIRTIO_CRYPTO_H */
+#endif /* QEMU_VIRTIO_CRYPTO_H */
#define QEMU_VIRTIO_INPUT_H
#include "ui/input.h"
+#include "sysemu/vhost-user-backend.h"
/* ----------------------------------------------------------------- */
/* virtio input protocol */
#define VIRTIO_INPUT_HOST_GET_PARENT_CLASS(obj) \
OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_INPUT_HOST)
+#define TYPE_VHOST_USER_INPUT "vhost-user-input"
+#define VHOST_USER_INPUT(obj) \
+ OBJECT_CHECK(VHostUserInput, (obj), TYPE_VHOST_USER_INPUT)
+#define VHOST_USER_INPUT_GET_PARENT_CLASS(obj) \
+ OBJECT_GET_PARENT_CLASS(obj, TYPE_VHOST_USER_INPUT)
+
typedef struct VirtIOInput VirtIOInput;
typedef struct VirtIOInputClass VirtIOInputClass;
typedef struct VirtIOInputConfig VirtIOInputConfig;
typedef struct VirtIOInputHID VirtIOInputHID;
typedef struct VirtIOInputHost VirtIOInputHost;
+typedef struct VHostUserInput VHostUserInput;
struct VirtIOInputConfig {
virtio_input_config config;
int fd;
};
+struct VHostUserInput {
+ VirtIOInput parent_obj;
+
+ VhostUserBackend *vhost;
+};
+
void virtio_input_send(VirtIOInput *vinput, virtio_input_event *event);
void virtio_input_init_config(VirtIOInput *vinput,
virtio_input_config *config);
VIRTIO_F_IOMMU_PLATFORM, false)
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
+bool virtio_queue_enabled(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n);
* This code is licensed under the GPL version 2 or later. See the
* COPYING file in the top-level directory.
*/
-#ifndef ASPEED_WDT_H
-#define ASPEED_WDT_H
+
+#ifndef WDT_ASPEED_H
+#define WDT_ASPEED_H
#include "hw/sysbus.h"
uint32_t ext_pulse_width_mask;
} AspeedWDTState;
-#endif /* ASPEED_WDT_H */
+#endif /* WDT_ASPEED_H */
* Copyright (c) 2016, Citrix Systems, Inc.
*/
-#ifndef __XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H__
-#define __XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H__
+#ifndef XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H
+#define XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H
/*
* Start of day structure passed to PVH guests and to HVM guests in %ebx.
uint32_t reserved;
};
-#endif /* __XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H__ */
+#endif /* XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H */
-#ifndef QEMU_HW_XEN_BACKEND_H
-#define QEMU_HW_XEN_BACKEND_H
+#ifndef HW_XEN_LEGACY_BACKEND_H
+#define HW_XEN_LEGACY_BACKEND_H
#include "hw/xen/xen_common.h"
#include "hw/xen/xen_pvdev.h"
int xen_config_dev_vkbd(int vdev);
int xen_config_dev_console(int vdev);
-#endif /* QEMU_HW_XEN_BACKEND_H */
+#endif /* HW_XEN_LEGACY_BACKEND_H */
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _XTENSA_MX_PIC_H
-#define _XTENSA_MX_PIC_H
+#ifndef XTENSA_MX_PIC_H
+#define XTENSA_MX_PIC_H
#include "exec/memory.h"
#include "hw/irq.h"
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef XTENSA_LIBISA_H
-#define XTENSA_LIBISA_H
+#ifndef HW_XTENSA_XTENSA_ISA_H
+#define HW_XTENSA_XTENSA_ISA_H
#ifdef __cplusplus
extern "C" {
#ifdef __cplusplus
}
#endif
-#endif /* XTENSA_LIBISA_H */
+#endif /* HW_XTENSA_XTENSA_ISA_H */
COLO_EVENT_FAILOVER,
};
-void colo_info_init(void);
-
void migrate_start_colo_process(MigrationState *s);
bool migration_in_colo_state(void);
COLOMode get_colo_mode(void);
/* failover */
-void colo_do_failover(MigrationState *s);
+void colo_do_failover(void);
void colo_checkpoint_notify(void *opaque);
#endif
* THE SOFTWARE.
*/
-#ifndef QEMU_FILE_H
-#define QEMU_FILE_H
+#ifndef MIGRATION_QEMU_FILE_TYPES_H
+#define MIGRATION_QEMU_FILE_TYPES_H
int qemu_file_get_error(QEMUFile *f);
#define VMSTATE_BUFFER_UNSAFE(_field, _state, _version, _size) \
VMSTATE_BUFFER_UNSAFE_INFO(_field, _state, _version, vmstate_info_buffer, _size)
+/*
+ * These VMSTATE_UNUSED*() macros can be used to fill in the holes
+ * when some of the vmstate fields are obsolete to be compatible with
+ * migrations between new/old binaries.
+ *
+ * CAUTION: when using any of the VMSTATE_UNUSED*() macros please be
+ * sure that the size passed in is the size that was actually *sent*
+ * rather than the size of the *structure*. One example is the
+ * boolean type - the size of the structure can vary depending on the
+ * definition of boolean, however the size we actually sent is always
+ * 1 byte (please refer to implementation of VMSTATE_BOOL_V and
+ * vmstate_info_bool). So here we should always pass in size==1
+ * rather than size==sizeof(bool).
+ */
#define VMSTATE_UNUSED_V(_v, _size) \
VMSTATE_UNUSED_BUFFER(NULL, _v, _size)
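
To make the caution above concrete, here is a minimal sketch of retiring a
migrated bool field; FooState and its fields are hypothetical:

/* "enabled" used to be migrated with VMSTATE_BOOL_V, which puts
 * exactly 1 byte on the wire, so the hole is declared with size 1,
 * not sizeof(bool). */
typedef struct FooState {
    uint32_t level;
} FooState;

static const VMStateDescription vmstate_foo = {
    .name = "foo",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(level, FooState),
        VMSTATE_UNUSED(1), /* was: bool enabled */
        VMSTATE_END_OF_LIST()
    },
};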
-#ifndef QEMU_DRM_H_
-#define QEMU_DRM_H_
+#ifndef QEMU_DRM_H
+#define QEMU_DRM_H
int qemu_drm_rendernode_open(const char *rendernode);
*
*/
-#ifndef QEMU_FILE_MONITOR_H
-#define QEMU_FILE_MONITOR_H
+#ifndef QEMU_FILEMONITOR_H
+#define QEMU_FILEMONITOR_H
#include "qemu-common.h"
const char *dirpath,
int64_t id);
-#endif /* QEMU_FILE_MONITOR_H */
+#endif /* QEMU_FILEMONITOR_H */
* Jozsef
*/
-#ifndef QEMU_JHASH_H__
-#define QEMU_JHASH_H__
+#ifndef QEMU_JHASH_H
+#define QEMU_JHASH_H
#include "qemu/bitops.h"
/* An arbitrary initial parameter */
#define JHASH_INITVAL 0xdeadbeef
-#endif /* QEMU_JHASH_H__ */
+#endif /* QEMU_JHASH_H */
#endif /* CONFIG_LIBPMEM */
-#endif /* !QEMU_PMEM_H */
+#endif /* QEMU_PMEM_H */
*/
#ifndef QEMU_STATS64_H
-#define QEMU_STATS64_H 1
+#define QEMU_STATS64_H
#include "qemu/atomic.h"
*/
#ifndef QEMU_SYS_MEMBARRIER_H
-#define QEMU_SYS_MEMBARRIER_H 1
+#define QEMU_SYS_MEMBARRIER_H
#ifdef CONFIG_MEMBARRIER
/* Only block reordering at the compiler level in the performance-critical
*/
#ifndef QEMU_SYSTEMD_H
-#define QEMU_SYSTEMD_H 1
+#define QEMU_SYSTEMD_H
#define FIRST_SOCKET_ACTIVATION_FD 3 /* defined by systemd ABI */
* This always includes at least the program counter; some targets
* will need to do more. If this hook is not implemented then the
* default is to call @set_pc(tb->pc).
- * @handle_mmu_fault: Callback for handling an MMU fault.
+ * @tlb_fill: Callback for handling a softmmu tlb miss or user-only
+ * address fault. For system mode, if the access is valid, call
+ * tlb_set_page and return true; if the access is invalid, and
+ * probe is true, return false; otherwise raise an exception and
+ * do not return. For user-only mode, always raise an exception
+ * and do not return.
* @get_phys_page_debug: Callback for obtaining a physical address.
* @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
* associated memory transaction attributes to use for the access.
Error **errp);
void (*set_pc)(CPUState *cpu, vaddr value);
void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
- int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_index);
+ bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
MemTxAttrs *attrs);
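
For orientation, the new contract boils down to the following shape; the "xyz"
target prefix, xyz_translate() and EXCP_XYZ_MMU_FAULT are stand-ins (the Alpha
and Arm conversions later in this series are real instances):

/* Sketch of a softmmu tlb_fill hook following the contract above. */
static bool xyz_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                             MMUAccessType access_type, int mmu_idx,
                             bool probe, uintptr_t retaddr)
{
    hwaddr phys;
    int prot;

    /* xyz_translate() stands in for the target's page-table walk. */
    if (xyz_translate(cs, addr, access_type, mmu_idx, &phys, &prot) == 0) {
        tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;                    /* valid access, page installed */
    }
    if (probe) {
        return false;                   /* invalid, but caller only probes */
    }
    cs->exception_index = EXCP_XYZ_MMU_FAULT;
    cpu_loop_exit_restore(cs, retaddr); /* raises; does not return */
}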
* the scsi code for linux.
*/
-#ifndef BLOCK_SCSI_H
-#define BLOCK_SCSI_H
+#ifndef SCSI_CONSTANTS_H
+#define SCSI_CONSTANTS_H
/*
* SCSI opcodes
#ifndef SCSI_UTILS_H
-#define SCSI_UTILS_H 1
+#define SCSI_UTILS_H
#ifdef CONFIG_LINUX
#include <scsi/sg.h>
void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm);
void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow);
+void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow);
void blk_iostatus_enable(BlockBackend *blk);
bool blk_iostatus_is_enabled(const BlockBackend *blk);
BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk);
*/
/* header to be included in non-HVF-specific code */
-#ifndef _HVF_H
-#define _HVF_H
+
+#ifndef HVF_H
+#define HVF_H
#include "qemu-common.h"
#include "qemu/bitops.h"
--- /dev/null
+/*
+ * QEMU vhost-user backend
+ *
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Marc-André Lureau <marcandre.lureau@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_VHOST_USER_BACKEND_H
+#define QEMU_VHOST_USER_BACKEND_H
+
+#include "qom/object.h"
+#include "exec/memory.h"
+#include "qemu/option.h"
+#include "qemu/bitmap.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "chardev/char-fe.h"
+#include "io/channel.h"
+
+#define TYPE_VHOST_USER_BACKEND "vhost-user-backend"
+#define VHOST_USER_BACKEND(obj) \
+ OBJECT_CHECK(VhostUserBackend, (obj), TYPE_VHOST_USER_BACKEND)
+#define VHOST_USER_BACKEND_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(VhostUserBackendClass, (obj), TYPE_VHOST_USER_BACKEND)
+#define VHOST_USER_BACKEND_CLASS(klass) \
+ OBJECT_CLASS_CHECK(VhostUserBackendClass, (klass), TYPE_VHOST_USER_BACKEND)
+
+typedef struct VhostUserBackend VhostUserBackend;
+typedef struct VhostUserBackendClass VhostUserBackendClass;
+
+struct VhostUserBackendClass {
+ ObjectClass parent_class;
+};
+
+struct VhostUserBackend {
+ /* private */
+ Object parent;
+
+ char *chr_name;
+ CharBackend chr;
+ VhostUserState vhost_user;
+ struct vhost_dev dev;
+ VirtIODevice *vdev;
+ bool started;
+ bool completed;
+};
+
+int vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev,
+ unsigned nvqs, Error **errp);
+void vhost_user_backend_start(VhostUserBackend *b);
+void vhost_user_backend_stop(VhostUserBackend *b);
+
+#endif
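
A hedged sketch of how a virtio device drives this backend (MyDev and its
callbacks are invented; the in-tree user added by this series is
vhost-user-input):

typedef struct MyDev {
    VirtIODevice parent_obj;
    VhostUserBackend *vhost;
} MyDev;

/* Called from the device's realize: bind the backend to the virtio
 * device, here with two virtqueues. */
static void my_dev_realize(VirtIODevice *vdev, Error **errp)
{
    MyDev *d = (MyDev *)vdev;   /* QOM cast macro omitted for brevity */

    if (vhost_user_backend_dev_init(d->vhost, vdev, 2, errp) == -1) {
        return;
    }
}

/* Start the backend once the guest driver is ready, stop otherwise. */
static void my_dev_set_status(VirtIODevice *vdev, uint8_t status)
{
    MyDev *d = (MyDev *)vdev;

    if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
        vhost_user_backend_start(d->vhost);
    } else {
        vhost_user_backend_stop(d->vhost);
    }
}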
* (at your option) any later version. See the COPYING file in the
* top-level directory.
*/
+
#ifndef QEMU_UI_KBD_STATE_H
-#define QEMU_UI_KBD_STATE_H 1
+#define QEMU_UI_KBD_STATE_H
#include "qapi/qapi-types-ui.h"
timer_del(&job->sleep_timer);
job->busy = true;
job_unlock();
- aio_co_wake(job->co);
+ aio_co_enter(job->aio_context, job->co);
}
void job_enter(Job *job)
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef TARGET_CPU_H
-#define TARGET_CPU_H
+#ifndef NIOS2_TARGET_CPU_H
+#define NIOS2_TARGET_CPU_H
static inline void cpu_clone_regs(CPUNios2State *env, target_ulong newsp)
{
-#ifndef TARGET_SIGNAL_H
-#define TARGET_SIGNAL_H
+#ifndef NIOS2_TARGET_SIGNAL_H
+#define NIOS2_TARGET_SIGNAL_H
/* this struct defines a stack used during syscall handling */
#include "../generic/signal.h"
-#endif /* TARGET_SIGNAL_H */
+#endif /* NIOS2_TARGET_SIGNAL_H */
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef TARGET_STRUCTS_H
-#define TARGET_STRUCTS_H
+#ifndef NIOS2_TARGET_STRUCTS_H
+#define NIOS2_TARGET_STRUCTS_H
struct target_ipc_perm {
abi_int __key; /* Key. */
-#ifndef TARGET_SYSCALL_H
-#define TARGET_SYSCALL_H
+#ifndef NIOS2_TARGET_SYSCALL_H
+#define NIOS2_TARGET_SYSCALL_H
#define UNAME_MACHINE "nios2"
#define UNAME_MINIMUM_RELEASE "3.19.0"
#define TARGET_MLOCKALL_MCL_CURRENT 1
#define TARGET_MLOCKALL_MCL_FUTURE 2
-#endif /* TARGET_SYSCALL_H */
+#endif /* NIOS2_TARGET_SYSCALL_H */
-#ifndef TARGET_CPU_H
-#define TARGET_CPU_H
+#ifndef RISCV_TARGET_CPU_H
+#define RISCV_TARGET_CPU_H
static inline void cpu_clone_regs(CPURISCVState *env, target_ulong newsp)
{
-#ifndef TARGET_SIGNAL_H
-#define TARGET_SIGNAL_H
+#ifndef RISCV_TARGET_SIGNAL_H
+#define RISCV_TARGET_SIGNAL_H
typedef struct target_sigaltstack {
abi_ulong ss_sp;
#include "../generic/signal.h"
-#endif /* TARGET_SIGNAL_H */
+#endif /* RISCV_TARGET_SIGNAL_H */
* This is a copy of ../aarch64/target_structs.h atm.
*
*/
-#ifndef TARGET_STRUCTS_H
-#define TARGET_STRUCTS_H
+#ifndef RISCV_TARGET_STRUCTS_H
+#define RISCV_TARGET_STRUCTS_H
struct target_ipc_perm {
abi_int __key; /* Key. */
switch (arg1) {
case TARGET_GSI_IEEE_FP_CONTROL:
{
- uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
-
- /* Copied from linux ieee_fpcr_to_swcr. */
- swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
- swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
- swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
- | SWCR_TRAP_ENABLE_DZE
- | SWCR_TRAP_ENABLE_OVF);
- swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
- | SWCR_TRAP_ENABLE_INE);
- swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
- swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
+ uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
+ uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
+
+ swcr &= ~SWCR_STATUS_MASK;
+ swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
if (put_user_u64 (swcr, arg2))
return -TARGET_EFAULT;
switch (arg1) {
case TARGET_SSI_IEEE_FP_CONTROL:
{
- uint64_t swcr, fpcr, orig_fpcr;
+ uint64_t swcr, fpcr;
if (get_user_u64 (swcr, arg2)) {
return -TARGET_EFAULT;
}
- orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
- fpcr = orig_fpcr & FPCR_DYN_MASK;
-
- /* Copied from linux ieee_swcr_to_fpcr. */
- fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
- fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
- fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
- | SWCR_TRAP_ENABLE_DZE
- | SWCR_TRAP_ENABLE_OVF)) << 48;
- fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
- | SWCR_TRAP_ENABLE_INE)) << 57;
- fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
- fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
+ /*
+ * The kernel calls swcr_update_status to update the
+ * status bits from the fpcr at every point that it
+ * could be queried. Therefore, we store the status
+ * bits only in FPCR.
+ */
+ ((CPUAlphaState *)cpu_env)->swcr
+ = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
+
+ fpcr = cpu_alpha_load_fpcr(cpu_env);
+ fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
+ fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
cpu_alpha_store_fpcr(cpu_env, fpcr);
ret = 0;
}
case TARGET_SSI_IEEE_RAISE_EXCEPTION:
{
- uint64_t exc, fpcr, orig_fpcr;
- int si_code;
+ uint64_t exc, fpcr, fex;
if (get_user_u64(exc, arg2)) {
return -TARGET_EFAULT;
}
+ exc &= SWCR_STATUS_MASK;
+ fpcr = cpu_alpha_load_fpcr(cpu_env);
- orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
-
- /* We only add to the exception status here. */
- fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
+ /* Old exceptions are not signaled. */
+ fex = alpha_ieee_fpcr_to_swcr(fpcr);
+ fex = exc & ~fex;
+ fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
+ fex &= ((CPUArchState *)cpu_env)->swcr;
+ /* Update the hardware fpcr. */
+ fpcr |= alpha_ieee_swcr_to_fpcr(exc);
cpu_alpha_store_fpcr(cpu_env, fpcr);
- ret = 0;
- /* Old exceptions are not signaled. */
- fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
-
- /* If any exceptions set by this call,
- and are unmasked, send a signal. */
- si_code = 0;
- if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
- si_code = TARGET_FPE_FLTRES;
- }
- if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
- si_code = TARGET_FPE_FLTUND;
- }
- if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
- si_code = TARGET_FPE_FLTOVF;
- }
- if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
- si_code = TARGET_FPE_FLTDIV;
- }
- if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
- si_code = TARGET_FPE_FLTINV;
- }
- if (si_code != 0) {
+ if (fex) {
+ int si_code = TARGET_FPE_FLTUNK;
target_siginfo_t info;
+
+ if (fex & SWCR_TRAP_ENABLE_DNO) {
+ si_code = TARGET_FPE_FLTUND;
+ }
+ if (fex & SWCR_TRAP_ENABLE_INE) {
+ si_code = TARGET_FPE_FLTRES;
+ }
+ if (fex & SWCR_TRAP_ENABLE_UNF) {
+ si_code = TARGET_FPE_FLTUND;
+ }
+ if (fex & SWCR_TRAP_ENABLE_OVF) {
+ si_code = TARGET_FPE_FLTOVF;
+ }
+ if (fex & SWCR_TRAP_ENABLE_DZE) {
+ si_code = TARGET_FPE_FLTDIV;
+ }
+ if (fex & SWCR_TRAP_ENABLE_INV) {
+ si_code = TARGET_FPE_FLTINV;
+ }
+
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_code = si_code;
queue_signal((CPUArchState *)cpu_env, info.si_signo,
QEMU_SI_FAULT, &info);
}
+ ret = 0;
}
break;
#define TARGET_FPE_FLTRES (6) /* floating point inexact result */
#define TARGET_FPE_FLTINV (7) /* floating point invalid operation */
#define TARGET_FPE_FLTSUB (8) /* subscript out of range */
-#define TARGET_NSIGFPE 8
+#define TARGET_FPE_FLTUNK (14) /* undiagnosed fp exception */
+#define TARGET_NSIGFPE 15
/*
* SIGSEGV si_codes
* Copyright (C) 2001 - 2009 Tensilica Inc.
*/
-#ifndef _XTENSA_UNISTD_H
-#define _XTENSA_UNISTD_H
+#ifndef XTENSA_SYSCALL_NR_H
+#define XTENSA_SYSCALL_NR_H
#define TARGET_NR_spill 0
#define TARGET_NR_xtensa 1
#define TARGET_NR_syscall_count 352
-#endif /* _XTENSA_UNISTD_H */
+#endif /* XTENSA_SYSCALL_NR_H */
-#ifndef XTENSA_TARGET_STRUCTS_T
-#define XTENSA_TARGET_STRUCTS_T
+#ifndef XTENSA_TARGET_STRUCTS_H
+#define XTENSA_TARGET_STRUCTS_H
struct target_ipc_perm {
abi_int __key; /* Key. */
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
-#ifndef _XTENSA_TERMBITS_H
-#define _XTENSA_TERMBITS_H
+#ifndef XTENSA_TERMBITS_H
+#define XTENSA_TERMBITS_H
#include <linux/posix_types.h>
#define TARGET_TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */
#define TARGET_TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
-#endif /* _XTENSA_TERMBITS_H */
+#endif /* XTENSA_TERMBITS_H */
MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
- /* Refresh DIRTY_LOG_MIGRATION bit. */
+ /* Refresh DIRTY_MEMORY_MIGRATION bit. */
memory_region_transaction_begin();
memory_region_update_pending = true;
memory_region_transaction_commit();
{
global_dirty_log = false;
- /* Refresh DIRTY_LOG_MIGRATION bit. */
+ /* Refresh DIRTY_MEMORY_MIGRATION bit. */
memory_region_transaction_begin();
memory_region_update_pending = true;
memory_region_transaction_commit();
return;
}
- colo_do_failover(NULL);
+ colo_do_failover();
}
void failover_request_active(Error **errp)
}
}
-void colo_do_failover(MigrationState *s)
+void colo_do_failover(void)
{
/* Make sure VM stopped while failover happened. */
if (!colo_runstate_is_stopped()) {
}
}
-static void migrate_fd_cleanup(void *opaque)
+static void migrate_fd_cleanup(MigrationState *s)
{
- MigrationState *s = opaque;
-
qemu_bh_delete(s->cleanup_bh);
s->cleanup_bh = NULL;
block_cleanup_parameters(s);
}
+static void migrate_fd_cleanup_schedule(MigrationState *s)
+{
+ /*
+ * Ref the state for bh, because it may be called when
+ * there're already no other refs
+ */
+ object_ref(OBJECT(s));
+ qemu_bh_schedule(s->cleanup_bh);
+}
+
+static void migrate_fd_cleanup_bh(void *opaque)
+{
+ MigrationState *s = opaque;
+ migrate_fd_cleanup(s);
+ object_unref(OBJECT(s));
+}
+
void migrate_set_error(MigrationState *s, const Error *error)
{
qemu_mutex_lock(&s->error_mutex);
* locks.
*/
s->bytes_xfer = 0;
- s->xfer_limit = 0;
s->cleanup_bh = 0;
s->to_dst_file = NULL;
s->state = MIGRATION_STATUS_NONE;
error_report("%s: Unknown ending state %d", __func__, s->state);
break;
}
- qemu_bh_schedule(s->cleanup_bh);
+ migrate_fd_cleanup_schedule(s);
qemu_mutex_unlock_iothread();
}
bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
s->expected_downtime = s->parameters.downtime_limit;
- s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
+ s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
if (error_in) {
migrate_fd_error(s, error_in);
migrate_fd_cleanup(s);
/*< public >*/
size_t bytes_xfer;
- size_t xfer_limit;
QemuThread thread;
QEMUBH *cleanup_bh;
QEMUFile *to_dst_file;
* - to make easier to know what to free at the end of migration
*
* This way we always know who is the owner of each "pages" struct,
- * and we don't need any loocking. It belongs to the migration thread
+ * and we don't need any locking. It belongs to the migration thread
* or to the channel thread. Switching is safe because the migration
* thread is using the channel mutex when changing it, and the channel
 * has to have finished with its own, otherwise pending_job can't be
/**
* migration_bitmap_find_dirty: find the next dirty page from start
*
- * Called with rcu_read_lock() to protect migration_bitmap
- *
- * Returns the byte offset within memory region of the start of a dirty page
+ * Returns the page offset within memory region of the start of a dirty page
*
* @rs: current RAM state
* @rb: RAMBlock where to search for dirty pages
}
static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
- ram_addr_t start, ram_addr_t length)
+ ram_addr_t length)
{
rs->migration_dirty_pages +=
- cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
+ cpu_physical_memory_sync_dirty_bitmap(rb, 0, length,
&rs->num_dirty_pages_period);
}
qemu_mutex_lock(&rs->bitmap_mutex);
rcu_read_lock();
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- migration_bitmap_sync_range(rs, block, 0, block->used_length);
+ migration_bitmap_sync_range(rs, block, block->used_length);
}
ram_counters.remaining = ram_bytes_remaining();
rcu_read_unlock();
* find_dirty_block: find the next dirty page and update any state
* associated with the search process.
*
- * Returns if a page is found
+ * Returns true if a page is found
*
* @rs: current RAM state
* @pss: data about the state of the current dirty page scan
*
* Skips pages that are already sent (!dirty)
*
- * Returns if a queued page is found
+ * Returns true if a queued page is found
*
* @rs: current RAM state
* @pss: data about the state of the current dirty page scan
RAMBlock *block;
/* caller must hold the iothread lock or be in a bh, so there is
- * no writing race against this migration_bitmap
+ * no writing race against the migration bitmap
*/
memory_global_dirty_log_stop();
/* we want to check in the 1st loop, just in case it was the 1st time
and we had to sync the dirty bitmap.
- qemu_get_clock_ns() is a bit expensive, so we only check each some
+ qemu_clock_get_ns() is a bit expensive, so we only check every few
iterations
*/
if ((i & 63) == 0) {
memory_global_dirty_log_sync();
rcu_read_lock();
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- migration_bitmap_sync_range(ram_state, block, 0, block->used_length);
+ migration_bitmap_sync_range(ram_state, block, block->used_length);
}
rcu_read_unlock();
if (!se->ops || !se->ops->save_live_iterate) {
continue;
}
- if (se->ops && se->ops->is_active) {
- if (!se->ops->is_active(se->opaque)) {
- continue;
- }
+ if (se->ops->is_active &&
+ !se->ops->is_active(se->opaque)) {
+ continue;
}
- if (se->ops && se->ops->is_active_iterate) {
- if (!se->ops->is_active_iterate(se->opaque)) {
- continue;
- }
+ if (se->ops->is_active_iterate &&
+ !se->ops->is_active_iterate(se->opaque)) {
+ continue;
}
/*
* In the postcopy phase, any device that doesn't know how to
return -EINVAL;
}
- if (migration_is_blocked(errp)) {
- return -EINVAL;
- }
-
if (migrate_use_block()) {
error_setg(errp, "Block migration and snapshots are incompatible");
return -EINVAL;
return 0;
}
+static int qemu_loadvm_state_header(QEMUFile *f)
+{
+ unsigned int v;
+ int ret;
+
+ v = qemu_get_be32(f);
+ if (v != QEMU_VM_FILE_MAGIC) {
+ error_report("Not a migration stream");
+ return -EINVAL;
+ }
+
+ v = qemu_get_be32(f);
+ if (v == QEMU_VM_FILE_VERSION_COMPAT) {
+ error_report("SaveVM v2 format is obsolete and don't work anymore");
+ return -ENOTSUP;
+ }
+ if (v != QEMU_VM_FILE_VERSION) {
+ error_report("Unsupported migration stream version");
+ return -ENOTSUP;
+ }
+
+ if (migrate_get_current()->send_configuration) {
+ if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
+ error_report("Configuration section missing");
+ qemu_loadvm_state_cleanup();
+ return -EINVAL;
+ }
+ ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);
+
+ if (ret) {
+ qemu_loadvm_state_cleanup();
+ return ret;
+ }
+ }
+ return 0;
+}
+
static int qemu_loadvm_state_setup(QEMUFile *f)
{
SaveStateEntry *se;
{
MigrationIncomingState *mis = migration_incoming_get_current();
Error *local_err = NULL;
- unsigned int v;
int ret;
if (qemu_savevm_state_blocked(&local_err)) {
return -EINVAL;
}
- v = qemu_get_be32(f);
- if (v != QEMU_VM_FILE_MAGIC) {
- error_report("Not a migration stream");
- return -EINVAL;
- }
-
- v = qemu_get_be32(f);
- if (v == QEMU_VM_FILE_VERSION_COMPAT) {
- error_report("SaveVM v2 format is obsolete and don't work anymore");
- return -ENOTSUP;
- }
- if (v != QEMU_VM_FILE_VERSION) {
- error_report("Unsupported migration stream version");
- return -ENOTSUP;
+ ret = qemu_loadvm_state_header(f);
+ if (ret) {
+ return ret;
}
if (qemu_loadvm_state_setup(f) != 0) {
return -EINVAL;
}
- if (migrate_get_current()->send_configuration) {
- if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
- error_report("Configuration section missing");
- qemu_loadvm_state_cleanup();
- return -EINVAL;
- }
- ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);
-
- if (ret) {
- qemu_loadvm_state_cleanup();
- return ret;
- }
- }
-
cpu_synchronize_all_pre_loadvm();
ret = qemu_loadvm_state_main(f, mis);
AioContext *aio_context;
if (migration_is_blocked(errp)) {
- return false;
+ return ret;
}
if (!replay_can_snapshot()) {
void *opaque, QJSON *vmdesc)
{
const VMStateDescription **sub = vmsd->subsections;
- bool subsection_found = false;
+ bool vmdesc_has_subsections = false;
int ret = 0;
trace_vmstate_subsection_save_top(vmsd->name);
trace_vmstate_subsection_save_loop(vmsd->name, vmsdsub->name);
if (vmdesc) {
/* Only create subsection array when we have any */
- if (!subsection_found) {
+ if (!vmdesc_has_subsections) {
json_start_array(vmdesc, "subsections");
- subsection_found = true;
+ vmdesc_has_subsections = true;
}
json_start_object(vmdesc, NULL);
sub++;
}
- if (vmdesc && subsection_found) {
+ if (vmdesc_has_subsections) {
json_end_array(vmdesc);
}
return;
}
- gpa = cpu_get_phys_page_attrs_debug(mon_get_cpu(),
- addr & TARGET_PAGE_MASK, &attrs);
+ gpa = cpu_get_phys_page_attrs_debug(cs, addr & TARGET_PAGE_MASK, &attrs);
if (gpa == -1) {
monitor_printf(mon, "Unmapped\n");
} else {
break;
}
- assert(event_unhandled_count > 0);
-
qemu_mutex_lock(&event_mtx);
+ assert(event_unhandled_count > 0);
event_unhandled_count--;
qemu_cond_broadcast(&event_complete_cond);
qemu_mutex_unlock(&event_mtx);
* later. See the COPYING file in the top-level directory.
*/
-#ifndef QEMU_COLO_PROXY_H
-#define QEMU_COLO_PROXY_H
+#ifndef NET_COLO_H
+#define NET_COLO_H
#include "qemu/jhash.h"
#include "qemu/timer.h"
Packet *packet_new(const void *data, int size, int vnet_hdr_len);
void packet_destroy(void *opaque, void *user_data);
-#endif /* QEMU_COLO_PROXY_H */
+#endif /* NET_COLO_H */
}
if (vprefix6_len < 0 || vprefix6_len > 126) {
error_setg(errp,
- "Invalid prefix provided (prefix len must be in range 0-126");
+ "Invalid IPv6 prefix provided "
+ "(IPv6 prefix length must be between 0 and 126)");
return -1;
}
the new format, the ``-audiodev-help'' option can be used to convert
the current values of the environment variables to ``-audiodev'' options.
+@subsection -realtime (since 4.1)
+
+The @code{-realtime mlock=on|off} argument has been replaced by the
+@code{-overcommit mem-lock=on|off} argument.
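+
+For instance, a command line that previously used @code{-realtime mlock=on}
+would now be written as:
+
+@example
+qemu-system-x86_64 -overcommit mem-lock=on [...]
+@end example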
+
+@subsection -virtfs_synth (since 4.1)
+
+The ``-virtfs_synth'' argument is now deprecated. Please use ``-fsdev synth''
+and ``-device virtio-9p-...'' instead.
+
@section QEMU Machine Protocol (QMP) commands
@subsection block-dirty-bitmap-add "autoload" parameter (since 2.12.0)
#include "qemu/option.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
+#include "qemu/units.h"
#include "qom/object_interfaces.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
return res;
}
-#define IO_BUF_SIZE (2 * 1024 * 1024)
+#define IO_BUF_SIZE (2 * MiB)
/*
* Check if passed sectors are empty (not allocated or contain only 0 bytes)
int64_t n;
/* Probe up to 1 GiB at a time. */
- n = MIN(1 << 30, length - offset);
+ n = MIN(1 * GiB, length - offset);
ret = get_block_status(bs, offset, n, &next);
if (ret < 0) {
char backing_name[PATH_MAX];
QDict *options = NULL;
- if (bs->backing_format[0] != '\0') {
- options = qdict_new();
- qdict_put_str(options, "driver", bs->backing_format);
- }
-
- if (force_share) {
- if (!options) {
+ if (bs->backing) {
+ if (bs->backing_format[0] != '\0') {
options = qdict_new();
+ qdict_put_str(options, "driver", bs->backing_format);
}
- qdict_put_bool(options, BDRV_OPT_FORCE_SHARE, true);
- }
- bdrv_get_backing_filename(bs, backing_name, sizeof(backing_name));
- blk_old_backing = blk_new_open(backing_name, NULL,
- options, src_flags, &local_err);
- if (!blk_old_backing) {
- error_reportf_err(local_err,
- "Could not open old backing file '%s': ",
- backing_name);
- ret = -1;
- goto out;
+
+ if (force_share) {
+ if (!options) {
+ options = qdict_new();
+ }
+ qdict_put_bool(options, BDRV_OPT_FORCE_SHARE, true);
+ }
+ bdrv_get_backing_filename(bs, backing_name, sizeof(backing_name));
+ blk_old_backing = blk_new_open(backing_name, NULL,
+ options, src_flags, &local_err);
+ if (!blk_old_backing) {
+ error_reportf_err(local_err,
+ "Could not open old backing file '%s': ",
+ backing_name);
+ ret = -1;
+ goto out;
+ }
+ } else {
+ blk_old_backing = NULL;
}
if (out_baseimg[0]) {
*/
if (!unsafe) {
int64_t size;
- int64_t old_backing_size;
+ int64_t old_backing_size = 0;
int64_t new_backing_size = 0;
uint64_t offset;
int64_t n;
ret = -1;
goto out;
}
- old_backing_size = blk_getlength(blk_old_backing);
- if (old_backing_size < 0) {
- char backing_name[PATH_MAX];
+ if (blk_old_backing) {
+ old_backing_size = blk_getlength(blk_old_backing);
+ if (old_backing_size < 0) {
+ char backing_name[PATH_MAX];
- bdrv_get_backing_filename(bs, backing_name, sizeof(backing_name));
- error_report("Could not get size of '%s': %s",
- backing_name, strerror(-old_backing_size));
- ret = -1;
- goto out;
+ bdrv_get_backing_filename(bs, backing_name,
+ sizeof(backing_name));
+ error_report("Could not get size of '%s': %s",
+ backing_name, strerror(-old_backing_size));
+ ret = -1;
+ goto out;
+ }
}
if (blk_new_backing) {
new_backing_size = blk_getlength(blk_new_backing);
}
for (offset = 0; offset < size; offset += n) {
+ bool buf_old_is_zero = false;
+
/* How many bytes can we handle with the next read? */
n = MIN(IO_BUF_SIZE, size - offset);
*/
if (offset >= old_backing_size) {
memset(buf_old, 0, n);
+ buf_old_is_zero = true;
} else {
if (offset + n > old_backing_size) {
n = old_backing_size - offset;
if (compare_buffers(buf_old + written, buf_new + written,
n - written, &pnum))
{
- ret = blk_pwrite(blk, offset + written,
- buf_old + written, pnum, 0);
+ if (buf_old_is_zero) {
+ ret = blk_pwrite_zeroes(blk, offset + written, pnum, 0);
+ } else {
+ ret = blk_pwrite(blk, offset + written,
+ buf_old + written, pnum, 0);
+ }
if (ret < 0) {
error_report("Error while writing to COW image: %s",
strerror(-ret));
Perform a consistency check on the disk image @var{filename}. The command can
output in the format @var{ofmt} which is either @code{human} or @code{json}.
+The JSON output is an object of QAPI type @code{ImageCheck}.
If @code{-r} is specified, qemu-img tries to repair any inconsistencies found
during the check. @code{-r leaks} repairs only cluster leaks, whereas
Give information about the disk image @var{filename}. Use it in
particular to know the size reserved on disk which can be different
from the displayed size. If VM snapshots are stored in the disk image,
-they are displayed too. The command can output in the format @var{ofmt}
-which is either @code{human} or @code{json}.
+they are displayed too.
If a disk image has a backing file chain, information about each disk image in
the chain can be recursively enumerated by using the option @code{--backing-chain}.
qemu-img info --backing-chain snap2.qcow2
@end example
+The command can output in the format @var{ofmt} which is either @code{human} or
+@code{json}. The JSON output is an object of QAPI type @code{ImageInfo}; with
+@code{--backing-chain}, it is an array of @code{ImageInfo} objects.
+
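+For example, to get the machine-readable description of the whole chain
+from the example above:
+
+@example
+qemu-img info --backing-chain --output=json snap2.qcow2
+@end example
+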
+@code{--output=human} reports the following information (for every image in the
+chain):
+@table @var
+@item image
+The image file name
+
+@item file format
+The image format
+
+@item virtual size
+The size of the guest disk
+
+@item disk size
+How much space the image file occupies on the host file system (may be shown as
+0 if this information is unavailable, e.g. because there is no file system)
+
+@item cluster_size
+Cluster size of the image format, if applicable
+
+@item encrypted
+Whether the image is encrypted (only present if so)
+
+@item cleanly shut down
+This is shown as @code{no} if the image is dirty and will have to be
+auto-repaired the next time it is opened in qemu.
+
+@item backing file
+The backing file name, if present
+
+@item backing file format
+The format of the backing file, if the image enforces it
+
+@item Snapshot list
+A list of all internal snapshots
+
+@item Format specific information
+Further information whose structure depends on the image format. This section
+is a textual representation of the respective @code{ImageInfoSpecific*} QAPI
+object (e.g. @code{ImageInfoSpecificQCow2} for qcow2 images).
+@end table
+
@item map [--object @var{objectdef}] [--image-opts] [-f @var{fmt}] [--output=@var{ofmt}] [-U] @var{filename}
Dump the metadata of image @var{filename} and its backing file chain.
to size logical volumes or SAN LUNs appropriately for the image that will be
placed in them. The values reported are guaranteed to be large enough to fit
the image. The command can output in the format @var{ofmt} which is either
-@code{human} or @code{json}.
+@code{human} or @code{json}. The JSON output is an object of QAPI type
+@code{BlockMeasureInfo}.
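+
+For instance, to measure the space needed to convert an existing image to
+qcow2 (the file name is a placeholder):
+
+@example
+qemu-img measure -O qcow2 --output=json source.img
+@end example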
If the size @var{N} is given then act as if creating a new empty image file
using @command{qemu-img create}. If @var{filename} is given then act as if
{
int ret;
- if (bytes >> 9 > BDRV_REQUEST_MAX_SECTORS) {
+ if (bytes > BDRV_REQUEST_MAX_BYTES) {
return -ERANGE;
}
if (bytes < 0) {
print_cvtnum_err(bytes, argv[optind]);
return bytes;
- } else if (bytes >> BDRV_SECTOR_BITS > BDRV_REQUEST_MAX_SECTORS) {
+ } else if (bytes > BDRV_REQUEST_MAX_BYTES) {
printf("length cannot exceed %"PRIu64", given %s\n",
- (uint64_t)BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS,
- argv[optind]);
+ (uint64_t)BDRV_REQUEST_MAX_BYTES, argv[optind]);
return -EINVAL;
}
ETEXI
DEF("fsdev", HAS_ARG, QEMU_OPTION_fsdev,
- "-fsdev fsdriver,id=id[,path=path,][security_model={mapped-xattr|mapped-file|passthrough|none}]\n"
- " [,writeout=immediate][,readonly][,socket=socket|sock_fd=sock_fd][,fmode=fmode][,dmode=dmode]\n"
+ "-fsdev local,id=id,path=path,security_model=mapped-xattr|mapped-file|passthrough|none\n"
+ " [,writeout=immediate][,readonly][,fmode=fmode][,dmode=dmode]\n"
" [[,throttling.bps-total=b]|[[,throttling.bps-read=r][,throttling.bps-write=w]]]\n"
" [[,throttling.iops-total=i]|[[,throttling.iops-read=r][,throttling.iops-write=w]]]\n"
" [[,throttling.bps-total-max=bm]|[[,throttling.bps-read-max=rm][,throttling.bps-write-max=wm]]]\n"
" [[,throttling.iops-total-max=im]|[[,throttling.iops-read-max=irm][,throttling.iops-write-max=iwm]]]\n"
- " [[,throttling.iops-size=is]]\n",
+ " [[,throttling.iops-size=is]]\n"
+ "-fsdev proxy,id=id,socket=socket[,writeout=immediate][,readonly]\n"
+ "-fsdev proxy,id=id,sock_fd=sock_fd[,writeout=immediate][,readonly]\n"
+ "-fsdev synth,id=id\n",
QEMU_ARCH_ALL)
STEXI
-@item -fsdev @var{fsdriver},id=@var{id},path=@var{path},[security_model=@var{security_model}][,writeout=@var{writeout}][,readonly][,socket=@var{socket}|sock_fd=@var{sock_fd}][,fmode=@var{fmode}][,dmode=@var{dmode}]
+@item -fsdev local,id=@var{id},path=@var{path},security_model=@var{security_model} [,writeout=@var{writeout}][,readonly][,fmode=@var{fmode}][,dmode=@var{dmode}] [,throttling.@var{option}=@var{value}[,throttling.@var{option}=@var{value}[,...]]]
+@itemx -fsdev proxy,id=@var{id},socket=@var{socket}[,writeout=@var{writeout}][,readonly]
+@itemx -fsdev proxy,id=@var{id},sock_fd=@var{sock_fd}[,writeout=@var{writeout}][,readonly]
+@itemx -fsdev synth,id=@var{id}[,readonly]
@findex -fsdev
Define a new file system device. Valid options are:
@table @option
-@item @var{fsdriver}
-This option specifies the fs driver backend to use.
-Currently "local" and "proxy" file system drivers are supported.
+@item local
+Accesses to the filesystem are done by QEMU.
+@item proxy
+Accesses to the filesystem are done by virtfs-proxy-helper(1).
+@item synth
+Synthetic filesystem, only used by QTests.
@item id=@var{id}
-Specifies identifier for this device
+Specifies an identifier for this device.
@item path=@var{path}
Specifies the export path for the file system device. Files under
this path will be available to the 9p client on the guest.
read-write access is given.
@item socket=@var{socket}
Enables proxy filesystem driver to use passed socket file for communicating
-with virtfs-proxy-helper
+with virtfs-proxy-helper(1).
@item sock_fd=@var{sock_fd}
Enables proxy filesystem driver to use passed socket descriptor for
-communicating with virtfs-proxy-helper. Usually a helper like libvirt
-will create socketpair and pass one of the fds as sock_fd
+communicating with virtfs-proxy-helper(1). Usually a helper like libvirt
+will create a socketpair and pass one of the fds as sock_fd.
@item fmode=@var{fmode}
Specifies the default mode for newly created files on the host. Works only
with security models "mapped-xattr" and "mapped-file".
@item dmode=@var{dmode}
Specifies the default mode for newly created directories on the host. Works
only with security models "mapped-xattr" and "mapped-file".
+@item throttling.bps-total=@var{b},throttling.bps-read=@var{r},throttling.bps-write=@var{w}
+Specify bandwidth throttling limits in bytes per second, either for all request
+types or for reads or writes only.
+@item throttling.bps-total-max=@var{bm},bps-read-max=@var{rm},bps-write-max=@var{wm}
+Specify bursts in bytes per second, either for all request types or for reads
+or writes only. Bursts allow the guest I/O to spike above the limit
+temporarily.
+@item throttling.iops-total=@var{i},throttling.iops-read=@var{r}, throttling.iops-write=@var{w}
+Specify request rate limits in requests per second, either for all request
+types or for reads or writes only.
+@item throttling.iops-total-max=@var{im},throttling.iops-read-max=@var{irm}, throttling.iops-write-max=@var{iwm}
+Specify bursts in requests per second, either for all request types or for reads
+or writes only. Bursts allow the guest I/O to spike above the limit temporarily.
+@item throttling.iops-size=@var{is}
+Let every @var{is} bytes of a request count as a new request for iops
+throttling purposes.
@end table
--fsdev option is used along with -device driver "virtio-9p-pci".
-@item -device virtio-9p-pci,fsdev=@var{id},mount_tag=@var{mount_tag}
-Options for virtio-9p-pci driver are:
+-fsdev option is used along with -device driver "virtio-9p-...".
+@item -device virtio-9p-@var{type},fsdev=@var{id},mount_tag=@var{mount_tag}
+Options for the virtio-9p-... driver are:
@table @option
+@item @var{type}
+Specifies the variant to be used. Supported values are "pci", "ccw" or "device",
+depending on the machine type.
@item fsdev=@var{id}
-Specifies the id value specified along with -fsdev option
+Specifies the id value specified along with -fsdev option.
@item mount_tag=@var{mount_tag}
-Specifies the tag name to be used by the guest to mount this export point
+Specifies the tag name to be used by the guest to mount this export point.
@end table
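
Putting the two options together, a host directory can be exported to the
guest like this (path and tag are placeholders):

@example
qemu-system-x86_64 [...] \
  -fsdev local,id=fs0,path=/srv/share,security_model=mapped-xattr \
  -device virtio-9p-pci,fsdev=fs0,mount_tag=hostshare
@end example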
ETEXI
DEF("virtfs", HAS_ARG, QEMU_OPTION_virtfs,
- "-virtfs local,path=path,mount_tag=tag,security_model=[mapped-xattr|mapped-file|passthrough|none]\n"
- " [,id=id][,writeout=immediate][,readonly][,socket=socket|sock_fd=sock_fd][,fmode=fmode][,dmode=dmode]\n",
+ "-virtfs local,path=path,mount_tag=tag,security_model=mapped-xattr|mapped-file|passthrough|none\n"
+ " [,id=id][,writeout=immediate][,readonly][,fmode=fmode][,dmode=dmode]\n"
+ "-virtfs proxy,mount_tag=tag,socket=socket[,id=id][,writeout=immediate][,readonly]\n"
+ "-virtfs proxy,mount_tag=tag,sock_fd=sock_fd[,id=id][,writeout=immediate][,readonly]\n"
+ "-virtfs synth,mount_tag=tag[,id=id][,readonly]\n",
QEMU_ARCH_ALL)
STEXI
-@item -virtfs @var{fsdriver}[,path=@var{path}],mount_tag=@var{mount_tag}[,security_model=@var{security_model}][,writeout=@var{writeout}][,readonly][,socket=@var{socket}|sock_fd=@var{sock_fd}][,fmode=@var{fmode}][,dmode=@var{dmode}]
+@item -virtfs local,path=@var{path},mount_tag=@var{mount_tag} ,security_model=@var{security_model}[,writeout=@var{writeout}][,readonly] [,fmode=@var{fmode}][,dmode=@var{dmode}]
+@itemx -virtfs proxy,socket=@var{socket},mount_tag=@var{mount_tag} [,writeout=@var{writeout}][,readonly]
+@itemx -virtfs proxy,sock_fd=@var{sock_fd},mount_tag=@var{mount_tag} [,writeout=@var{writeout}][,readonly]
+@itemx -virtfs synth,mount_tag=@var{mount_tag}
@findex -virtfs
-The general form of a Virtual File system pass-through options are:
+Define a new filesystem device and expose it to the guest using a virtio-9p-device. The general form of the Virtual File system pass-through options is:
@table @option
-@item @var{fsdriver}
-This option specifies the fs driver backend to use.
-Currently "local" and "proxy" file system drivers are supported.
+@item local
+Accesses to the filesystem are done by QEMU.
+@item proxy
+Accesses to the filesystem are done by virtfs-proxy-helper(1).
+@item synth
+Synthetic filesystem, only used by QTests.
@item id=@var{id}
-Specifies identifier for this device
+Specifies an identifier for the filesystem device.
@item path=@var{path}
Specifies the export path for the file system device. Files under
this path will be available to the 9p client on the guest.
read-write access is given.
@item socket=@var{socket}
Enables proxy filesystem driver to use passed socket file for
-communicating with virtfs-proxy-helper. Usually a helper like libvirt
-will create socketpair and pass one of the fds as sock_fd
+communicating with virtfs-proxy-helper(1). Usually a helper like libvirt
+will create a socketpair and pass one of the fds as sock_fd.
@item sock_fd
Enables proxy filesystem driver to use passed 'sock_fd' as the socket
-descriptor for interfacing with virtfs-proxy-helper
+descriptor for interfacing with virtfs-proxy-helper(1).
@item fmode=@var{fmode}
Specifies the default mode for newly created files on the host. Works only
with security models "mapped-xattr" and "mapped-file".
@item dmode=@var{dmode}
Specifies the default mode for newly created directories on the host. Works
only with security models "mapped-xattr" and "mapped-file".
+@item mount_tag=@var{mount_tag}
+Specifies the tag name to be used by the guest to mount this export point.
@end table
ETEXI
STEXI
@item -virtfs_synth
@findex -virtfs_synth
-Create synthetic file system image
+Create synthetic file system image. Note that this option is now deprecated.
+Please use @code{-fsdev synth} and @code{-device virtio-9p-...} instead.
ETEXI
DEF("iscsi", HAS_ARG, QEMU_OPTION_iscsi,
The file format is libpcap, so it can be analyzed with tools such as tcpdump
or Wireshark.
-@item -object colo-compare,id=@var{id},primary_in=@var{chardevid},secondary_in=@var{chardevid},outdev=@var{chardevid}[,vnet_hdr_support]
+@item -object colo-compare,id=@var{id},primary_in=@var{chardevid},secondary_in=@var{chardevid},outdev=@var{chardevid},iothread=@var{id}[,vnet_hdr_support]
Colo-compare gets a packet from primary_in@var{chardevid} and secondary_in@var{chardevid}, then compares the primary packet with the
secondary packet. If the packets are the same, we will output the primary
packet to outdev@var{chardevid}, else we will notify colo-frame to
do a checkpoint and send the primary packet to outdev@var{chardevid}.
-if it has the vnet_hdr_support flag, colo compare will send/recv packet with vnet_hdr_len.
+In order to improve efficiency, the comparison task runs in a
+separate iothread. If the vnet_hdr_support flag is set, colo-compare
+will send/recv packets with vnet_hdr_len.
It must be used together with filter-mirror and filter-redirector.
-chardev socket,id=compare0-0,host=3.3.3.3,port=9001
-chardev socket,id=compare_out,host=3.3.3.3,port=9005,server,nowait
-chardev socket,id=compare_out0,host=3.3.3.3,port=9005
+-object iothread,id=iothread1
-object filter-mirror,id=m0,netdev=hn0,queue=tx,outdev=mirror0
-object filter-redirector,netdev=hn0,id=redire0,queue=rx,indev=compare_out
-object filter-redirector,netdev=hn0,id=redire1,queue=rx,outdev=compare0
--object colo-compare,id=comp0,primary_in=compare0-0,secondary_in=compare1,outdev=compare_out0
+-object colo-compare,id=comp0,primary_in=compare0-0,secondary_in=compare1,outdev=compare_out0,iothread=iothread1
secondary:
-netdev tap,id=hn0,vhost=off,script=/etc/qemu-ifup,downscript=/etc/qemu-ifdown
-#ifndef VSS_HANDLES
-#define VSS_HANDLES
+#ifndef VSS_HANDLES_H
+#define VSS_HANDLES_H
/* Constants for QGA VSS Provider */
return;
} else if (runstate_check(RUN_STATE_SUSPENDED)) {
return;
+ } else if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
+ error_setg(errp, "Migration is not finalized yet");
+ return;
}
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
+
#ifndef QEMU_PR_HELPER_H
-#define QEMU_PR_HELPER_H 1
+#define QEMU_PR_HELPER_H
#define PR_HELPER_CDB_SIZE 16
#define PR_HELPER_SENSE_SIZE 96
cc->set_pc = alpha_cpu_set_pc;
cc->gdb_read_register = alpha_cpu_gdb_read_register;
cc->gdb_write_register = alpha_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = alpha_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = alpha_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->do_transaction_failed = alpha_cpu_do_transaction_failed;
cc->do_unaligned_access = alpha_cpu_do_unaligned_access;
cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;
#define SWCR_STATUS_DNO (1U << 22)
#define SWCR_STATUS_MASK ((1U << 23) - (1U << 17))
+#define SWCR_STATUS_TO_EXCSUM_SHIFT 16
+
#define SWCR_MASK (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK | SWCR_STATUS_MASK)
/* MMU modes definitions */
/* The FPCR, and disassembled portions thereof. */
uint32_t fpcr;
+#ifdef CONFIG_USER_ONLY
+ uint32_t swcr;
+#endif
uint32_t fpcr_exc_enable;
float_status fp_status;
uint8_t fpcr_dyn_round;
is returned if the signal was handled by the virtual CPU. */
int cpu_alpha_signal_handler(int host_signum, void *pinfo,
void *puc);
-int alpha_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
void QEMU_NORETURN dynamic_excp(CPUAlphaState *, uintptr_t, int, int);
void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t);
*pflags = env->flags & ENV_FLAG_TB_MASK;
}
+#ifdef CONFIG_USER_ONLY
+/* Copied from linux ieee_swcr_to_fpcr. */
+static inline uint64_t alpha_ieee_swcr_to_fpcr(uint64_t swcr)
+{
+ uint64_t fpcr = 0;
+
+ fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
+ fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
+ fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
+ | SWCR_TRAP_ENABLE_DZE
+ | SWCR_TRAP_ENABLE_OVF)) << 48;
+ fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
+ | SWCR_TRAP_ENABLE_INE)) << 57;
+ fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
+ fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
+
+ return fpcr;
+}
+
+/* Copied from linux ieee_fpcr_to_swcr. */
+static inline uint64_t alpha_ieee_fpcr_to_swcr(uint64_t fpcr)
+{
+ uint64_t swcr = 0;
+
+ swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
+ swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
+ swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
+ | SWCR_TRAP_ENABLE_DZE
+ | SWCR_TRAP_ENABLE_OVF);
+ swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF | SWCR_TRAP_ENABLE_INE);
+ swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
+ swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
+
+ return swcr;
+}
+#endif /* CONFIG_USER_ONLY */
+
#endif /* ALPHA_CPU_H */
if (exc) {
env->fpcr |= exc;
exc &= ~ignore;
- if (exc) {
- exc &= env->fpcr_exc_enable;
- fp_exc_raise1(env, GETPC(), exc, regno, EXC_M_SWC);
+#ifdef CONFIG_USER_ONLY
+ /*
+ * In user mode, the kernel's software handler only
+ * delivers a signal if the exception is enabled.
+ */
+ if (!(exc & env->fpcr_exc_enable)) {
+ return;
+ }
+#else
+ /*
+ * In system mode, the software handler gets invoked
+ * for any non-ignored exception.
+ */
+ if (!exc) {
+ return;
}
+#endif
+ exc &= env->fpcr_exc_enable;
+ fp_exc_raise1(env, GETPC(), exc, regno, EXC_M_SWC);
}
}
#define CONVERT_BIT(X, SRC, DST) \
(SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
-uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env)
+uint64_t cpu_alpha_load_fpcr(CPUAlphaState *env)
{
return (uint64_t)env->fpcr << 32;
}
-void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val)
+void cpu_alpha_store_fpcr(CPUAlphaState *env, uint64_t val)
{
uint32_t fpcr = val >> 32;
uint32_t t = 0;
env->fpcr_flush_to_zero = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;
+
+#ifdef CONFIG_USER_ONLY
+ /*
+ * Override some of these bits with the contents of ENV->SWCR.
+ * In system mode, some of these would trap to the kernel, at
+ * which point the kernel's handler would emulate and apply
+ * the software exception mask.
+ */
+ if (env->swcr & SWCR_MAP_DMZ) {
+ env->fp_status.flush_inputs_to_zero = 1;
+ }
+ if (env->swcr & SWCR_MAP_UMZ) {
+ env->fp_status.flush_to_zero = 1;
+ }
+ env->fpcr_exc_enable &= ~(alpha_ieee_swcr_to_fpcr(env->swcr) >> 32);
+#endif
}
uint64_t helper_load_fpcr(CPUAlphaState *env)
}
#if defined(CONFIG_USER_ONLY)
-int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
+bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
AlphaCPU *cpu = ALPHA_CPU(cs);
cs->exception_index = EXCP_MMFAULT;
cpu->env.trap_arg0 = address;
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else
/* Returns the OSF/1 entMM failure indication, or -1 on success. */
return (fail >= 0 ? -1 : phys);
}
-int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size, int rw,
- int mmu_idx)
+bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
AlphaCPU *cpu = ALPHA_CPU(cs);
CPUAlphaState *env = &cpu->env;
target_ulong phys;
int prot, fail;
- fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
+ fail = get_physical_address(env, addr, 1 << access_type,
+ mmu_idx, &phys, &prot);
if (unlikely(fail >= 0)) {
+ if (probe) {
+ return false;
+ }
cs->exception_index = EXCP_MMFAULT;
env->trap_arg0 = addr;
env->trap_arg1 = fail;
- env->trap_arg2 = (rw == 2 ? -1 : rw);
- return 1;
+ env->trap_arg2 = (access_type == MMU_INST_FETCH ? -1 : access_type);
+ cpu_loop_exit_restore(cs, retaddr);
}
tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
- return 0;
+ return true;
}
#endif /* USER_ONLY */
void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
- static const char *linux_reg_names[] = {
- "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ",
- "t7 ", "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "fp ",
- "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ",
- "t10", "t11", "ra ", "t12", "at ", "gp ", "sp ", "zero",
+ static const char linux_reg_names[31][4] = {
+ "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
+ "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
+ "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
+ "t10", "t11", "ra", "t12", "at", "gp", "sp"
};
AlphaCPU *cpu = ALPHA_CPU(cs);
CPUAlphaState *env = &cpu->env;
int i;
- qemu_fprintf(f, " PC " TARGET_FMT_lx " PS %02x\n",
+ qemu_fprintf(f, "PC " TARGET_FMT_lx " PS %02x\n",
env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8));
for (i = 0; i < 31; i++) {
- qemu_fprintf(f, "IR%02d %s " TARGET_FMT_lx "%c", i,
+ qemu_fprintf(f, "%-8s" TARGET_FMT_lx "%c",
linux_reg_names[i], cpu_alpha_load_gr(env, i),
(i % 3) == 2 ? '\n' : ' ');
}
- qemu_fprintf(f, "lock_a " TARGET_FMT_lx " lock_v " TARGET_FMT_lx "\n",
+ qemu_fprintf(f, "lock_a " TARGET_FMT_lx " lock_v " TARGET_FMT_lx "\n",
env->lock_addr, env->lock_value);
if (flags & CPU_DUMP_FPU) {
for (i = 0; i < 31; i++) {
- qemu_fprintf(f, "FIR%02d %016" PRIx64 "%c", i, env->fir[i],
+ qemu_fprintf(f, "f%-7d%016" PRIx64 "%c", i, env->fir[i],
(i % 3) == 2 ? '\n' : ' ');
}
+ qemu_fprintf(f, "fpcr %016" PRIx64 "\n", cpu_alpha_load_fpcr(env));
}
qemu_fprintf(f, "\n");
}
env->error_code = 0;
cpu_loop_exit_restore(cs, retaddr);
}
-
-/* try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = alpha_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret != 0)) {
- /* Exception index and error code are already set */
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
#endif /* CONFIG_USER_ONLY */
DEFINE_PROP_END_OF_LIST()
};
-#ifdef CONFIG_USER_ONLY
-static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
-
- env->exception.vaddress = address;
- if (rw == 2) {
- cs->exception_index = EXCP_PREFETCH_ABORT;
- } else {
- cs->exception_index = EXCP_DATA_ABORT;
- }
- return 1;
-}
-#endif
-
static gchar *arm_gdb_arch_name(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
cc->synchronize_from_tb = arm_cpu_synchronize_from_tb;
cc->gdb_read_register = arm_cpu_gdb_read_register;
cc->gdb_write_register = arm_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
-#else
+#ifndef CONFIG_USER_ONLY
cc->do_interrupt = arm_cpu_do_interrupt;
cc->do_unaligned_access = arm_cpu_do_unaligned_access;
cc->do_transaction_failed = arm_cpu_do_transaction_failed;
cc->disas_set_info = arm_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = arm_translate_init;
+ cc->tlb_fill = arm_cpu_tlb_fill;
#endif
}
}
}
-/* Walk the page table and (if the mapping exists) add the page
- * to the TLB. Return false on success, or true on failure. Populate
- * fsr with ARM DFSR/IFSR fault register format value on failure.
- */
-bool arm_tlb_fill(CPUState *cs, vaddr address,
- MMUAccessType access_type, int mmu_idx,
- ARMMMUFaultInfo *fi)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- hwaddr phys_addr;
- target_ulong page_size;
- int prot;
- int ret;
- MemTxAttrs attrs = {};
-
- ret = get_phys_addr(env, address, access_type,
- core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
- &attrs, &prot, &page_size, fi, NULL);
- if (!ret) {
- /*
- * Map a single [sub]page. Regions smaller than our declared
- * target page size are handled specially, so for those we
- * pass in the exact addresses.
- */
- if (page_size >= TARGET_PAGE_SIZE) {
- phys_addr &= TARGET_PAGE_MASK;
- address &= TARGET_PAGE_MASK;
- }
- tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
- prot, mmu_idx, page_size);
- return 0;
- }
-
- return ret;
-}
-
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
MemTxAttrs *attrs)
{
#endif
+bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+
+#ifdef CONFIG_USER_ONLY
+ cpu->env.exception.vaddress = address;
+ if (access_type == MMU_INST_FETCH) {
+ cs->exception_index = EXCP_PREFETCH_ABORT;
+ } else {
+ cs->exception_index = EXCP_DATA_ABORT;
+ }
+ cpu_loop_exit_restore(cs, retaddr);
+#else
+ hwaddr phys_addr;
+ target_ulong page_size;
+ int prot, ret;
+ MemTxAttrs attrs = {};
+ ARMMMUFaultInfo fi = {};
+
+ /*
+ * Walk the page table and (if the mapping exists) add the page
+ * to the TLB. On success, return true. Otherwise, if probing,
+ * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
+ * register format, and signal the fault.
+ */
+ ret = get_phys_addr(&cpu->env, address, access_type,
+ core_to_arm_mmu_idx(&cpu->env, mmu_idx),
+ &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
+ if (likely(!ret)) {
+ /*
+ * Map a single [sub]page. Regions smaller than our declared
+ * target page size are handled specially, so for those we
+ * pass in the exact addresses.
+ */
+ if (page_size >= TARGET_PAGE_SIZE) {
+ phys_addr &= TARGET_PAGE_MASK;
+ address &= TARGET_PAGE_MASK;
+ }
+ tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
+ prot, mmu_idx, page_size);
+ return true;
+ } else if (probe) {
+ return false;
+ } else {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
+ arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
+ }
+#endif
+}
+
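A probing caller invokes the hook through the CPUClass with probe=true and a zero return address, as the new probe path above anticipates. A sketch of the idea (hypothetical fragment; the in-tree users of probe arrived in follow-up patches):

    CPUClass *cc = CPU_GET_CLASS(cs);

    if (cc->tlb_fill(cs, addr, size, MMU_DATA_LOAD, mmu_idx,
                     true /* probe */, 0)) {
        /* Mapping exists; a subsequent load will hit the TLB. */
    } else {
        /* No mapping, and no exception was raised: suppress the access. */
    }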
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
/* Implement DC ZVA, which zeroes a fixed-length block of memory.
DEF_HELPER_2(neon_ceq_u16, i32, i32, i32)
DEF_HELPER_2(neon_ceq_u32, i32, i32, i32)
-DEF_HELPER_1(neon_abs_s8, i32, i32)
-DEF_HELPER_1(neon_abs_s16, i32, i32)
DEF_HELPER_1(neon_clz_u8, i32, i32)
DEF_HELPER_1(neon_clz_u16, i32, i32)
DEF_HELPER_1(neon_cls_s8, i32, i32)
return result != MEMTX_DECODE_ERROR;
}
-/* Do a page table walk and add page to TLB if possible */
-bool arm_tlb_fill(CPUState *cpu, vaddr address,
- MMUAccessType access_type, int mmu_idx,
- ARMMMUFaultInfo *fi);
+bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
+void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
+ int mmu_idx, ARMMMUFaultInfo *fi) QEMU_NORETURN;
/* Return true if the stage 1 translation regime is using LPAE format page
* tables */
NEON_VOP(ceq_u32, neon_u32, 1)
#undef NEON_FN
-#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
-NEON_VOP1(abs_s8, neon_s8, 4)
-NEON_VOP1(abs_s16, neon_s16, 2)
-#undef NEON_FN
-
/* Count Leading Sign/Zero Bits. */
static inline int do_clz8(uint8_t x)
{
return syn;
}
-static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
- int mmu_idx, ARMMMUFaultInfo *fi)
+void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
+ int mmu_idx, ARMMMUFaultInfo *fi)
{
CPUARMState *env = &cpu->env;
int target_el;
raise_exception(env, exc, syn, target_el);
}
-/* try to fill the TLB and return an exception if error. If retaddr is
- * NULL, it means that the function was called in C code (i.e. not
- * from generated code or from helper.c)
- */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- bool ret;
- ARMMMUFaultInfo fi = {};
-
- ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
- if (unlikely(ret)) {
- ARMCPU *cpu = ARM_CPU(cs);
-
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr, true);
-
- deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
- }
-}
-
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
MMUAccessType access_type,
cpu_restore_state(cs, retaddr, true);
fi.type = ARMFault_Alignment;
- deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
+ arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
/* arm_cpu_do_transaction_failed: handle a memory system error response
fi.ea = arm_extabort_type(response);
fi.type = ARMFault_SyncExternal;
- deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
+ arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
#endif /* !defined(CONFIG_USER_ONLY) */
* in the real world, obviously.)
*
* Then there are the annoying special cases with watchpoints...
- *
- * TODO: Add a form of tlb_fill that does not raise an exception,
- * with a form of tlb_vaddr_to_host and a set of loads to match.
- * The non_fault_vaddr_to_host would handle everything, usually,
- * and the loads would handle the iomem path for watchpoints.
+ * TODO: Add a form of non-faulting loads using cc->tlb_fill(probe=true).
*/
host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx);
split = max_for_page(addr, mem_off, mem_max);
if (u) {
tcg_gen_neg_i64(tcg_rd, tcg_rn);
} else {
- TCGv_i64 tcg_zero = tcg_const_i64(0);
- tcg_gen_neg_i64(tcg_rd, tcg_rn);
- tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
- tcg_rn, tcg_rd);
- tcg_temp_free_i64(tcg_zero);
+ tcg_gen_abs_i64(tcg_rd, tcg_rn);
}
break;
case 0x2f: /* FABS */
}
break;
case 0xb:
- if (u) { /* NEG */
+ if (u) { /* ABS, NEG */
gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
- return;
+ } else {
+ gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
}
- break;
+ return;
}
if (size == 3) {
gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
}
break;
- case 0xb: /* ABS, NEG */
- if (u) {
- tcg_gen_neg_i32(tcg_res, tcg_op);
- } else {
- TCGv_i32 tcg_zero = tcg_const_i32(0);
- tcg_gen_neg_i32(tcg_res, tcg_op);
- tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
- tcg_zero, tcg_op, tcg_res);
- tcg_temp_free_i32(tcg_zero);
- }
- break;
case 0x2f: /* FABS */
gen_helper_vfp_abss(tcg_res, tcg_op);
break;
tcg_temp_free_i32(tcg_zero);
break;
}
- case 0xb: /* ABS, NEG */
- if (u) {
- TCGv_i32 tcg_zero = tcg_const_i32(0);
- if (size) {
- gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
- } else {
- gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
- }
- tcg_temp_free_i32(tcg_zero);
- } else {
- if (size) {
- gen_helper_neon_abs_s16(tcg_res, tcg_op);
- } else {
- gen_helper_neon_abs_s8(tcg_res, tcg_op);
- }
- }
- break;
case 0x4: /* CLS, CLZ */
if (u) {
if (size == 0) {
static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a)
{
+ static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
static const GVecGen2s op[4] = {
{ .fni8 = tcg_gen_vec_sub8_i64,
.fniv = tcg_gen_sub_vec,
.fno = gen_helper_sve_subri_b,
- .opc = INDEX_op_sub_vec,
+ .opt_opc = vecop_list,
.vece = MO_8,
.scalar_first = true },
{ .fni8 = tcg_gen_vec_sub16_i64,
.fniv = tcg_gen_sub_vec,
.fno = gen_helper_sve_subri_h,
- .opc = INDEX_op_sub_vec,
+ .opt_opc = vecop_list,
.vece = MO_16,
.scalar_first = true },
{ .fni4 = tcg_gen_sub_i32,
.fniv = tcg_gen_sub_vec,
.fno = gen_helper_sve_subri_s,
- .opc = INDEX_op_sub_vec,
+ .opt_opc = vecop_list,
.vece = MO_32,
.scalar_first = true },
{ .fni8 = tcg_gen_sub_i64,
.fniv = tcg_gen_sub_vec,
.fno = gen_helper_sve_subri_d,
- .opc = INDEX_op_sub_vec,
+ .opt_opc = vecop_list,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64,
.scalar_first = true }
tcg_temp_free_i32(tmp1);
}
-static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
-{
- TCGv_i32 c0 = tcg_const_i32(0);
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_neg_i32(tmp, src);
- tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
- tcg_temp_free_i32(c0);
- tcg_temp_free_i32(tmp);
-}
-
static void shifter_out_im(TCGv_i32 var, int shift)
{
if (shift == 0) {
tcg_gen_add_vec(vece, d, d, a);
}
+static const TCGOpcode vecop_list_ssra[] = {
+ INDEX_op_sari_vec, INDEX_op_add_vec, 0
+};
+
const GVecGen2i ssra_op[4] = {
{ .fni8 = gen_ssra8_i64,
.fniv = gen_ssra_vec,
.load_dest = true,
- .opc = INDEX_op_sari_vec,
+ .opt_opc = vecop_list_ssra,
.vece = MO_8 },
{ .fni8 = gen_ssra16_i64,
.fniv = gen_ssra_vec,
.load_dest = true,
- .opc = INDEX_op_sari_vec,
+ .opt_opc = vecop_list_ssra,
.vece = MO_16 },
{ .fni4 = gen_ssra32_i32,
.fniv = gen_ssra_vec,
.load_dest = true,
- .opc = INDEX_op_sari_vec,
+ .opt_opc = vecop_list_ssra,
.vece = MO_32 },
{ .fni8 = gen_ssra64_i64,
.fniv = gen_ssra_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .opt_opc = vecop_list_ssra,
.load_dest = true,
- .opc = INDEX_op_sari_vec,
.vece = MO_64 },
};
tcg_gen_add_vec(vece, d, d, a);
}
+static const TCGOpcode vecop_list_usra[] = {
+ INDEX_op_shri_vec, INDEX_op_add_vec, 0
+};
+
const GVecGen2i usra_op[4] = {
{ .fni8 = gen_usra8_i64,
.fniv = gen_usra_vec,
.load_dest = true,
- .opc = INDEX_op_shri_vec,
+ .opt_opc = vecop_list_usra,
.vece = MO_8, },
{ .fni8 = gen_usra16_i64,
.fniv = gen_usra_vec,
.load_dest = true,
- .opc = INDEX_op_shri_vec,
+ .opt_opc = vecop_list_usra,
.vece = MO_16, },
{ .fni4 = gen_usra32_i32,
.fniv = gen_usra_vec,
.load_dest = true,
- .opc = INDEX_op_shri_vec,
+ .opt_opc = vecop_list_usra,
.vece = MO_32, },
{ .fni8 = gen_usra64_i64,
.fniv = gen_usra_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.load_dest = true,
- .opc = INDEX_op_shri_vec,
+ .opt_opc = vecop_list_usra,
.vece = MO_64, },
};
}
}
+static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };
+
const GVecGen2i sri_op[4] = {
{ .fni8 = gen_shr8_ins_i64,
.fniv = gen_shr_ins_vec,
.load_dest = true,
- .opc = INDEX_op_shri_vec,
+ .opt_opc = vecop_list_sri,
.vece = MO_8 },
{ .fni8 = gen_shr16_ins_i64,
.fniv = gen_shr_ins_vec,
.load_dest = true,
- .opc = INDEX_op_shri_vec,
+ .opt_opc = vecop_list_sri,
.vece = MO_16 },
{ .fni4 = gen_shr32_ins_i32,
.fniv = gen_shr_ins_vec,
.load_dest = true,
- .opc = INDEX_op_shri_vec,
+ .opt_opc = vecop_list_sri,
.vece = MO_32 },
{ .fni8 = gen_shr64_ins_i64,
.fniv = gen_shr_ins_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.load_dest = true,
- .opc = INDEX_op_shri_vec,
+ .opt_opc = vecop_list_sri,
.vece = MO_64 },
};
}
}
+static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };
+
const GVecGen2i sli_op[4] = {
{ .fni8 = gen_shl8_ins_i64,
.fniv = gen_shl_ins_vec,
.load_dest = true,
- .opc = INDEX_op_shli_vec,
+ .opt_opc = vecop_list_sli,
.vece = MO_8 },
{ .fni8 = gen_shl16_ins_i64,
.fniv = gen_shl_ins_vec,
.load_dest = true,
- .opc = INDEX_op_shli_vec,
+ .opt_opc = vecop_list_sli,
.vece = MO_16 },
{ .fni4 = gen_shl32_ins_i32,
.fniv = gen_shl_ins_vec,
.load_dest = true,
- .opc = INDEX_op_shli_vec,
+ .opt_opc = vecop_list_sli,
.vece = MO_32 },
{ .fni8 = gen_shl64_ins_i64,
.fniv = gen_shl_ins_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.load_dest = true,
- .opc = INDEX_op_shli_vec,
+ .opt_opc = vecop_list_sli,
.vece = MO_64 },
};
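
Editor's note: the .opc to .opt_opc conversions in this run are all the same shape. Instead of naming the single required host vector opcode, each GVecGen descriptor now points at a zero-terminated TCGOpcode list covering everything its .fniv expansion may emit, so the middle-end can verify host support up front. A condensed sketch (gen_foo_vec is hypothetical, modelled on gen_usra_vec above; a real descriptor would also supply .fni8/.fno fallbacks):

static void gen_foo_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);   /* may emit INDEX_op_shri_vec */
    tcg_gen_add_vec(vece, d, d, a);     /* may emit INDEX_op_add_vec */
}

/* Every opcode the expansion above may use, terminated by 0. */
static const TCGOpcode vecop_list_foo[] = {
    INDEX_op_shri_vec, INDEX_op_add_vec, 0
};

static const GVecGen2i foo_op = {
    .fniv = gen_foo_vec,
    .opt_opc = vecop_list_foo,   /* replaces the old single .opc field */
    .load_dest = true,
    .vece = MO_32,
};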
/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
* these tables are shared with AArch64 which does support them.
*/
+
+static const TCGOpcode vecop_list_mla[] = {
+ INDEX_op_mul_vec, INDEX_op_add_vec, 0
+};
+
+static const TCGOpcode vecop_list_mls[] = {
+ INDEX_op_mul_vec, INDEX_op_sub_vec, 0
+};
+
const GVecGen3 mla_op[4] = {
{ .fni4 = gen_mla8_i32,
.fniv = gen_mla_vec,
- .opc = INDEX_op_mul_vec,
.load_dest = true,
+ .opt_opc = vecop_list_mla,
.vece = MO_8 },
{ .fni4 = gen_mla16_i32,
.fniv = gen_mla_vec,
- .opc = INDEX_op_mul_vec,
.load_dest = true,
+ .opt_opc = vecop_list_mla,
.vece = MO_16 },
{ .fni4 = gen_mla32_i32,
.fniv = gen_mla_vec,
- .opc = INDEX_op_mul_vec,
.load_dest = true,
+ .opt_opc = vecop_list_mla,
.vece = MO_32 },
{ .fni8 = gen_mla64_i64,
.fniv = gen_mla_vec,
- .opc = INDEX_op_mul_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.load_dest = true,
+ .opt_opc = vecop_list_mla,
.vece = MO_64 },
};
const GVecGen3 mls_op[4] = {
{ .fni4 = gen_mls8_i32,
.fniv = gen_mls_vec,
- .opc = INDEX_op_mul_vec,
.load_dest = true,
+ .opt_opc = vecop_list_mls,
.vece = MO_8 },
{ .fni4 = gen_mls16_i32,
.fniv = gen_mls_vec,
- .opc = INDEX_op_mul_vec,
.load_dest = true,
+ .opt_opc = vecop_list_mls,
.vece = MO_16 },
{ .fni4 = gen_mls32_i32,
.fniv = gen_mls_vec,
- .opc = INDEX_op_mul_vec,
.load_dest = true,
+ .opt_opc = vecop_list_mls,
.vece = MO_32 },
{ .fni8 = gen_mls64_i64,
.fniv = gen_mls_vec,
- .opc = INDEX_op_mul_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.load_dest = true,
+ .opt_opc = vecop_list_mls,
.vece = MO_64 },
};
tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}
+static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };
+
const GVecGen3 cmtst_op[4] = {
{ .fni4 = gen_helper_neon_tst_u8,
.fniv = gen_cmtst_vec,
+ .opt_opc = vecop_list_cmtst,
.vece = MO_8 },
{ .fni4 = gen_helper_neon_tst_u16,
.fniv = gen_cmtst_vec,
+ .opt_opc = vecop_list_cmtst,
.vece = MO_16 },
{ .fni4 = gen_cmtst_i32,
.fniv = gen_cmtst_vec,
+ .opt_opc = vecop_list_cmtst,
.vece = MO_32 },
{ .fni8 = gen_cmtst_i64,
.fniv = gen_cmtst_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .opt_opc = vecop_list_cmtst,
.vece = MO_64 },
};
tcg_temp_free_vec(x);
}
+static const TCGOpcode vecop_list_uqadd[] = {
+ INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
+};
+
const GVecGen4 uqadd_op[4] = {
{ .fniv = gen_uqadd_vec,
.fno = gen_helper_gvec_uqadd_b,
- .opc = INDEX_op_usadd_vec,
.write_aofs = true,
+ .opt_opc = vecop_list_uqadd,
.vece = MO_8 },
{ .fniv = gen_uqadd_vec,
.fno = gen_helper_gvec_uqadd_h,
- .opc = INDEX_op_usadd_vec,
.write_aofs = true,
+ .opt_opc = vecop_list_uqadd,
.vece = MO_16 },
{ .fniv = gen_uqadd_vec,
.fno = gen_helper_gvec_uqadd_s,
- .opc = INDEX_op_usadd_vec,
.write_aofs = true,
+ .opt_opc = vecop_list_uqadd,
.vece = MO_32 },
{ .fniv = gen_uqadd_vec,
.fno = gen_helper_gvec_uqadd_d,
- .opc = INDEX_op_usadd_vec,
.write_aofs = true,
+ .opt_opc = vecop_list_uqadd,
.vece = MO_64 },
};
tcg_temp_free_vec(x);
}
+static const TCGOpcode vecop_list_sqadd[] = {
+ INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
+};
+
const GVecGen4 sqadd_op[4] = {
{ .fniv = gen_sqadd_vec,
.fno = gen_helper_gvec_sqadd_b,
- .opc = INDEX_op_ssadd_vec,
+ .opt_opc = vecop_list_sqadd,
.write_aofs = true,
.vece = MO_8 },
{ .fniv = gen_sqadd_vec,
.fno = gen_helper_gvec_sqadd_h,
- .opc = INDEX_op_ssadd_vec,
+ .opt_opc = vecop_list_sqadd,
.write_aofs = true,
.vece = MO_16 },
{ .fniv = gen_sqadd_vec,
.fno = gen_helper_gvec_sqadd_s,
- .opc = INDEX_op_ssadd_vec,
+ .opt_opc = vecop_list_sqadd,
.write_aofs = true,
.vece = MO_32 },
{ .fniv = gen_sqadd_vec,
.fno = gen_helper_gvec_sqadd_d,
- .opc = INDEX_op_ssadd_vec,
+ .opt_opc = vecop_list_sqadd,
.write_aofs = true,
.vece = MO_64 },
};
tcg_temp_free_vec(x);
}
+static const TCGOpcode vecop_list_uqsub[] = {
+ INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
+};
+
const GVecGen4 uqsub_op[4] = {
{ .fniv = gen_uqsub_vec,
.fno = gen_helper_gvec_uqsub_b,
- .opc = INDEX_op_ussub_vec,
+ .opt_opc = vecop_list_uqsub,
.write_aofs = true,
.vece = MO_8 },
{ .fniv = gen_uqsub_vec,
.fno = gen_helper_gvec_uqsub_h,
- .opc = INDEX_op_ussub_vec,
+ .opt_opc = vecop_list_uqsub,
.write_aofs = true,
.vece = MO_16 },
{ .fniv = gen_uqsub_vec,
.fno = gen_helper_gvec_uqsub_s,
- .opc = INDEX_op_ussub_vec,
+ .opt_opc = vecop_list_uqsub,
.write_aofs = true,
.vece = MO_32 },
{ .fniv = gen_uqsub_vec,
.fno = gen_helper_gvec_uqsub_d,
- .opc = INDEX_op_ussub_vec,
+ .opt_opc = vecop_list_uqsub,
.write_aofs = true,
.vece = MO_64 },
};
tcg_temp_free_vec(x);
}
+static const TCGOpcode vecop_list_sqsub[] = {
+ INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
+};
+
const GVecGen4 sqsub_op[4] = {
{ .fniv = gen_sqsub_vec,
.fno = gen_helper_gvec_sqsub_b,
- .opc = INDEX_op_sssub_vec,
+ .opt_opc = vecop_list_sqsub,
.write_aofs = true,
.vece = MO_8 },
{ .fniv = gen_sqsub_vec,
.fno = gen_helper_gvec_sqsub_h,
- .opc = INDEX_op_sssub_vec,
+ .opt_opc = vecop_list_sqsub,
.write_aofs = true,
.vece = MO_16 },
{ .fniv = gen_sqsub_vec,
.fno = gen_helper_gvec_sqsub_s,
- .opc = INDEX_op_sssub_vec,
+ .opt_opc = vecop_list_sqsub,
.write_aofs = true,
.vece = MO_32 },
{ .fniv = gen_sqsub_vec,
.fno = gen_helper_gvec_sqsub_d,
- .opc = INDEX_op_sssub_vec,
+ .opt_opc = vecop_list_sqsub,
.write_aofs = true,
.vece = MO_64 },
};
case NEON_2RM_VNEG:
tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
break;
+ case NEON_2RM_VABS:
+ tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
+ break;
default:
elementwise:
}
tcg_temp_free_i32(tmp2);
break;
- case NEON_2RM_VABS:
- switch(size) {
- case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
- case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
- case 2: tcg_gen_abs_i32(tmp, tmp); break;
- default: abort();
- }
- break;
case NEON_2RM_VCGT0_F:
{
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
cc->set_pc = cris_cpu_set_pc;
cc->gdb_read_register = cris_cpu_gdb_read_register;
cc->gdb_write_register = cris_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = cris_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = cris_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = cris_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_cris_cpu;
#endif
return !!(env->pregs[PR_CCS] & U_FLAG);
}
-int cris_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
/* Support function regs. */
#define SFR_RW_GC_CFG 0][0
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
//#define CRIS_HELPER_DEBUG
cris_cpu_do_interrupt(cs);
}
-int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
CRISCPU *cpu = CRIS_CPU(cs);
cs->exception_index = 0xaa;
cpu->env.pregs[PR_EDA] = address;
- cpu_dump_state(cs, stderr, 0);
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else /* !CONFIG_USER_ONLY */
env->pregs[PR_CCS] = ccs;
}
-int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
CRISCPU *cpu = CRIS_CPU(cs);
CPUCRISState *env = &cpu->env;
struct cris_mmu_result res;
int prot, miss;
- int r = -1;
target_ulong phy;
- qemu_log_mask(CPU_LOG_MMU, "%s addr=%" VADDR_PRIx " pc=%x rw=%x\n",
- __func__, address, env->pc, rw);
miss = cris_mmu_translate(&res, env, address & TARGET_PAGE_MASK,
- rw, mmu_idx, 0);
- if (miss) {
- if (cs->exception_index == EXCP_BUSFAULT) {
- cpu_abort(cs,
- "CRIS: Illegal recursive bus fault."
- "addr=%" VADDR_PRIx " rw=%d\n",
- address, rw);
- }
-
- env->pregs[PR_EDA] = address;
- cs->exception_index = EXCP_BUSFAULT;
- env->fault_vector = res.bf_vec;
- r = 1;
- } else {
+ access_type, mmu_idx, 0);
+ if (likely(!miss)) {
/*
* Mask off the cache selection bit. The ETRAX busses do not
* see the top bit.
prot = res.prot;
tlb_set_page(cs, address & TARGET_PAGE_MASK, phy,
prot, mmu_idx, TARGET_PAGE_SIZE);
- r = 0;
+ return true;
+ }
+
+ if (probe) {
+ return false;
}
- if (r > 0) {
- qemu_log_mask(CPU_LOG_MMU,
- "%s returns %d irqreq=%x addr=%" VADDR_PRIx " phy=%x vec=%x"
- " pc=%x\n", __func__, r, cs->interrupt_request, address,
- res.phy, res.bf_vec, env->pc);
+
+ if (cs->exception_index == EXCP_BUSFAULT) {
+ cpu_abort(cs, "CRIS: Illegal recursive bus fault."
+ "addr=%" VADDR_PRIx " access_type=%d\n",
+ address, access_type);
+ }
+
+ env->pregs[PR_EDA] = address;
+ cs->exception_index = EXCP_BUSFAULT;
+ env->fault_vector = res.bf_vec;
+ if (retaddr) {
+ if (cpu_restore_state(cs, retaddr, true)) {
+ /* Evaluate flags after retranslation. */
+ helper_top_evaluate_flags(env);
+ }
}
- return r;
+ cpu_loop_exit(cs);
}
void crisv10_cpu_do_interrupt(CPUState *cs)
#define D_LOG(...) do { } while (0)
#endif
-#if !defined(CONFIG_USER_ONLY)
-/* Try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- CRISCPU *cpu = CRIS_CPU(cs);
- CPUCRISState *env = &cpu->env;
- int ret;
-
- D_LOG("%s pc=%x tpc=%x ra=%p\n", __func__,
- env->pc, env->pregs[PR_EDA], (void *)retaddr);
- ret = cris_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret)) {
- if (retaddr) {
- /* now we have a real cpu fault */
- if (cpu_restore_state(cs, retaddr, true)) {
- /* Evaluate flags after retranslation. */
- helper_top_evaluate_flags(env);
- }
- }
- cpu_loop_exit(cs);
- }
-}
-
-#endif
-
void helper_raise_exception(CPUCRISState *env, uint32_t index)
{
CPUState *cs = CPU(cris_env_get_cpu(env));
static int dec_abs_r(CPUCRISState *env, DisasContext *dc)
{
- TCGv t0;
-
LOG_DIS("abs $r%u, $r%u\n",
dc->op1, dc->op2);
cris_cc_mask(dc, CC_MASK_NZ);
- t0 = tcg_temp_new();
- tcg_gen_sari_tl(t0, cpu_R[dc->op1], 31);
- tcg_gen_xor_tl(cpu_R[dc->op2], cpu_R[dc->op1], t0);
- tcg_gen_sub_tl(cpu_R[dc->op2], cpu_R[dc->op2], t0);
- tcg_temp_free(t0);
-
+ tcg_gen_abs_tl(cpu_R[dc->op2], cpu_R[dc->op1]);
cris_alu(dc, CC_OP_MOVE,
cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
return 2;
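
Editor's note: both of the deletions above retire an open-coded absolute value, ARM's neg-plus-movcond sequence and CRIS's sign-mask trick. The new tcg_gen_abs_* primitives centralize this; on a host without a native abs operation the generic fallback expands to essentially the same sign-mask idiom the CRIS code used (a sketch, not upstream-verbatim):

/* |x| = (x ^ (x >> 31)) - (x >> 31), using an arithmetic shift. */
static void gen_abs_i32_fallback(TCGv_i32 ret, TCGv_i32 a)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_sari_i32(t, a, 31);     /* t = sign mask: 0 or -1 */
    tcg_gen_xor_i32(ret, a, t);     /* conditional complement */
    tcg_gen_sub_i32(ret, ret, t);   /* conditional +1 */
    tcg_temp_free_i32(t);
}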
cc->synchronize_from_tb = hppa_cpu_synchronize_from_tb;
cc->gdb_read_register = hppa_cpu_gdb_read_register;
cc->gdb_write_register = hppa_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = hppa_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = hppa_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = hppa_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_hppa_cpu;
#endif
void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
-#ifdef CONFIG_USER_ONLY
-int hppa_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
- int rw, int midx);
-#else
+bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+#ifndef CONFIG_USER_ONLY
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
int type, hwaddr *pphys, int *pprot);
extern const MemoryRegionOps hppa_io_eir_ops;
#include "trace.h"
#ifdef CONFIG_USER_ONLY
-int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
- int size, int rw, int mmu_idx)
+bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
HPPACPU *cpu = HPPA_CPU(cs);
which would affect si_code. */
cs->exception_index = EXCP_DMP;
cpu->env.cr[CR_IOR] = address;
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
return excp == EXCP_DTLB_MISS ? -1 : phys;
}
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType type, int mmu_idx, uintptr_t retaddr)
+bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
HPPACPU *cpu = HPPA_CPU(cs);
CPUHPPAState *env = &cpu->env;
excp = hppa_get_physical_address(env, addr, mmu_idx,
a_prot, &phys, &prot);
if (unlikely(excp >= 0)) {
+ if (probe) {
+ return false;
+ }
trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
/* Failure. Raise the indicated exception. */
cs->exception_index = excp;
/* Success! Store the translation into the QEMU TLB. */
tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
+ return true;
}
/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
cc->gdb_write_register = x86_cpu_gdb_write_register;
cc->get_arch_id = x86_cpu_get_arch_id;
cc->get_paging_enabled = x86_cpu_get_paging_enabled;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
-#else
+#ifndef CONFIG_USER_ONLY
cc->asidx_from_attrs = x86_asidx_from_attrs;
cc->get_memory_mapping = x86_cpu_get_memory_mapping;
cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
cc->tcg_initialize = tcg_x86_init;
+ cc->tlb_fill = x86_cpu_tlb_fill;
#endif
cc->disas_set_info = x86_disas_set_info;
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping);
/* helper.c */
-int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr, int size,
- int is_write, int mmu_idx);
+bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
#ifndef CONFIG_USER_ONLY
raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
-#if defined(CONFIG_USER_ONLY)
-int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
- int is_write, int mmu_idx)
-{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
-
- /* user mode only emulation */
- is_write &= 1;
- env->cr[2] = addr;
- env->error_code = (is_write << PG_ERROR_W_BIT);
- env->error_code |= PG_ERROR_U_MASK;
- cs->exception_index = EXCP0E_PAGE;
- env->exception_is_int = 0;
- env->exception_next_eip = -1;
- return 1;
-}
-
-#else
-
+#if !defined(CONFIG_USER_ONLY)
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
int *prot)
{
* 0 = nothing more to do
* 1 = generate PF fault
*/
-int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
- int is_write1, int mmu_idx)
+static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
+ int is_write1, int mmu_idx)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
return 1;
}
#endif
+
+bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+#ifdef CONFIG_USER_ONLY
+ /* user mode only emulation */
+ env->cr[2] = addr;
+ env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
+ env->error_code |= PG_ERROR_U_MASK;
+ cs->exception_index = EXCP0E_PAGE;
+ env->exception_is_int = 0;
+ env->exception_next_eip = -1;
+ cpu_loop_exit_restore(cs, retaddr);
+#else
+ env->retaddr = retaddr;
+ if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
+ /* FIXME: On error in get_hphys we have already jumped out. */
+ g_assert(!probe);
+ raise_exception_err_ra(env, cs->exception_index,
+ env->error_code, retaddr);
+ }
+ return true;
+#endif
+}
*
*/
-#ifndef _HAX_I386_H
-#define _HAX_I386_H
+#ifndef HAX_I386_H
+#define HAX_I386_H
#include "cpu.h"
#include "sysemu/hax.h"
/* Interface with HAX kernel module */
-#ifndef _HAX_INTERFACE_H
-#define _HAX_INTERFACE_H
+#ifndef HAX_INTERFACE_H
+#define HAX_INTERFACE_H
/* fx_layout has 3 formats table 3-56, 512bytes */
struct fx_layout {
*
*/
-#ifndef TARGET_I386_HAX_DARWIN_H
-#define TARGET_I386_HAX_DARWIN_H
+#ifndef TARGET_I386_HAX_POSIX_H
+#define TARGET_I386_HAX_POSIX_H
#include <sys/ioctl.h>
#define HAX_VCPU_SET_REGS _IOWR(0, 0xc7, struct vcpu_state_t)
#define HAX_VCPU_GET_REGS _IOWR(0, 0xc8, struct vcpu_state_t)
-#endif /* TARGET_I386_HAX_DARWIN_H */
+#endif /* TARGET_I386_HAX_POSIX_H */
*
*/
-#ifndef _HVF_I386_H
-#define _HVF_I386_H
+#ifndef HVF_I386_H
+#define HVF_I386_H
#include "sysemu/hvf.h"
#include "cpu.h"
!(idtvec_info & VMCS_IDT_VEC_VALID)) {
cpu->halted = 1;
ret = EXCP_HLT;
+ break;
}
ret = EXCP_INTERRUPT;
break;
* $FreeBSD$
*/
-#ifndef _VMCS_H_
-#define _VMCS_H_
+#ifndef VMCS_H
+#define VMCS_H
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
*/
#ifndef HVF_X86_H
-#define HVF_X86_H 1
+#define HVF_X86_H
typedef struct x86_register {
union {
*/
#ifndef HVF_X86_DECODE_H
-#define HVF_X86_DECODE_H 1
+#define HVF_X86_DECODE_H
#include "cpu.h"
#include "x86.h"
*/
#ifndef HVF_X86_DESCR_H
-#define HVF_X86_DESCR_H 1
+#define HVF_X86_DESCR_H
#include "x86.h"
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __X86_EMU_H__
-#define __X86_EMU_H__
+
+#ifndef X86_EMU_H
+#define X86_EMU_H
#include "x86.h"
#include "x86_decode.h"
/*
* x86 eflags functions
*/
-#ifndef __X86_FLAGS_H__
-#define __X86_FLAGS_H__
+
+#ifndef X86_FLAGS_H
+#define X86_FLAGS_H
#include "cpu.h"
void lflags_to_rflags(CPUX86State *env);
void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2,
uint8_t diff);
-#endif /* __X86_FLAGS_H__ */
+#endif /* X86_FLAGS_H */
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __X86_MMU_H__
-#define __X86_MMU_H__
+
+#ifndef X86_MMU_H
+#define X86_MMU_H
#define PT_PRESENT (1 << 0)
#define PT_WRITE (1 << 1)
void vmx_write_mem(struct CPUState *cpu, target_ulong gva, void *data, int bytes);
void vmx_read_mem(struct CPUState *cpu, void *data, target_ulong gva, int bytes);
-#endif /* __X86_MMU_H__ */
+#endif /* X86_MMU_H */
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef HVF_TASK
-#define HVF_TASK
+
+#ifndef HVF_X86_TASK_H
+#define HVF_X86_TASK_H
+
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,
int reason, bool gate_valid, uint8_t gate, uint64_t gate_type);
#endif
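
Editor's note: the guard renames in this stretch all fix the same portability bug. C reserves identifiers that begin with an underscore followed by an uppercase letter, or that contain a double underscore, for the implementation, so guards such as _HAX_I386_H and __X86_EMU_H__ invaded the reserved namespace. The style the hunks converge on, shown for a hypothetical header:

/* foo_bar.h */
#ifndef FOO_BAR_H          /* no leading or trailing underscores */
#define FOO_BAR_H          /* defined empty, not "1" */

/* ... declarations ... */

#endif /* FOO_BAR_H */     /* closing comment names the guard */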
raise_exception_ra(env, EXCP05_BOUND, GETPC());
}
}
-
-#if !defined(CONFIG_USER_ONLY)
-/* try to fill the TLB and return an exception if error. If retaddr is
- * NULL, it means that the function was called in C code (i.e. not
- * from generated code or from helper.c)
- */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
- int ret;
-
- env->retaddr = retaddr;
- ret = x86_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (ret) {
- raise_exception_err_ra(env, cs->exception_index, env->error_code, retaddr);
- }
-}
-#endif
-#include "windows.h"
+#include <windows.h>
#include <WinHvPlatform.h>
#include <WinHvEmulation.h>
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qemu-common.h"
-#include "strings.h"
#include "sysemu/accel.h"
#include "sysemu/whpx.h"
#include "sysemu/sysemu.h"
cc->set_pc = lm32_cpu_set_pc;
cc->gdb_read_register = lm32_cpu_gdb_read_register;
cc->gdb_write_register = lm32_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = lm32_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = lm32_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = lm32_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_lm32_cpu;
#endif
#define cpu_list lm32_cpu_list
#define cpu_signal_handler cpu_lm32_signal_handler
-int lm32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool lm32_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
#include "exec/cpu-all.h"
#include "exec/semihost.h"
#include "exec/log.h"
-int lm32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool lm32_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
LM32CPU *cpu = LM32_CPU(cs);
CPULM32State *env = &cpu->env;
} else {
tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
}
-
- return 0;
+ return true;
}
hwaddr lm32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
return lm32_juart_get_jrx(env->juart_state);
}
-
-/* Try to fill the TLB and return an exception if error. If retaddr is
- * NULL, it means that the function was called in C code (i.e. not
- * from generated code or from helper.c)
- */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = lm32_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret)) {
- /* now we have a real cpu fault */
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
#endif
cc->set_pc = m68k_cpu_set_pc;
cc->gdb_read_register = m68k_cpu_gdb_read_register;
cc->gdb_write_register = m68k_cpu_gdb_write_register;
- cc->handle_mmu_fault = m68k_cpu_handle_mmu_fault;
+ cc->tlb_fill = m68k_cpu_tlb_fill;
#if defined(CONFIG_SOFTMMU)
- cc->do_unassigned_access = m68k_cpu_unassigned_access;
+ cc->do_transaction_failed = m68k_cpu_transaction_failed;
cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
#endif
cc->disas_set_info = m68k_cpu_disas_set_info;
return (env->sr & SR_S) == 0 ? 1 : 0;
}
-int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
-void m68k_cpu_unassigned_access(CPUState *cs, hwaddr addr,
- bool is_write, bool is_exec, int is_asi,
- unsigned size);
+bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
#include "exec/cpu-all.h"
env->current_sp = new_sp;
}
-#if defined(CONFIG_USER_ONLY)
-
-int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
-{
- M68kCPU *cpu = M68K_CPU(cs);
-
- cs->exception_index = EXCP_ACCESS;
- cpu->env.mmu.ar = address;
- return 1;
-}
-
-#else
-
+#if !defined(CONFIG_USER_ONLY)
/* MMU: 68040 only */
static void print_address_zone(uint32_t logical, uint32_t physical,
int last_attr = -1, attr = -1;
M68kCPU *cpu = m68k_env_get_cpu(env);
CPUState *cs = CPU(cpu);
+ MemTxResult txres;
if (env->mmu.tcr & M68K_TCR_PAGE_8K) {
/* 8k page */
tib_mask = M68K_4K_PAGE_MASK;
}
for (i = 0; i < M68K_ROOT_POINTER_ENTRIES; i++) {
- tia = ldl_phys(cs->as, M68K_POINTER_BASE(root_pointer) + i * 4);
- if (!M68K_UDT_VALID(tia)) {
+ tia = address_space_ldl(cs->as, M68K_POINTER_BASE(root_pointer) + i * 4,
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK || !M68K_UDT_VALID(tia)) {
continue;
}
for (j = 0; j < M68K_ROOT_POINTER_ENTRIES; j++) {
- tib = ldl_phys(cs->as, M68K_POINTER_BASE(tia) + j * 4);
- if (!M68K_UDT_VALID(tib)) {
+ tib = address_space_ldl(cs->as, M68K_POINTER_BASE(tia) + j * 4,
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK || !M68K_UDT_VALID(tib)) {
continue;
}
for (k = 0; k < tic_size; k++) {
- tic = ldl_phys(cs->as, (tib & tib_mask) + k * 4);
- if (!M68K_PDT_VALID(tic)) {
+ tic = address_space_ldl(cs->as, (tib & tib_mask) + k * 4,
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK || !M68K_PDT_VALID(tic)) {
continue;
}
if (M68K_PDT_INDIRECT(tic)) {
- tic = ldl_phys(cs->as, M68K_INDIRECT_POINTER(tic));
+ tic = address_space_ldl(cs->as, M68K_INDIRECT_POINTER(tic),
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ continue;
+ }
}
last_logical = logical;
bool debug = access_type & ACCESS_DEBUG;
int page_bits;
int i;
+ MemTxResult txres;
/* Transparent Translation (physical = logical) */
for (i = 0; i < M68K_MAX_TTR; i++) {
/* Root Index */
entry = M68K_POINTER_BASE(next) | M68K_ROOT_INDEX(address);
- next = ldl_phys(cs->as, entry);
+ next = address_space_ldl(cs->as, entry, MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
if (!M68K_UDT_VALID(next)) {
return -1;
}
if (!(next & M68K_DESC_USED) && !debug) {
- stl_phys(cs->as, entry, next | M68K_DESC_USED);
+ address_space_stl(cs->as, entry, next | M68K_DESC_USED,
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
}
if (next & M68K_DESC_WRITEPROT) {
if (access_type & ACCESS_PTEST) {
/* Pointer Index */
entry = M68K_POINTER_BASE(next) | M68K_POINTER_INDEX(address);
- next = ldl_phys(cs->as, entry);
+ next = address_space_ldl(cs->as, entry, MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
if (!M68K_UDT_VALID(next)) {
return -1;
}
if (!(next & M68K_DESC_USED) && !debug) {
- stl_phys(cs->as, entry, next | M68K_DESC_USED);
+ address_space_stl(cs->as, entry, next | M68K_DESC_USED,
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
}
if (next & M68K_DESC_WRITEPROT) {
if (access_type & ACCESS_PTEST) {
entry = M68K_4K_PAGE_BASE(next) | M68K_4K_PAGE_INDEX(address);
}
- next = ldl_phys(cs->as, entry);
+ next = address_space_ldl(cs->as, entry, MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
if (!M68K_PDT_VALID(next)) {
return -1;
}
if (M68K_PDT_INDIRECT(next)) {
- next = ldl_phys(cs->as, M68K_INDIRECT_POINTER(next));
+ next = address_space_ldl(cs->as, M68K_INDIRECT_POINTER(next),
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
}
if (access_type & ACCESS_STORE) {
if (next & M68K_DESC_WRITEPROT) {
if (!(next & M68K_DESC_USED) && !debug) {
- stl_phys(cs->as, entry, next | M68K_DESC_USED);
+ address_space_stl(cs->as, entry, next | M68K_DESC_USED,
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
}
} else if ((next & (M68K_DESC_MODIFIED | M68K_DESC_USED)) !=
(M68K_DESC_MODIFIED | M68K_DESC_USED) && !debug) {
- stl_phys(cs->as, entry,
- next | (M68K_DESC_MODIFIED | M68K_DESC_USED));
+ address_space_stl(cs->as, entry,
+ next | (M68K_DESC_MODIFIED | M68K_DESC_USED),
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
}
} else {
if (!(next & M68K_DESC_USED) && !debug) {
- stl_phys(cs->as, entry, next | M68K_DESC_USED);
+ address_space_stl(cs->as, entry, next | M68K_DESC_USED,
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
}
}
}
return 0;
+
+txfail:
+ /*
+ * A page table load/store failed. TODO: we should really raise a
+ * suitable guest fault here if this is not a debug access.
+ * For now just return that the translation failed.
+ */
+ return -1;
}
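
Editor's note: the m68k walker conversion above replaces the ldl_phys()/stl_phys() shortcuts, which cannot report a failed bus transaction, with the address_space_ldl()/address_space_stl() variants that return a MemTxResult, funnelling every failure to the new txfail label. The checked-load step in isolation (a fragment; desc_pa stands in for whichever descriptor address is being walked):

    MemTxResult txres;
    uint32_t desc = address_space_ldl(cs->as, desc_pa,
                                      MEMTXATTRS_UNSPECIFIED, &txres);
    if (txres != MEMTX_OK) {
        goto txfail;   /* a failed walk load means "no translation" */
    }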
hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return phys_addr;
}
-int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+/*
+ * Notify CPU of a pending interrupt. Prioritization and vectoring should
+ * be handled by the interrupt controller. Real hardware only requests
+ * the vector when the interrupt is acknowledged by the CPU. For
+ * simplicity we calculate it when the interrupt is signalled.
+ */
+void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector)
+{
+ CPUState *cs = CPU(cpu);
+ CPUM68KState *env = &cpu->env;
+
+ env->pending_level = level;
+ env->pending_vector = vector;
+ if (level) {
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+}
+
+#endif
+
+bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType qemu_access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
M68kCPU *cpu = M68K_CPU(cs);
CPUM68KState *env = &cpu->env;
+
+#ifndef CONFIG_USER_ONLY
hwaddr physical;
int prot;
int access_type;
address & TARGET_PAGE_MASK,
PAGE_READ | PAGE_WRITE | PAGE_EXEC,
mmu_idx, TARGET_PAGE_SIZE);
- return 0;
+ return true;
}
- if (rw == 2) {
+ if (qemu_access_type == MMU_INST_FETCH) {
access_type = ACCESS_CODE;
- rw = 0;
} else {
access_type = ACCESS_DATA;
- if (rw) {
+ if (qemu_access_type == MMU_DATA_STORE) {
access_type |= ACCESS_STORE;
}
}
-
if (mmu_idx != MMU_USER_IDX) {
access_type |= ACCESS_SUPER;
}
ret = get_physical_address(&cpu->env, &physical, &prot,
address, access_type, &page_size);
- if (ret == 0) {
+ if (likely(ret == 0)) {
address &= TARGET_PAGE_MASK;
physical += address & (page_size - 1);
tlb_set_page(cs, address, physical,
prot, mmu_idx, TARGET_PAGE_SIZE);
- return 0;
+ return true;
}
+
+ if (probe) {
+ return false;
+ }
+
/* page fault */
env->mmu.ssw = M68K_ATC_040;
switch (size) {
if (!(access_type & ACCESS_STORE)) {
env->mmu.ssw |= M68K_RW_040;
}
- env->mmu.ar = address;
- cs->exception_index = EXCP_ACCESS;
- return 1;
-}
-
-/* Notify CPU of a pending interrupt. Prioritization and vectoring should
- be handled by the interrupt controller. Real hardware only requests
- the vector when the interrupt is acknowledged by the CPU. For
- simplicitly we calculate it when the interrupt is signalled. */
-void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector)
-{
- CPUState *cs = CPU(cpu);
- CPUM68KState *env = &cpu->env;
+#endif
- env->pending_level = level;
- env->pending_vector = vector;
- if (level) {
- cpu_interrupt(cs, CPU_INTERRUPT_HARD);
- } else {
- cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
- }
+ cs->exception_index = EXCP_ACCESS;
+ env->mmu.ar = address;
+ cpu_loop_exit_restore(cs, retaddr);
}
-#endif
-
uint32_t HELPER(bitrev)(uint32_t x)
{
x = ((x >> 1) & 0x55555555u) | ((x << 1) & 0xaaaaaaaau);
#else
-/* Try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = m68k_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret)) {
- /* now we have a real cpu fault */
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
-
static void cf_rte(CPUM68KState *env)
{
uint32_t sp;
do_interrupt_all(env, 1);
}
-void m68k_cpu_unassigned_access(CPUState *cs, hwaddr addr, bool is_write,
- bool is_exec, int is_asi, unsigned size)
+void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
{
M68kCPU *cpu = M68K_CPU(cs);
CPUM68KState *env = &cpu->env;
-#ifdef DEBUG_UNASSIGNED
- qemu_log_mask(CPU_LOG_INT, "Unassigned " TARGET_FMT_plx " wr=%d exe=%d\n",
- addr, is_write, is_exec);
-#endif
- if (env == NULL) {
- /* when called from gdb, env is NULL */
- return;
- }
+
+ cpu_restore_state(cs, retaddr, true);
if (m68k_feature(env, M68K_FEATURE_M68040)) {
env->mmu.mmusr = 0;
if (env->sr & SR_S) { /* SUPERVISOR */
env->mmu.ssw |= M68K_TM_040_SUPER;
}
- if (is_exec) { /* instruction or data */
+ if (access_type == MMU_INST_FETCH) { /* instruction or data */
env->mmu.ssw |= M68K_TM_040_CODE;
} else {
env->mmu.ssw |= M68K_TM_040_DATA;
break;
}
- if (!is_write) {
+ if (access_type != MMU_DATA_STORE) {
env->mmu.ssw |= M68K_RW_040;
}
sr = tcg_temp_new();
tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
tcg_gen_or_i32(sr, sr, ccr);
+ tcg_temp_free(ccr);
return sr;
}
int32_t offset;
uint32_t base;
int op;
- TCGLabel *l1;
base = s->pc;
op = (insn >> 8) & 0xf;
}
if (op > 1) {
/* Bcc */
- l1 = gen_new_label();
+ TCGLabel *l1 = gen_new_label();
gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
gen_jmp_tb(s, 1, base + offset);
gen_set_label(l1);
tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
}
+ tcg_temp_free_i32(sz);
/* reg = (reg << shl) | (reg >> shr) | (x << shx); */
/* X = (reg >> size) & 1 */
X = tcg_temp_new();
- tcg_gen_shr_i32(X, reg, sz);
- tcg_gen_andi_i32(X, X, 1);
- tcg_temp_free(sz);
+ tcg_gen_extract_i32(X, reg, size, 1);
return X;
}
cc->set_pc = mb_cpu_set_pc;
cc->gdb_read_register = mb_cpu_gdb_read_register;
cc->gdb_write_register = mb_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = mb_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = mb_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->do_transaction_failed = mb_cpu_transaction_failed;
cc->get_phys_page_debug = mb_cpu_get_phys_page_debug;
#endif
return MMU_KERNEL_IDX;
}
-int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
#include "exec/cpu-all.h"
env->regs[14] = env->sregs[SR_PC];
}
-int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
cs->exception_index = 0xaa;
- cpu_dump_state(cs, stderr, 0);
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else /* !CONFIG_USER_ONLY */
-int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
CPUMBState *env = &cpu->env;
+ struct microblaze_mmu_lookup lu;
unsigned int hit;
- int r = 1;
int prot;
- /* Translate if the MMU is available and enabled. */
- if (mmu_idx != MMU_NOMMU_IDX) {
- uint32_t vaddr, paddr;
- struct microblaze_mmu_lookup lu;
-
- hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx);
- if (hit) {
- vaddr = address & TARGET_PAGE_MASK;
- paddr = lu.paddr + vaddr - lu.vaddr;
-
- qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
- mmu_idx, vaddr, paddr, lu.prot);
- tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
- r = 0;
- } else {
- env->sregs[SR_EAR] = address;
- qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
- mmu_idx, address);
-
- switch (lu.err) {
- case ERR_PROT:
- env->sregs[SR_ESR] = rw == 2 ? 17 : 16;
- env->sregs[SR_ESR] |= (rw == 1) << 10;
- break;
- case ERR_MISS:
- env->sregs[SR_ESR] = rw == 2 ? 19 : 18;
- env->sregs[SR_ESR] |= (rw == 1) << 10;
- break;
- default:
- abort();
- break;
- }
-
- if (cs->exception_index == EXCP_MMU) {
- cpu_abort(cs, "recursive faults\n");
- }
-
- /* TLB miss. */
- cs->exception_index = EXCP_MMU;
- }
- } else {
+ if (mmu_idx == MMU_NOMMU_IDX) {
/* MMU disabled or not available. */
address &= TARGET_PAGE_MASK;
prot = PAGE_BITS;
tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
- r = 0;
+ return true;
}
- return r;
+
+ hit = mmu_translate(&env->mmu, &lu, address, access_type, mmu_idx);
+ if (likely(hit)) {
+ uint32_t vaddr = address & TARGET_PAGE_MASK;
+ uint32_t paddr = lu.paddr + vaddr - lu.vaddr;
+
+ qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
+ mmu_idx, vaddr, paddr, lu.prot);
+ tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+
+ /* TLB miss. */
+ if (probe) {
+ return false;
+ }
+
+ qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
+ mmu_idx, address);
+
+ env->sregs[SR_EAR] = address;
+ switch (lu.err) {
+ case ERR_PROT:
+ env->sregs[SR_ESR] = access_type == MMU_INST_FETCH ? 17 : 16;
+ env->sregs[SR_ESR] |= (access_type == MMU_DATA_STORE) << 10;
+ break;
+ case ERR_MISS:
+ env->sregs[SR_ESR] = access_type == MMU_INST_FETCH ? 19 : 18;
+ env->sregs[SR_ESR] |= (access_type == MMU_DATA_STORE) << 10;
+ break;
+ default:
+ abort();
+ }
+
+ if (cs->exception_index == EXCP_MMU) {
+ cpu_abort(cs, "recursive faults\n");
+ }
+
+ /* Raise the TLB-miss exception. */
+ cs->exception_index = EXCP_MMU;
+ cpu_loop_exit_restore(cs, retaddr);
}
void mb_cpu_do_interrupt(CPUState *cs)
#define D(x)
-#if !defined(CONFIG_USER_ONLY)
-
-/* Try to fill the TLB and return an exception if error. If retaddr is
- * NULL, it means that the function was called in C code (i.e. not
- * from generated code or from helper.c)
- */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = mb_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret)) {
- /* now we have a real cpu fault */
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
-#endif
-
void helper_put(uint32_t id, uint32_t ctrl, uint32_t data)
{
int test = ctrl & STREAM_TEST;
cc->synchronize_from_tb = mips_cpu_synchronize_from_tb;
cc->gdb_read_register = mips_cpu_gdb_read_register;
cc->gdb_write_register = mips_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = mips_cpu_handle_mmu_fault;
-#else
+#ifndef CONFIG_USER_ONLY
cc->do_unassigned_access = mips_cpu_unassigned_access;
cc->do_unaligned_access = mips_cpu_do_unaligned_access;
cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
cc->disas_set_info = mips_cpu_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = mips_tcg_init;
+ cc->tlb_fill = mips_cpu_tlb_fill;
#endif
cc->gdb_num_core_regs = 73;
#endif
#endif
-int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
MIPSCPU *cpu = MIPS_CPU(cs);
CPUMIPSState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
hwaddr physical;
int prot;
- int access_type;
+ int mips_access_type;
#endif
- int ret = 0;
-
-#if 0
- log_cpu_state(cs, 0);
-#endif
- qemu_log_mask(CPU_LOG_MMU,
- "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
- __func__, env->active_tc.PC, address, rw, mmu_idx);
+ int ret = TLBRET_BADADDR;
/* data access */
#if !defined(CONFIG_USER_ONLY)
/* XXX: put correct access by using cpu_restore_state() correctly */
- access_type = ACCESS_INT;
- ret = get_physical_address(env, &physical, &prot,
- address, rw, access_type, mmu_idx);
+ mips_access_type = ACCESS_INT;
+ ret = get_physical_address(env, &physical, &prot, address,
+ access_type, mips_access_type, mmu_idx);
switch (ret) {
case TLBRET_MATCH:
qemu_log_mask(CPU_LOG_MMU,
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
mmu_idx, TARGET_PAGE_SIZE);
- ret = 0;
- } else if (ret < 0)
-#endif
- {
-#if !defined(CONFIG_USER_ONLY)
+ return true;
+ }
#if !defined(TARGET_MIPS64)
- if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
- /*
- * Memory reads during hardware page table walking are performed
- * as if they were kernel-mode load instructions.
- */
- int mode = (env->hflags & MIPS_HFLAG_KSU);
- bool ret_walker;
- env->hflags &= ~MIPS_HFLAG_KSU;
- ret_walker = page_table_walk_refill(env, address, rw, mmu_idx);
- env->hflags |= mode;
- if (ret_walker) {
- ret = get_physical_address(env, &physical, &prot,
- address, rw, access_type, mmu_idx);
- if (ret == TLBRET_MATCH) {
- tlb_set_page(cs, address & TARGET_PAGE_MASK,
- physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
- mmu_idx, TARGET_PAGE_SIZE);
- ret = 0;
- return ret;
- }
+ if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
+ /*
+ * Memory reads during hardware page table walking are performed
+ * as if they were kernel-mode load instructions.
+ */
+ int mode = (env->hflags & MIPS_HFLAG_KSU);
+ bool ret_walker;
+ env->hflags &= ~MIPS_HFLAG_KSU;
+ ret_walker = page_table_walk_refill(env, address, access_type, mmu_idx);
+ env->hflags |= mode;
+ if (ret_walker) {
+ ret = get_physical_address(env, &physical, &prot, address,
+ access_type, mips_access_type, mmu_idx);
+ if (ret == TLBRET_MATCH) {
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
}
}
+ }
#endif
-#endif
- raise_mmu_exception(env, address, rw, ret);
- ret = 1;
+ if (probe) {
+ return false;
}
+#endif
- return ret;
+ raise_mmu_exception(env, address, access_type, ret);
+ do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
}
-#if !defined(CONFIG_USER_ONLY)
+#ifndef CONFIG_USER_ONLY
hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw)
{
hwaddr physical;
void cpu_mips_stop_count(CPUMIPSState *env);
/* helper.c */
-int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
/* op_helper.c */
uint32_t float_class_s(uint32_t arg, float_status *fst);
do_raise_exception_err(env, excp, error_code, retaddr);
}
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = mips_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (ret) {
- MIPSCPU *cpu = MIPS_CPU(cs);
- CPUMIPSState *env = &cpu->env;
-
- do_raise_exception_err(env, cs->exception_index,
- env->error_code, retaddr);
- }
-}
-
void mips_cpu_unassigned_access(CPUState *cs, hwaddr addr,
bool is_write, bool is_exec, int unused,
unsigned size)
cc->do_interrupt = moxie_cpu_do_interrupt;
cc->dump_state = moxie_cpu_dump_state;
cc->set_pc = moxie_cpu_set_pc;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = moxie_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = moxie_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = moxie_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_moxie_cpu;
#endif
*flags = 0;
}
-int moxie_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
- int rw, int mmu_idx);
+bool moxie_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
#endif /* MOXIE_CPU_H */
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
-/* Try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = moxie_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret)) {
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
-
void helper_raise_exception(CPUMoxieState *env, int ex)
{
CPUState *cs = CPU(moxie_env_get_cpu(env));
cpu_loop_exit(cs);
}
-#if defined(CONFIG_USER_ONLY)
-
-void moxie_cpu_do_interrupt(CPUState *cs)
-{
- CPUState *cs = CPU(moxie_env_get_cpu(env));
-
- cs->exception_index = -1;
-}
-
-int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
-{
- MoxieCPU *cpu = MOXIE_CPU(cs);
-
- cs->exception_index = 0xaa;
- cpu->env.debug1 = address;
- cpu_dump_state(cs, stderr, 0);
- return 1;
-}
-
-#else /* !CONFIG_USER_ONLY */
-
-int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
+bool moxie_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
MoxieCPU *cpu = MOXIE_CPU(cs);
CPUMoxieState *env = &cpu->env;
MoxieMMUResult res;
int prot, miss;
- target_ulong phy;
- int r = 1;
address &= TARGET_PAGE_MASK;
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- miss = moxie_mmu_translate(&res, env, address, rw, mmu_idx);
- if (miss) {
- /* handle the miss. */
- phy = 0;
- cs->exception_index = MOXIE_EX_MMU_MISS;
- } else {
- phy = res.phy;
- r = 0;
+ miss = moxie_mmu_translate(&res, env, address, access_type, mmu_idx);
+ if (likely(!miss)) {
+ tlb_set_page(cs, address, res.phy, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ if (probe) {
+ return false;
}
- tlb_set_page(cs, address, phy, prot, mmu_idx, TARGET_PAGE_SIZE);
- return r;
-}
+ cs->exception_index = MOXIE_EX_MMU_MISS;
+ cpu_loop_exit_restore(cs, retaddr);
+}
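
The moxie hunk above is the smallest complete instance of the new interface, so it is worth spelling out the contract once. A minimal sketch, not part of the patch -- example_translate() and EXAMPLE_EXCP_MMU_FAULT are invented placeholders; only the CPUClass::tlb_fill signature and the tlb_set_page()/cpu_loop_exit_restore() calls are real QEMU API:

    static bool example_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                                     MMUAccessType access_type, int mmu_idx,
                                     bool probe, uintptr_t retaddr)
    {
        hwaddr phys;
        int prot;

        /* 1. Success: install a TLB entry and return true.  */
        if (example_translate(cs, address, access_type, mmu_idx,
                              &phys, &prot)) {
            tlb_set_page(cs, address & TARGET_PAGE_MASK,
                         phys & TARGET_PAGE_MASK, prot,
                         mmu_idx, TARGET_PAGE_SIZE);
            return true;
        }
        /* 2. Probe: report failure without disturbing CPU state.  */
        if (probe) {
            return false;
        }
        /* 3. Fault: record it and unwind; this never returns.  */
        cs->exception_index = EXAMPLE_EXCP_MMU_FAULT;
        cpu_loop_exit_restore(cs, retaddr);
    }

Every per-target conversion in this series is a variation on these three arms.
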
void moxie_cpu_do_interrupt(CPUState *cs)
{
}
return phy;
}
-#endif
cc->dump_state = nios2_cpu_dump_state;
cc->set_pc = nios2_cpu_set_pc;
cc->disas_set_info = nios2_cpu_disas_set_info;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = nios2_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = nios2_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = nios2_cpu_do_unaligned_access;
cc->get_phys_page_debug = nios2_cpu_get_phys_page_debug;
#endif
* License along with this library; if not, see
* <http://www.gnu.org/licenses/lgpl-2.1.html>
*/
-#ifndef CPU_NIOS2_H
-#define CPU_NIOS2_H
+
+#ifndef NIOS2_CPU_H
+#define NIOS2_CPU_H
#include "qemu-common.h"
MMU_SUPERVISOR_IDX;
}
-int nios2_cpu_handle_mmu_fault(CPUState *env, vaddr address, int size,
- int rw, int mmu_idx);
+bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
static inline int cpu_interrupts_enabled(CPUNios2State *env)
{
*flags = (env->regs[CR_STATUS] & (CR_STATUS_EH | CR_STATUS_U));
}
-#endif /* CPU_NIOS2_H */
+#endif /* NIOS2_CPU_H */
env->regs[R_EA] = env->regs[R_PC] + 4;
}
-int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
+bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
cs->exception_index = 0xaa;
- /* Page 0x1000 is kuser helper */
- if (address < 0x1000 || address >= 0x2000) {
- cpu_dump_state(cs, stderr, 0);
- }
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else /* !CONFIG_USER_ONLY */
}
}
-static int cpu_nios2_handle_virtual_page(
- CPUState *cs, target_ulong address, int rw, int mmu_idx)
-{
- Nios2CPU *cpu = NIOS2_CPU(cs);
- CPUNios2State *env = &cpu->env;
- target_ulong vaddr, paddr;
- Nios2MMULookup lu;
- unsigned int hit;
- hit = mmu_translate(env, &lu, address, rw, mmu_idx);
- if (hit) {
- vaddr = address & TARGET_PAGE_MASK;
- paddr = lu.paddr + vaddr - lu.vaddr;
-
- if (((rw == 0) && (lu.prot & PAGE_READ)) ||
- ((rw == 1) && (lu.prot & PAGE_WRITE)) ||
- ((rw == 2) && (lu.prot & PAGE_EXEC))) {
-
- tlb_set_page(cs, vaddr, paddr, lu.prot,
- mmu_idx, TARGET_PAGE_SIZE);
- return 0;
- } else {
- /* Permission violation */
- cs->exception_index = (rw == 0) ? EXCP_TLBR :
- ((rw == 1) ? EXCP_TLBW :
- EXCP_TLBX);
- }
- } else {
- cs->exception_index = EXCP_TLBD;
- }
-
- if (rw == 2) {
- env->regs[CR_TLBMISC] &= ~CR_TLBMISC_D;
- } else {
- env->regs[CR_TLBMISC] |= CR_TLBMISC_D;
- }
- env->regs[CR_PTEADDR] &= CR_PTEADDR_PTBASE_MASK;
- env->regs[CR_PTEADDR] |= (address >> 10) & CR_PTEADDR_VPN_MASK;
- env->mmu.pteaddr_wr = env->regs[CR_PTEADDR];
- env->regs[CR_BADADDR] = address;
- return 1;
-}
-
-int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
-{
- Nios2CPU *cpu = NIOS2_CPU(cs);
- CPUNios2State *env = &cpu->env;
-
- if (cpu->mmu_present) {
- if (MMU_SUPERVISOR_IDX == mmu_idx) {
- if (address >= 0xC0000000) {
- /* Kernel physical page - TLB bypassed */
- address &= TARGET_PAGE_MASK;
- tlb_set_page(cs, address, address, PAGE_BITS,
- mmu_idx, TARGET_PAGE_SIZE);
- } else if (address >= 0x80000000) {
- /* Kernel virtual page */
- return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx);
- } else {
- /* User virtual page */
- return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx);
- }
- } else {
- if (address >= 0x80000000) {
- /* Illegal access from user mode */
- cs->exception_index = EXCP_SUPERA;
- env->regs[CR_BADADDR] = address;
- return 1;
- } else {
- /* User virtual page */
- return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx);
- }
- }
- } else {
- /* No MMU */
- address &= TARGET_PAGE_MASK;
- tlb_set_page(cs, address, address, PAGE_BITS,
- mmu_idx, TARGET_PAGE_SIZE);
- }
-
- return 0;
-}
-
hwaddr nios2_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
Nios2CPU *cpu = NIOS2_CPU(cs);
env->regs[CR_EXCEPTION] = EXCP_UNALIGN << 2;
helper_raise_exception(env, EXCP_UNALIGN);
}
+
+bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+ unsigned int excp = EXCP_TLBD;
+ target_ulong vaddr, paddr;
+ Nios2MMULookup lu;
+ unsigned int hit;
+
+ if (!cpu->mmu_present) {
+ /* No MMU */
+ address &= TARGET_PAGE_MASK;
+ tlb_set_page(cs, address, address, PAGE_BITS,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+
+ if (MMU_SUPERVISOR_IDX == mmu_idx) {
+ if (address >= 0xC0000000) {
+ /* Kernel physical page - TLB bypassed */
+ address &= TARGET_PAGE_MASK;
+ tlb_set_page(cs, address, address, PAGE_BITS,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ } else {
+ if (address >= 0x80000000) {
+ /* Illegal access from user mode */
+ if (probe) {
+ return false;
+ }
+ cs->exception_index = EXCP_SUPERA;
+ env->regs[CR_BADADDR] = address;
+ cpu_loop_exit_restore(cs, retaddr);
+ }
+ }
+
+ /* Virtual page. */
+ hit = mmu_translate(env, &lu, address, access_type, mmu_idx);
+ if (hit) {
+ vaddr = address & TARGET_PAGE_MASK;
+ paddr = lu.paddr + vaddr - lu.vaddr;
+
+ if (((access_type == MMU_DATA_LOAD) && (lu.prot & PAGE_READ)) ||
+ ((access_type == MMU_DATA_STORE) && (lu.prot & PAGE_WRITE)) ||
+ ((access_type == MMU_INST_FETCH) && (lu.prot & PAGE_EXEC))) {
+ tlb_set_page(cs, vaddr, paddr, lu.prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+
+ /* Permission violation */
+ excp = (access_type == MMU_DATA_LOAD ? EXCP_TLBR :
+ access_type == MMU_DATA_STORE ? EXCP_TLBW : EXCP_TLBX);
+ }
+
+ if (probe) {
+ return false;
+ }
+
+ if (access_type == MMU_INST_FETCH) {
+ env->regs[CR_TLBMISC] &= ~CR_TLBMISC_D;
+ } else {
+ env->regs[CR_TLBMISC] |= CR_TLBMISC_D;
+ }
+ env->regs[CR_PTEADDR] &= CR_PTEADDR_PTBASE_MASK;
+ env->regs[CR_PTEADDR] |= (address >> 10) & CR_PTEADDR_VPN_MASK;
+ env->mmu.pteaddr_wr = env->regs[CR_PTEADDR];
+
+ cs->exception_index = excp;
+ env->regs[CR_BADADDR] = address;
+ cpu_loop_exit_restore(cs, retaddr);
+}
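
The control flow above encodes the classic Nios2 address map. Purely as a reading aid (this helper is hypothetical; the boundaries are taken from the code above):

    /* Which path an access takes in nios2_cpu_tlb_fill().  */
    static const char *nios2_region(uint32_t addr, bool supervisor)
    {
        if (!supervisor && addr >= 0x80000000) {
            return "EXCP_SUPERA: kernel space touched from user mode";
        }
        if (supervisor && addr >= 0xC0000000) {
            return "kernel physical window, mapped 1:1, TLB bypassed";
        }
        return "virtual page, translated via mmu_translate()";
    }
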
#endif /* !CONFIG_USER_ONLY */
#define MMU_LOG(x)
#endif
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = nios2_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret)) {
- /* now we have a real cpu fault */
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
-
void mmu_read_debug(CPUNios2State *env, uint32_t rn)
{
switch (rn) {
* License along with this library; if not, see
* <http://www.gnu.org/licenses/lgpl-2.1.html>
*/
-#ifndef MMU_NIOS2_H
-#define MMU_NIOS2_H
+
+#ifndef NIOS2_MMU_H
+#define NIOS2_MMU_H
typedef struct Nios2TLBEntry {
target_ulong tag;
void mmu_write(CPUNios2State *env, uint32_t rn, uint32_t v);
void mmu_init(CPUNios2State *env);
-#endif /* MMU_NIOS2_H */
+#endif /* NIOS2_MMU_H */
cc->set_pc = openrisc_cpu_set_pc;
cc->gdb_read_register = openrisc_cpu_gdb_read_register;
cc->gdb_write_register = openrisc_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = openrisc_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = openrisc_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = openrisc_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_openrisc_cpu;
#endif
int openrisc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void openrisc_translate_init(void);
-int openrisc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
- int rw, int mmu_idx);
+bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc);
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
cpu->env.lock_addr = -1;
}
-int openrisc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
+bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
-#ifdef CONFIG_USER_ONLY
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
- raise_mmu_exception(cpu, address, EXCP_DPF);
- return 1;
-#else
- g_assert_not_reached();
+ int excp = EXCP_DPF;
+
+#ifndef CONFIG_USER_ONLY
+ int prot;
+ hwaddr phys_addr;
+
+ if (mmu_idx == MMU_NOMMU_IDX) {
+ /* The mmu is disabled; lookups never fail. */
+ get_phys_nommu(&phys_addr, &prot, addr);
+ excp = 0;
+ } else {
+ bool super = mmu_idx == MMU_SUPERVISOR_IDX;
+ int need = (access_type == MMU_INST_FETCH ? PAGE_EXEC
+ : access_type == MMU_DATA_STORE ? PAGE_WRITE
+ : PAGE_READ);
+ excp = get_phys_mmu(cpu, &phys_addr, &prot, addr, need, super);
+ }
+
+ if (likely(excp == 0)) {
+ tlb_set_page(cs, addr & TARGET_PAGE_MASK,
+ phys_addr & TARGET_PAGE_MASK, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ if (probe) {
+ return false;
+ }
#endif
+
+ raise_mmu_exception(cpu, addr, excp);
+ cpu_loop_exit_restore(cs, retaddr);
}
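
One detail worth noting in the openrisc arm above: the access type is folded down to a single protection bit before translation. The same mapping recurs in several targets; as a reading aid (the constants are real, the helper is hypothetical):

    /* Protection bit demanded from the TLB for a given access type.  */
    static int page_bit_for(MMUAccessType access_type)
    {
        return access_type == MMU_INST_FETCH ? PAGE_EXEC
             : access_type == MMU_DATA_STORE ? PAGE_WRITE
             : PAGE_READ;
    }
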
#ifndef CONFIG_USER_ONLY
return phys_addr;
}
}
-
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- OpenRISCCPU *cpu = OPENRISC_CPU(cs);
- int prot, excp;
- hwaddr phys_addr;
-
- if (mmu_idx == MMU_NOMMU_IDX) {
- /* The mmu is disabled; lookups never fail. */
- get_phys_nommu(&phys_addr, &prot, addr);
- excp = 0;
- } else {
- bool super = mmu_idx == MMU_SUPERVISOR_IDX;
- int need = (access_type == MMU_INST_FETCH ? PAGE_EXEC
- : access_type == MMU_DATA_STORE ? PAGE_WRITE
- : PAGE_READ);
- excp = get_phys_mmu(cpu, &phys_addr, &prot, addr, need, super);
- }
-
- if (unlikely(excp)) {
- raise_mmu_exception(cpu, addr, excp);
- cpu_loop_exit_restore(cs, retaddr);
- }
-
- tlb_set_page(cs, addr & TARGET_PAGE_MASK,
- phys_addr & TARGET_PAGE_MASK, prot,
- mmu_idx, TARGET_PAGE_SIZE);
-}
#endif
* is returned if the signal was handled by the virtual CPU.
*/
int cpu_ppc_signal_handler(int host_signum, void *pinfo, void *puc);
-#if defined(CONFIG_USER_ONLY)
-int ppc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
-#endif
+bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
#if !defined(CONFIG_USER_ONLY)
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef MMU_BOOOK3S_V3_H
-#define MMU_BOOOK3S_V3_H
+#ifndef PPC_MMU_BOOK3S_V3_H
+#define PPC_MMU_BOOK3S_V3_H
#include "mmu-hash64.h"
#endif /* CONFIG_USER_ONLY */
-#endif /* MMU_BOOOK3S_V3_H */
+#endif /* PPC_MMU_BOOK3S_V3_H */
/*****************************************************************************/
-/*
- * try to fill the TLB and return an exception if error. If retaddr is
- * NULL, it means that the function was called in C code (i.e. not
- * from generated code or from helper.c)
- *
- * XXX: fix it to restore all registers
- */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+bool ppc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
ret = cpu_ppc_handle_mmu_fault(env, addr, access_type, mmu_idx);
}
if (unlikely(ret != 0)) {
+ if (probe) {
+ return false;
+ }
raise_exception_err_ra(env, cs->exception_index, env->error_code,
retaddr);
}
+ return true;
}
/* abs - abs. */
static void gen_abs(DisasContext *ctx)
{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rA(ctx->opcode)], 0, l1);
- tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- gen_set_label(l2);
+ TCGv d = cpu_gpr[rD(ctx->opcode)];
+ TCGv a = cpu_gpr[rA(ctx->opcode)];
+
+ tcg_gen_abs_tl(d, a);
if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ gen_set_Rc0(ctx, d);
}
}
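
gen_abs() now defers to tcg_gen_abs_tl(), added earlier in this series; hosts with a native abs emit it directly, others get a branch-free expansion. For reference, the usual branchless identity -- a sketch, not necessarily the exact middle-end expansion TCG chooses:

    /* abs(a) == (a ^ (a >> 31)) - (a >> 31), relying on arithmetic
       right shift of a negative int (true on all supported hosts).  */
    static int32_t abs_branchless(int32_t a)
    {
        int32_t t = a >> 31;    /* 0 if a >= 0, -1 if a < 0 */
        return (a ^ t) - t;     /* conditional two's-complement negate */
    }

Compared with the deleted version, this removes two branches and two labels per abs.
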
/* abso - abso. */
static void gen_abso(DisasContext *ctx)
{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGLabel *l3 = gen_new_label();
- /* Start with XER OV disabled, the most likely case */
- tcg_gen_movi_tl(cpu_ov, 0);
- tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rA(ctx->opcode)], 0, l2);
- tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[rA(ctx->opcode)], 0x80000000, l1);
- tcg_gen_movi_tl(cpu_ov, 1);
- tcg_gen_movi_tl(cpu_so, 1);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_br(l3);
- gen_set_label(l2);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- gen_set_label(l3);
+ TCGv d = cpu_gpr[rD(ctx->opcode)];
+ TCGv a = cpu_gpr[rA(ctx->opcode)];
+
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_ov, a, 0x80000000);
+ tcg_gen_abs_tl(d, a);
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ gen_set_Rc0(ctx, d);
}
}
/* nabs - nabs. */
static void gen_nabs(DisasContext *ctx)
{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_GT, cpu_gpr[rA(ctx->opcode)], 0, l1);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- gen_set_label(l2);
+ TCGv d = cpu_gpr[rD(ctx->opcode)];
+ TCGv a = cpu_gpr[rA(ctx->opcode)];
+
+ tcg_gen_abs_tl(d, a);
+ tcg_gen_neg_tl(d, d);
if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ gen_set_Rc0(ctx, d);
}
}
/* nabso - nabso. */
static void gen_nabso(DisasContext *ctx)
{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_GT, cpu_gpr[rA(ctx->opcode)], 0, l1);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- gen_set_label(l2);
+ TCGv d = cpu_gpr[rD(ctx->opcode)];
+ TCGv a = cpu_gpr[rA(ctx->opcode)];
+
+ tcg_gen_abs_tl(d, a);
+ tcg_gen_neg_tl(d, d);
/* nabs never overflows */
tcg_gen_movi_tl(cpu_ov, 0);
if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ gen_set_Rc0(ctx, d);
}
}
tcg_temp_free_i32(t0); \
}
-static inline void gen_op_evabs(TCGv_i32 ret, TCGv_i32 arg1)
-{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
-
- tcg_gen_brcondi_i32(TCG_COND_GE, arg1, 0, l1);
- tcg_gen_neg_i32(ret, arg1);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_mov_i32(ret, arg1);
- gen_set_label(l2);
-}
-GEN_SPEOP_ARITH1(evabs, gen_op_evabs);
+GEN_SPEOP_ARITH1(evabs, tcg_gen_abs_i32);
GEN_SPEOP_ARITH1(evneg, tcg_gen_neg_i32);
GEN_SPEOP_ARITH1(evextsb, tcg_gen_ext8s_i32);
GEN_SPEOP_ARITH1(evextsh, tcg_gen_ext16s_i32);
} \
static void glue(gen_, NAME)(DisasContext *ctx) \
{ \
+ static const TCGOpcode vecop_list[] = { \
+ glue(glue(INDEX_op_, NORM), _vec), \
+ glue(glue(INDEX_op_, SAT), _vec), \
+ INDEX_op_cmp_vec, 0 \
+ }; \
static const GVecGen4 g = { \
.fniv = glue(glue(gen_, NAME), _vec), \
.fno = glue(gen_helper_, NAME), \
- .opc = glue(glue(INDEX_op_, SAT), _vec), \
+ .opt_opc = vecop_list, \
.write_aofs = true, \
.vece = VECE, \
}; \
cc->gdb_read_register = ppc_cpu_gdb_read_register;
cc->gdb_write_register = ppc_cpu_gdb_write_register;
cc->do_unaligned_access = ppc_cpu_do_unaligned_access;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = ppc_cpu_handle_mmu_fault;
-#else
+#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_ppc_cpu;
#endif
#endif
#ifdef CONFIG_TCG
cc->tcg_initialize = ppc_translate_init;
+ cc->tlb_fill = ppc_cpu_tlb_fill;
#endif
cc->disas_set_info = ppc_disas_set_info;
#include "qemu/osdep.h"
#include "cpu.h"
+#include "exec/exec-all.h"
-int ppc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+
+bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
int exception, error_code;
- if (rw == 2) {
+ if (access_type == MMU_INST_FETCH) {
exception = POWERPC_EXCP_ISI;
error_code = 0x40000000;
} else {
exception = POWERPC_EXCP_DSI;
error_code = 0x40000000;
- if (rw) {
+ if (access_type == MMU_DATA_STORE) {
error_code |= 0x02000000;
}
env->spr[SPR_DAR] = address;
}
cs->exception_index = exception;
env->error_code = error_code;
-
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#endif
cc->gdb_stop_before_watchpoint = true;
cc->disas_set_info = riscv_cpu_disas_set_info;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = riscv_cpu_handle_mmu_fault;
-#else
+#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = riscv_cpu_do_unaligned_access;
cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
#endif
#ifdef CONFIG_TCG
cc->tcg_initialize = riscv_translate_init;
+ cc->tlb_fill = riscv_cpu_tlb_fill;
#endif
/* For now, mark unmigratable: */
cc->vmsd = &vmstate_riscv_cpu;
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type, int mmu_idx,
uintptr_t retaddr);
-int riscv_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
- int rw, int mmu_idx);
+bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
void riscv_cpu_list(void);
env->badaddr = addr;
riscv_raise_exception(env, cs->exception_index, retaddr);
}
-
-/* called by qemu's softmmu to fill the qemu tlb */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
- ret = riscv_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (ret == TRANSLATE_FAIL) {
- RISCVCPU *cpu = RISCV_CPU(cs);
- CPURISCVState *env = &cpu->env;
- riscv_raise_exception(env, cs->exception_index, retaddr);
- }
-}
-
#endif
-int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
+bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
+#ifndef CONFIG_USER_ONLY
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
-#if !defined(CONFIG_USER_ONLY)
hwaddr pa = 0;
int prot;
-#endif
int ret = TRANSLATE_FAIL;
- qemu_log_mask(CPU_LOG_MMU,
- "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx \
- %d\n", __func__, env->pc, address, rw, mmu_idx);
+ qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
+ __func__, address, access_type, mmu_idx);
+
+ ret = get_physical_address(env, &pa, &prot, address, access_type, mmu_idx);
-#if !defined(CONFIG_USER_ONLY)
- ret = get_physical_address(env, &pa, &prot, address, rw, mmu_idx);
qemu_log_mask(CPU_LOG_MMU,
- "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
- " prot %d\n", __func__, address, ret, pa, prot);
+ "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
+ " prot %d\n", __func__, address, ret, pa, prot);
+
if (riscv_feature(env, RISCV_FEATURE_PMP) &&
- !pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << rw)) {
+ !pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << access_type)) {
ret = TRANSLATE_FAIL;
}
if (ret == TRANSLATE_SUCCESS) {
tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
- } else if (ret == TRANSLATE_FAIL) {
- raise_mmu_exception(env, address, rw);
+ return true;
+ } else if (probe) {
+ return false;
+ } else {
+ raise_mmu_exception(env, address, access_type);
+ riscv_raise_exception(env, cs->exception_index, retaddr);
}
#else
- switch (rw) {
+ switch (access_type) {
case MMU_INST_FETCH:
cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
break;
cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
break;
}
+ cpu_loop_exit_restore(cs, retaddr);
#endif
- return ret;
}
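
Note the PMP check above passes "1 << access_type" as the required privilege: it leans on the MMUAccessType encoding (MMU_DATA_LOAD == 0, MMU_DATA_STORE == 1, MMU_INST_FETCH == 2) lining up with the pmp_priv_t bits. A hedged sketch of compile-time checks that would pin this down, assuming pmp.h continues PMP_WRITE = 1 << 1 and PMP_EXEC = 1 << 2 after the PMP_READ shown below:

    /* The implicit correspondence behind "1 << access_type".  */
    QEMU_BUILD_BUG_ON((1 << MMU_DATA_LOAD)  != PMP_READ);
    QEMU_BUILD_BUG_ON((1 << MMU_DATA_STORE) != PMP_WRITE);
    QEMU_BUILD_BUG_ON((1 << MMU_INST_FETCH) != PMP_EXEC);
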
/*
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _RISCV_PMP_H_
-#define _RISCV_PMP_H_
+#ifndef RISCV_PMP_H
+#define RISCV_PMP_H
typedef enum {
PMP_READ = 1 << 0,
feat-src = $(SRC_PATH)/target/$(TARGET_BASE_ARCH)/
feat-dst = $(BUILD_DIR)/$(TARGET_DIR)
ifneq ($(MAKECMDGOALS),clean)
-GENERATED_FILES += $(feat-dst)gen-features.h
+generated-files-y += $(feat-dst)gen-features.h
endif
$(feat-dst)gen-features.h: $(feat-dst)gen-features.h-timestamp
cc->set_pc = s390_cpu_set_pc;
cc->gdb_read_register = s390_cpu_gdb_read_register;
cc->gdb_write_register = s390_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = s390_cpu_handle_mmu_fault;
-#else
+#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = s390_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_s390_cpu;
cc->write_elf64_note = s390_cpu_write_elf64_note;
cc->disas_set_info = s390_cpu_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = s390x_translate_init;
+ cc->tlb_fill = s390_cpu_tlb_fill;
#endif
cc->gdb_num_core_regs = S390_NUM_CORE_REGS;
cs->exception_index = -1;
}
-int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
+bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
S390CPU *cpu = S390_CPU(cs);
/* On real machines this value is dropped into LowMem. Since this
is userland, simply put this someplace that cpu_loop can find it. */
cpu->env.__excp_addr = address;
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else /* !CONFIG_USER_ONLY */
}
}
-int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
- int rw, int mmu_idx)
+bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
target_ulong vaddr, raddr;
uint64_t asc;
- int prot;
+ int prot, fail;
qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
- __func__, orig_vaddr, rw, mmu_idx);
+ __func__, address, access_type, mmu_idx);
- vaddr = orig_vaddr;
+ vaddr = address;
if (mmu_idx < MMU_REAL_IDX) {
asc = cpu_mmu_idx_to_asc(mmu_idx);
if (!(env->psw.mask & PSW_MASK_64)) {
vaddr &= 0x7fffffff;
}
- if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
- return 1;
- }
+ fail = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, true);
} else if (mmu_idx == MMU_REAL_IDX) {
/* 31-Bit mode */
if (!(env->psw.mask & PSW_MASK_64)) {
vaddr &= 0x7fffffff;
}
- if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
- return 1;
- }
+ fail = mmu_translate_real(env, vaddr, access_type, &raddr, &prot);
} else {
- abort();
+ g_assert_not_reached();
}
/* check out of RAM access */
- if (!address_space_access_valid(&address_space_memory, raddr,
- TARGET_PAGE_SIZE, rw,
+ if (!fail &&
+ !address_space_access_valid(&address_space_memory, raddr,
+ TARGET_PAGE_SIZE, access_type,
MEMTXATTRS_UNSPECIFIED)) {
qemu_log_mask(CPU_LOG_MMU,
"%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
__func__, (uint64_t)raddr, (uint64_t)ram_size);
trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
- return 1;
+ fail = 1;
}
- qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
- __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
+ if (!fail) {
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
+ __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
+ tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ if (probe) {
+ return false;
+ }
- tlb_set_page(cs, orig_vaddr & TARGET_PAGE_MASK, raddr, prot,
- mmu_idx, TARGET_PAGE_SIZE);
+ cpu_restore_state(cs, retaddr, true);
+
+ /*
+ * The ILC value for code accesses is undefined. The important
+ * thing here is to *not* leave env->int_pgm_ilen set to ILEN_AUTO,
+ * which would cause do_program_interrupt to attempt to read from
+ * env->psw.addr again. Cf. the condition in trigger_page_fault,
+ * which is not universally applied.
+ *
+ * ??? If we remove ILEN_AUTO, by moving the computation of ILEN
+ * into cpu_restore_state, then we may remove this entirely.
+ */
+ if (access_type == MMU_INST_FETCH) {
+ env->int_pgm_ilen = 2;
+ }
- return 0;
+ cpu_loop_exit(cs);
}
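
The s390 arm is the one conversion that does not use cpu_loop_exit_restore() on the fault path: it needs to patch env->int_pgm_ilen after the state has been restored but before leaving the loop. Roughly -- a sketch of the equivalence; see exec/exec-all.h for the real declarations:

    /* What cpu_loop_exit_restore(cs, ra) amounts to, approximately.  */
    static void QEMU_NORETURN loop_exit_restore_sketch(CPUState *cs,
                                                       uintptr_t ra)
    {
        cpu_restore_state(cs, ra, true);  /* recover guest PC et al. */
        cpu_loop_exit(cs);                /* longjmp back to cpu_exec() */
    }

Splitting the two calls opens a window in which the restored state can be adjusted, which is exactly what the ILC fixup above exploits.
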
static void do_program_interrupt(CPUS390XState *env)
void s390x_cpu_debug_excp_handler(CPUState *cs);
void s390_cpu_do_interrupt(CPUState *cpu);
bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req);
-int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
/*****************************************************************************/
/* Softmmu support */
-#if !defined(CONFIG_USER_ONLY)
-
-/* try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret = s390_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret != 0)) {
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
-
-#endif
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
- TCGv_i64 z, n;
- z = tcg_const_i64(0);
- n = tcg_temp_new_i64();
- tcg_gen_neg_i64(n, o->in2);
- tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
- tcg_temp_free_i64(n);
- tcg_temp_free_i64(z);
+ tcg_gen_abs_i64(o->out, o->in2);
return DISAS_NEXT;
}
cc->synchronize_from_tb = superh_cpu_synchronize_from_tb;
cc->gdb_read_register = superh_cpu_gdb_read_register;
cc->gdb_write_register = superh_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = superh_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = superh_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = superh_cpu_do_unaligned_access;
cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
#endif
void sh4_translate_init(void);
int cpu_sh4_signal_handler(int host_signum, void *pinfo,
void *puc);
-int superh_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
void sh4_cpu_list(void);
#if !defined(CONFIG_USER_ONLY)
#include "hw/sh4/sh_intc.h"
#endif
-#if defined(CONFIG_USER_ONLY)
-
-void superh_cpu_do_interrupt(CPUState *cs)
-{
- cs->exception_index = -1;
-}
-
-int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
-{
- SuperHCPU *cpu = SUPERH_CPU(cs);
- CPUSH4State *env = &cpu->env;
-
- env->tea = address;
- cs->exception_index = -1;
- switch (rw) {
- case 0:
- cs->exception_index = 0x0a0;
- break;
- case 1:
- cs->exception_index = 0x0c0;
- break;
- case 2:
- cs->exception_index = 0x0a0;
- break;
- }
- return 1;
-}
-
-int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
-{
- /* For user mode, only U0 area is cacheable. */
- return !(addr & 0x80000000);
-}
-
-#else /* !CONFIG_USER_ONLY */
-
#define MMU_OK 0
#define MMU_ITLB_MISS (-1)
#define MMU_ITLB_MULTIPLE (-2)
#define MMU_DADDR_ERROR_READ (-12)
#define MMU_DADDR_ERROR_WRITE (-13)
+#if defined(CONFIG_USER_ONLY)
+
+void superh_cpu_do_interrupt(CPUState *cs)
+{
+ cs->exception_index = -1;
+}
+
+int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
+{
+ /* For user mode, only U0 area is cacheable. */
+ return !(addr & 0x80000000);
+}
+
+#else /* !CONFIG_USER_ONLY */
+
void superh_cpu_do_interrupt(CPUState *cs)
{
SuperHCPU *cpu = SUPERH_CPU(cs);
return get_mmu_address(env, physical, prot, address, rw, access_type);
}
-int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
-{
- SuperHCPU *cpu = SUPERH_CPU(cs);
- CPUSH4State *env = &cpu->env;
- target_ulong physical;
- int prot, ret, access_type;
-
- access_type = ACCESS_INT;
- ret =
- get_physical_address(env, &physical, &prot, address, rw,
- access_type);
-
- if (ret != MMU_OK) {
- env->tea = address;
- if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) {
- env->pteh = (env->pteh & PTEH_ASID_MASK) |
- (address & PTEH_VPN_MASK);
- }
- switch (ret) {
- case MMU_ITLB_MISS:
- case MMU_DTLB_MISS_READ:
- cs->exception_index = 0x040;
- break;
- case MMU_DTLB_MULTIPLE:
- case MMU_ITLB_MULTIPLE:
- cs->exception_index = 0x140;
- break;
- case MMU_ITLB_VIOLATION:
- cs->exception_index = 0x0a0;
- break;
- case MMU_DTLB_MISS_WRITE:
- cs->exception_index = 0x060;
- break;
- case MMU_DTLB_INITIAL_WRITE:
- cs->exception_index = 0x080;
- break;
- case MMU_DTLB_VIOLATION_READ:
- cs->exception_index = 0x0a0;
- break;
- case MMU_DTLB_VIOLATION_WRITE:
- cs->exception_index = 0x0c0;
- break;
- case MMU_IADDR_ERROR:
- case MMU_DADDR_ERROR_READ:
- cs->exception_index = 0x0e0;
- break;
- case MMU_DADDR_ERROR_WRITE:
- cs->exception_index = 0x100;
- break;
- default:
- cpu_abort(cs, "Unhandled MMU fault");
- }
- return 1;
- }
-
- address &= TARGET_PAGE_MASK;
- physical &= TARGET_PAGE_MASK;
-
- tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
- return 0;
-}
-
hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
SuperHCPU *cpu = SUPERH_CPU(cs);
if (needs_tlb_flush) {
tlb_flush_page(CPU(sh_env_get_cpu(s)), vpn << 10);
}
-
} else {
int index = (addr & 0x00003f00) >> 8;
tlb_t * entry = &s->utlb[index];
}
return false;
}
+
+bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+ int ret;
+
+#ifdef CONFIG_USER_ONLY
+ ret = (access_type == MMU_DATA_STORE ? MMU_DTLB_VIOLATION_WRITE :
+ access_type == MMU_INST_FETCH ? MMU_ITLB_VIOLATION :
+ MMU_DTLB_VIOLATION_READ);
+#else
+ target_ulong physical;
+ int prot, sh_access_type;
+
+ sh_access_type = ACCESS_INT;
+ ret = get_physical_address(env, &physical, &prot, address,
+ access_type, sh_access_type);
+
+ if (ret == MMU_OK) {
+ address &= TARGET_PAGE_MASK;
+ physical &= TARGET_PAGE_MASK;
+ tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ if (probe) {
+ return false;
+ }
+
+ if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) {
+ env->pteh = (env->pteh & PTEH_ASID_MASK) | (address & PTEH_VPN_MASK);
+ }
+#endif
+
+ env->tea = address;
+ switch (ret) {
+ case MMU_ITLB_MISS:
+ case MMU_DTLB_MISS_READ:
+ cs->exception_index = 0x040;
+ break;
+ case MMU_DTLB_MULTIPLE:
+ case MMU_ITLB_MULTIPLE:
+ cs->exception_index = 0x140;
+ break;
+ case MMU_ITLB_VIOLATION:
+ cs->exception_index = 0x0a0;
+ break;
+ case MMU_DTLB_MISS_WRITE:
+ cs->exception_index = 0x060;
+ break;
+ case MMU_DTLB_INITIAL_WRITE:
+ cs->exception_index = 0x080;
+ break;
+ case MMU_DTLB_VIOLATION_READ:
+ cs->exception_index = 0x0a0;
+ break;
+ case MMU_DTLB_VIOLATION_WRITE:
+ cs->exception_index = 0x0c0;
+ break;
+ case MMU_IADDR_ERROR:
+ case MMU_DADDR_ERROR_READ:
+ cs->exception_index = 0x0e0;
+ break;
+ case MMU_DADDR_ERROR_WRITE:
+ cs->exception_index = 0x100;
+ break;
+ default:
+ cpu_abort(cs, "Unhandled MMU fault");
+ }
+ cpu_loop_exit_restore(cs, retaddr);
+}
cpu_loop_exit_restore(cs, retaddr);
}
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = superh_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (ret) {
- /* now we have a real cpu fault */
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
-
#endif
void helper_ldtlb(CPUSH4State *env)
-#ifndef _SPARC_ASI_H
-#define _SPARC_ASI_H
+#ifndef SPARC_ASI_H
+#define SPARC_ASI_H
/* asi.h: Address Space Identifier values for the sparc.
*
* implicit, little-endian
*/
-#endif /* _SPARC_ASI_H */
+#endif /* SPARC_ASI_H */
cc->synchronize_from_tb = sparc_cpu_synchronize_from_tb;
cc->gdb_read_register = sparc_cpu_gdb_read_register;
cc->gdb_write_register = sparc_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = sparc_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = sparc_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->do_unassigned_access = sparc_cpu_unassigned_access;
cc->do_unaligned_access = sparc_cpu_do_unaligned_access;
cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
void sparc_cpu_list(void);
/* mmu_helper.c */
-int sparc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev);
void dump_mmu(CPUSPARCState *env);
#endif
cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
}
-
-/* try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = sparc_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (ret) {
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
#endif
#if defined(CONFIG_USER_ONLY)
-int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
- if (rw & 2) {
+ if (access_type == MMU_INST_FETCH) {
cs->exception_index = TT_TFAULT;
} else {
cs->exception_index = TT_DFAULT;
env->mmuregs[4] = address;
#endif
}
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else
}
/* Perform address translation */
-int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
target_ulong page_size;
int error_code = 0, prot, access_index;
+ /*
+ * TODO: If we ever need tlb_vaddr_to_host for this target,
+ * then we must figure out how to manipulate FSR and FAR
+ * when both MMU_NF and probe are set. In the meantime,
+ * do not support this use case.
+ */
+ assert(!probe);
+
address &= TARGET_PAGE_MASK;
error_code = get_physical_address(env, &paddr, &prot, &access_index,
- address, rw, mmu_idx, &page_size);
+ address, access_type,
+ mmu_idx, &page_size);
vaddr = address;
- if (error_code == 0) {
+ if (likely(error_code == 0)) {
qemu_log_mask(CPU_LOG_MMU,
- "Translate at %" VADDR_PRIx " -> " TARGET_FMT_plx ", vaddr "
- TARGET_FMT_lx "\n", address, paddr, vaddr);
+ "Translate at %" VADDR_PRIx " -> "
+ TARGET_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
+ address, paddr, vaddr);
tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
- return 0;
+ return true;
}
if (env->mmuregs[3]) { /* Fault status register */
switching to normal mode. */
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
- return 0;
+ return true;
} else {
- if (rw & 2) {
+ if (access_type == MMU_INST_FETCH) {
cs->exception_index = TT_TFAULT;
} else {
cs->exception_index = TT_DFAULT;
}
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
}
}
/* Perform address translation */
-int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
address &= TARGET_PAGE_MASK;
error_code = get_physical_address(env, &paddr, &prot, &access_index,
- address, rw, mmu_idx, &page_size);
- if (error_code == 0) {
+ address, access_type,
+ mmu_idx, &page_size);
+ if (likely(error_code == 0)) {
vaddr = address;
trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl,
env->dmmu.mmu_secondary_context);
tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
- return 0;
+ return true;
}
- /* XXX */
- return 1;
+ if (probe) {
+ return false;
+ }
+ cpu_loop_exit_restore(cs, retaddr);
}
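
The assert(!probe) in the sparc32 arm, and the probe arm here, exist for callers that only want to ask whether an access would fault. A hedged sketch of such a caller -- tlb_vaddr_to_host() is the real API this serves, though its exact signature should be checked against exec/cpu_ldst.h:

    /* Return a host pointer if addr is resident and writable in this
       mmu_idx, NULL otherwise -- never raising a guest exception.  */
    static void *probe_write_sketch(CPUArchState *env, target_ulong addr,
                                    int mmu_idx)
    {
        return tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
    }

With probe == true the hook must return false instead of unwinding; sparc32's MMU_NF no-fault mode would need FSR/FAR side effects even then, hence the assert documented in the TODO above.
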
void dump_mmu(CPUSPARCState *env)
#include "hw/qdev-properties.h"
#include "linux-user/syscall_defs.h"
#include "qemu/qemu-print.h"
+#include "exec/exec-all.h"
static void tilegx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
cs->exception_index = -1;
}
-static int tilegx_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
+static bool tilegx_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
TileGXCPU *cpu = TILEGX_CPU(cs);
cpu->env.signo = TARGET_SIGSEGV;
cpu->env.sigcode = 0;
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
static bool tilegx_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
cc->cpu_exec_interrupt = tilegx_cpu_exec_interrupt;
cc->dump_state = tilegx_cpu_dump_state;
cc->set_pc = tilegx_cpu_set_pc;
- cc->handle_mmu_fault = tilegx_cpu_handle_mmu_fault;
+ cc->tlb_fill = tilegx_cpu_tlb_fill;
cc->gdb_num_core_regs = 0;
cc->tcg_initialize = tilegx_tcg_init;
}
cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb;
cc->get_phys_page_attrs_debug = tricore_cpu_get_phys_page_attrs_debug;
cc->tcg_initialize = tricore_tcg_init;
+ cc->tlb_fill = tricore_cpu_tlb_fill;
}
#define DEFINE_TRICORE_CPU_TYPE(cpu_model, initfn) \
#define CPU_RESOLVING_TYPE TYPE_TRICORE_CPU
/* helpers.c */
-int cpu_tricore_handle_mmu_fault(CPUState *cpu, target_ulong address,
- int rw, int mmu_idx);
-#define cpu_handle_mmu_fault cpu_tricore_handle_mmu_fault
+bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
#endif /* TRICORE_CPU_H */
{
}
-int cpu_tricore_handle_mmu_fault(CPUState *cs, target_ulong address,
- int rw, int mmu_idx)
+bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType rw, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
TriCoreCPU *cpu = TRICORE_CPU(cs);
CPUTriCoreState *env = &cpu->env;
access_type = ACCESS_INT;
ret = get_physical_address(env, &physical, &prot,
address, rw, access_type);
- qemu_log_mask(CPU_LOG_MMU, "%s address=" TARGET_FMT_lx " ret %d physical " TARGET_FMT_plx
- " prot %d\n", __func__, address, ret, physical, prot);
+
+ qemu_log_mask(CPU_LOG_MMU, "%s address=" TARGET_FMT_lx " ret %d physical "
+ TARGET_FMT_plx " prot %d\n",
+ __func__, (target_ulong)address, ret, physical, prot);
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
mmu_idx, TARGET_PAGE_SIZE);
- ret = 0;
- } else if (ret < 0) {
+ return true;
+ } else {
+ assert(ret < 0);
+ if (probe) {
+ return false;
+ }
raise_mmu_exception(env, address, rw, ret);
- ret = 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
-
- return ret;
}
static void tricore_cpu_list_entry(gpointer data, gpointer user_data)
{
return psw_read(env);
}
-
-
-static inline void QEMU_NORETURN do_raise_exception_err(CPUTriCoreState *env,
- uint32_t exception,
- int error_code,
- uintptr_t pc)
-{
- CPUState *cs = CPU(tricore_env_get_cpu(env));
- cs->exception_index = exception;
- env->error_code = error_code;
- /* now we have a real cpu fault */
- cpu_loop_exit_restore(cs, pc);
-}
-
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
- ret = cpu_tricore_handle_mmu_fault(cs, addr, access_type, mmu_idx);
- if (ret) {
- TriCoreCPU *cpu = TRICORE_CPU(cs);
- CPUTriCoreState *env = &cpu->env;
- do_raise_exception_err(env, cs->exception_index,
- env->error_code, retaddr);
- }
-}
static inline void gen_abs(TCGv ret, TCGv r1)
{
- TCGv temp = tcg_temp_new();
- TCGv t0 = tcg_const_i32(0);
-
- tcg_gen_neg_tl(temp, r1);
- tcg_gen_movcond_tl(TCG_COND_GE, ret, r1, t0, r1, temp);
+ tcg_gen_abs_tl(ret, r1);
/* overflow can only happen if r1 == 0x80000000 */
tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc SAV bit */
tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
-
- tcg_temp_free(temp);
- tcg_temp_free(t0);
}
static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_movi_tl(cpu_PSW_AV, 0);
if (!tricore_feature(env, TRICORE_FEATURE_131)) {
/* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
- tcg_gen_neg_tl(temp, temp3);
- /* use cpu_PSW_AV to compare against 0 */
- tcg_gen_movcond_tl(TCG_COND_LT, temp, temp3, cpu_PSW_AV,
- temp, temp3);
- tcg_gen_neg_tl(temp2, cpu_gpr_d[r2]);
- tcg_gen_movcond_tl(TCG_COND_LT, temp2, cpu_gpr_d[r2], cpu_PSW_AV,
- temp2, cpu_gpr_d[r2]);
+ tcg_gen_abs_tl(temp, temp3);
+ tcg_gen_abs_tl(temp2, cpu_gpr_d[r2]);
tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
} else {
/* overflow = (D[b] == 0) */
tcg_gen_movi_tl(cpu_PSW_AV, 0);
if (!tricore_feature(env, TRICORE_FEATURE_131)) {
/* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
- tcg_gen_neg_tl(temp, temp3);
- /* use cpu_PSW_AV to compare against 0 */
- tcg_gen_movcond_tl(TCG_COND_LT, temp, temp3, cpu_PSW_AV,
- temp, temp3);
- tcg_gen_neg_tl(temp2, cpu_gpr_d[r2]);
- tcg_gen_movcond_tl(TCG_COND_LT, temp2, cpu_gpr_d[r2], cpu_PSW_AV,
- temp2, cpu_gpr_d[r2]);
+ tcg_gen_abs_tl(temp, temp3);
+ tcg_gen_abs_tl(temp2, cpu_gpr_d[r2]);
tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
} else {
/* overflow = (D[b] == 0) */
cc->cpu_exec_interrupt = uc32_cpu_exec_interrupt;
cc->dump_state = uc32_cpu_dump_state;
cc->set_pc = uc32_cpu_set_pc;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = uc32_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = uc32_cpu_tlb_fill;
cc->get_phys_page_debug = uc32_cpu_get_phys_page_debug;
-#endif
cc->tcg_initialize = uc32_translate_init;
dc->vmsd = &vmstate_uc32_cpu;
}
}
}
-int uc32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool uc32_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
void uc32_translate_init(void);
void switch_mode(CPUUniCore32State *, int);
}
#endif
-#ifdef CONFIG_USER_ONLY
-void switch_mode(CPUUniCore32State *env, int mode)
-{
- UniCore32CPU *cpu = uc32_env_get_cpu(env);
-
- if (mode != ASR_MODE_USER) {
- cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
- }
-}
-
-void uc32_cpu_do_interrupt(CPUState *cs)
-{
- cpu_abort(cs, "NO interrupt in user mode\n");
-}
-
-int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int access_type, int mmu_idx)
-{
- cpu_abort(cs, "NO mmu fault in user mode\n");
- return 1;
-}
-#endif
-
bool uc32_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
if (interrupt_request & CPU_INTERRUPT_HARD) {
return ((uint32_t)x >> shift) | (x << (32 - shift));
}
}
-
-#ifndef CONFIG_USER_ONLY
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = uc32_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret)) {
- /* now we have a real cpu fault */
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
-#endif
return code;
}
-int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int access_type, int mmu_idx)
+bool uc32_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
UniCore32CPU *cpu = UNICORE32_CPU(cs);
CPUUniCore32State *env = &cpu->env;
phys_addr &= TARGET_PAGE_MASK;
address &= TARGET_PAGE_MASK;
tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
- return 0;
+ return true;
+ }
+
+ if (probe) {
+ return false;
}
env->cp0.c3_faultstatus = ret;
} else {
cs->exception_index = UC32_EXCP_DTRAP;
}
- return ret;
+ cpu_loop_exit_restore(cs, retaddr);
}
hwaddr uc32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
-#ifndef _XTENSA_CORE_CONFIGURATION_H
-#define _XTENSA_CORE_CONFIGURATION_H
-
+#ifndef XTENSA_CORE_DE212_CORE_ISA_H
+#define XTENSA_CORE_DE212_CORE_ISA_H
/****************************************************************************
Parameters Useful for Any Code, USER or PRIVILEGED
#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
-#endif /* _XTENSA_CORE_CONFIGURATION_H */
-
+#endif /* XTENSA_CORE_DE212_CORE_ISA_H */
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
-#ifndef _XTENSA_CORE_CONFIGURATION_H
-#define _XTENSA_CORE_CONFIGURATION_H
-
+#ifndef XTENSA_CORE_SAMPLE_CONTROLLER_CORE_ISA_H
+#define XTENSA_CORE_SAMPLE_CONTROLLER_CORE_ISA_H
/****************************************************************************
Parameters Useful for Any Code, USER or PRIVILEGED
#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
-#endif /* _XTENSA_CORE_CONFIGURATION_H */
-
+#endif /* XTENSA_CORE_SAMPLE_CONTROLLER_CORE_ISA_H */
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
-#ifndef _XTENSA_CORE_CONFIGURATION_H
-#define _XTENSA_CORE_CONFIGURATION_H
-
+#ifndef XTENSA_CORE_TEST_KC705_BE_CORE_ISA_H
+#define XTENSA_CORE_TEST_KC705_BE_CORE_ISA_H
/****************************************************************************
Parameters Useful for Any Code, USER or PRIVILEGED
#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
-#endif /* _XTENSA_CORE_CONFIGURATION_H */
-
+#endif /* XTENSA_CORE_TEST_KC705_BE_CORE_ISA_H */
* Copyright (c) 1999-2009 Tensilica Inc.
*/
-#ifndef _XTENSA_CORE_CONFIGURATION_H
-#define _XTENSA_CORE_CONFIGURATION_H
-
+#ifndef XTENSA_CORE_TEST_MMUHIFI_C3_CORE_ISA_H
+#define XTENSA_CORE_TEST_MMUHIFI_C3_CORE_ISA_H
/****************************************************************************
Parameters Useful for Any Code, USER or PRIVILEGED
#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
-#endif /* _XTENSA_CORE_CONFIGURATION_H */
-
+#endif /* XTENSA_CORE_TEST_MMUHIFI_C3_CORE_ISA_H */
cc->gdb_read_register = xtensa_cpu_gdb_read_register;
cc->gdb_write_register = xtensa_cpu_gdb_write_register;
cc->gdb_stop_before_watchpoint = true;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = xtensa_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = xtensa_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = xtensa_cpu_do_unaligned_access;
cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
cc->do_transaction_failed = xtensa_cpu_do_transaction_failed;
#define ENV_OFFSET offsetof(XtensaCPU, env)
-int xtensa_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, int size,
- int mmu_idx);
+bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
void xtensa_cpu_do_interrupt(CPUState *cpu);
bool xtensa_cpu_exec_interrupt(CPUState *cpu, int interrupt_request);
void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
#ifdef CONFIG_USER_ONLY
-int xtensa_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
XtensaCPU *cpu = XTENSA_CPU(cs);
CPUXtensaState *env = &cpu->env;
qemu_log_mask(CPU_LOG_INT,
"%s: rw = %d, address = 0x%08" VADDR_PRIx ", size = %d\n",
- __func__, rw, address, size);
+ __func__, access_type, address, size);
env->sregs[EXCVADDR] = address;
- env->sregs[EXCCAUSE] = rw ? STORE_PROHIBITED_CAUSE : LOAD_PROHIBITED_CAUSE;
+ env->sregs[EXCCAUSE] = (access_type == MMU_DATA_STORE ?
+ STORE_PROHIBITED_CAUSE : LOAD_PROHIBITED_CAUSE);
cs->exception_index = EXC_USER;
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else
}
}
-void tlb_fill(CPUState *cs, target_ulong vaddr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
XtensaCPU *cpu = XTENSA_CPU(cs);
CPUXtensaState *env = &cpu->env;
uint32_t paddr;
uint32_t page_size;
unsigned access;
- int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
- &paddr, &page_size, &access);
+ int ret = xtensa_get_physical_addr(env, true, address, access_type,
+ mmu_idx, &paddr, &page_size, &access);
- qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
- __func__, vaddr, access_type, mmu_idx, paddr, ret);
+ qemu_log_mask(CPU_LOG_MMU, "%s(%08" VADDR_PRIx
+ ", %d, %d) -> %08x, ret = %d\n",
+ __func__, address, access_type, mmu_idx, paddr, ret);
if (ret == 0) {
tlb_set_page(cs,
- vaddr & TARGET_PAGE_MASK,
+ address & TARGET_PAGE_MASK,
paddr & TARGET_PAGE_MASK,
access, mmu_idx, page_size);
+ return true;
+ } else if (probe) {
+ return false;
} else {
cpu_restore_state(cs, retaddr, true);
- HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
+ HELPER(exception_cause_vaddr)(env, env->pc, ret, address);
}
}
static void translate_abs(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- TCGv_i32 zero = tcg_const_i32(0);
- TCGv_i32 neg = tcg_temp_new_i32();
-
- tcg_gen_neg_i32(neg, arg[1].in);
- tcg_gen_movcond_i32(TCG_COND_GE, arg[0].out,
- arg[1].in, zero, arg[1].in, neg);
- tcg_temp_free(neg);
- tcg_temp_free(zero);
+ tcg_gen_abs_i32(arg[0].out, arg[1].in);
}
static void translate_add(DisasContext *dc, const OpcodeArg arg[],
extern xtensa_isa_status xtisa_errno;
extern char xtisa_error_msg[];
-#endif /* !XTENSA_ISA_INTERNAL_H */
+#endif /* XTENSA_ISA_INTERNAL_H */
Similarly, v0 = -v1.
+* abs_vec v0, v1
+
+ Similarly, v0 = v1 < 0 ? -v1 : v1, in elements across the vector.
+
* smin_vec:
* umin_vec:
#define TCG_TARGET_HAS_orc_vec 1
#define TCG_TARGET_HAS_not_vec 1
#define TCG_TARGET_HAS_neg_vec 1
+#define TCG_TARGET_HAS_abs_vec 1
#define TCG_TARGET_HAS_shi_vec 1
#define TCG_TARGET_HAS_shs_vec 0
-#define TCG_TARGET_HAS_shv_vec 0
+#define TCG_TARGET_HAS_shv_vec 1
#define TCG_TARGET_HAS_cmp_vec 1
#define TCG_TARGET_HAS_mul_vec 1
#define TCG_TARGET_HAS_sat_vec 1
I3207_BLR = 0xd63f0000,
I3207_RET = 0xd65f0000,
+ /* AdvSIMD load/store single structure. */
+ I3303_LD1R = 0x0d40c000,
+
/* Load literal for loading the address at pc-relative offset */
I3305_LDR = 0x58000000,
I3305_LDR_v64 = 0x5c000000,
I3616_CMEQ = 0x2e208c00,
I3616_SMAX = 0x0e206400,
I3616_SMIN = 0x0e206c00,
+ I3616_SSHL = 0x0e204400,
I3616_SQADD = 0x0e200c00,
I3616_SQSUB = 0x0e202c00,
I3616_UMAX = 0x2e206400,
I3616_UMIN = 0x2e206c00,
I3616_UQADD = 0x2e200c00,
I3616_UQSUB = 0x2e202c00,
+ I3616_USHL = 0x2e204400,
/* AdvSIMD two-reg misc. */
I3617_CMGT0 = 0x0e208800,
I3617_CMGE0 = 0x2e208800,
I3617_CMLE0 = 0x2e20a800,
I3617_NOT = 0x2e205800,
+ I3617_ABS = 0x0e20b800,
I3617_NEG = 0x2e20b800,
/* System instructions. */
#define tcg_out_insn(S, FMT, OP, ...) \
glue(tcg_out_insn_,FMT)(S, glue(glue(glue(I,FMT),_),OP), ## __VA_ARGS__)
-static void tcg_out_insn_3305(TCGContext *s, AArch64Insn insn, int imm19, TCGReg rt)
+static void tcg_out_insn_3303(TCGContext *s, AArch64Insn insn, bool q,
+ TCGReg rt, TCGReg rn, unsigned size)
+{
+ tcg_out32(s, insn | (rt & 0x1f) | (rn << 5) | (size << 10) | (q << 30));
+}
+
+static void tcg_out_insn_3305(TCGContext *s, AArch64Insn insn,
+ int imm19, TCGReg rt)
{
tcg_out32(s, insn | (imm19 & 0x7ffff) << 5 | rt);
}
}
static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
- TCGReg rd, uint64_t v64)
+ TCGReg rd, tcg_target_long v64)
{
int op, cmode, imm8;
}
}
+static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg rd, TCGReg rs)
+{
+ int is_q = type - TCG_TYPE_V64;
+ tcg_out_insn(s, 3605, DUP, is_q, rd, rs, 1 << vece, 0);
+ return true;
+}
+
+static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg r, TCGReg base, intptr_t offset)
+{
+ TCGReg temp = TCG_REG_TMP;
+
+ if (offset < -0xffffff || offset > 0xffffff) {
+ tcg_out_movi(s, TCG_TYPE_PTR, temp, offset);
+ tcg_out_insn(s, 3502, ADD, 1, temp, temp, base);
+ base = temp;
+ } else {
+ AArch64Insn add_insn = I3401_ADDI;
+
+ if (offset < 0) {
+ add_insn = I3401_SUBI;
+ offset = -offset;
+ }
+ if (offset & 0xfff000) {
+ tcg_out_insn_3401(s, add_insn, 1, temp, base, offset & 0xfff000);
+ base = temp;
+ }
+ if (offset & 0xfff) {
+ tcg_out_insn_3401(s, add_insn, 1, temp, base, offset & 0xfff);
+ base = temp;
+ }
+ }
+ tcg_out_insn(s, 3303, LD1R, type == TCG_TYPE_V128, r, base, vece);
+ return true;
+}
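
tcg_out_dupm_vec() has to materialize base+offset itself because LD1R has no immediate-offset form; in-range offsets are split across AArch64's 12-bit (optionally LSL #12) add immediates. A worked example of the split, assuming offset = 0x123456 (assert.h assumed for the sketch):

    static void dupm_offset_example(void)
    {
        intptr_t offset = 0x123456;
        intptr_t hi = offset & 0xfff000; /* 0x123000: ADDI #0x123, LSL #12 */
        intptr_t lo = offset & 0x000fff; /* 0x000456: ADDI #0x456 */
        assert(hi + lo == offset);       /* reached in at most two adds */
    }

Offsets outside +/-0xffffff take the movi + ADD fallback at the top of the function.
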
+
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
tcg_target_long value)
{
tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP);
}
-static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
+static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
if (ret == arg) {
- return;
+ return true;
}
switch (type) {
case TCG_TYPE_I32:
default:
g_assert_not_reached();
}
+ return true;
}
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
- case INDEX_op_mov_vec:
case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
case INDEX_op_movi_i64:
- case INDEX_op_dupi_vec:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
default:
g_assert_not_reached();
case INDEX_op_st_vec:
tcg_out_st(s, type, a0, a1, a2);
break;
+ case INDEX_op_dupm_vec:
+ tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
+ break;
case INDEX_op_add_vec:
tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2);
break;
case INDEX_op_neg_vec:
tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1);
break;
+ case INDEX_op_abs_vec:
+ tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1);
+ break;
case INDEX_op_and_vec:
tcg_out_insn(s, 3616, AND, is_q, 0, a0, a1, a2);
break;
case INDEX_op_not_vec:
tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a1);
break;
- case INDEX_op_dup_vec:
- tcg_out_insn(s, 3605, DUP, is_q, a0, a1, 1 << vece, 0);
- break;
case INDEX_op_shli_vec:
tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece));
break;
case INDEX_op_sari_vec:
tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
break;
+ case INDEX_op_shlv_vec:
+ tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
+ break;
+ case INDEX_op_aa64_sshl_vec:
+ tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
+ break;
case INDEX_op_cmp_vec:
{
TCGCond cond = args[3];
}
}
break;
+
+ case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
+ case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */
+ case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
default:
g_assert_not_reached();
}
case INDEX_op_andc_vec:
case INDEX_op_orc_vec:
case INDEX_op_neg_vec:
+ case INDEX_op_abs_vec:
case INDEX_op_not_vec:
case INDEX_op_cmp_vec:
case INDEX_op_shli_vec:
case INDEX_op_sssub_vec:
case INDEX_op_usadd_vec:
case INDEX_op_ussub_vec:
+ case INDEX_op_shlv_vec:
+ return 1;
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
+ return -1;
+ case INDEX_op_mul_vec:
case INDEX_op_smax_vec:
case INDEX_op_smin_vec:
case INDEX_op_umax_vec:
case INDEX_op_umin_vec:
- return 1;
- case INDEX_op_mul_vec:
return vece < MO_64;
default:
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
TCGArg a0, ...)
{
+ va_list va;
+ TCGv_vec v0, v1, v2, t1;
+
+ va_start(va, a0);
+ v0 = temp_tcgv_vec(arg_temp(a0));
+ v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
+ v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
+
+ switch (opc) {
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
+ /* Right shifts are negative left shifts for AArch64. */
+ t1 = tcg_temp_new_vec(type);
+ tcg_gen_neg_vec(vece, t1, v2);
+ opc = (opc == INDEX_op_shrv_vec
+ ? INDEX_op_shlv_vec : INDEX_op_aa64_sshl_vec);
+ vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
+ tcgv_vec_arg(v1), tcgv_vec_arg(t1));
+ tcg_temp_free_vec(t1);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ va_end(va);
}
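The expansion above relies on the AArch64 property that USHL/SSHL shift left for positive counts and right for negative counts, so a right shift by B is rewritten as a left shift by -B. A standalone per-element model of that identity (a sketch, not backend code; counts assumed smaller in magnitude than the element width):

    #include <stdint.h>

    /* Model of USHL: positive count shifts left, negative shifts right. */
    static uint64_t model_ushl(uint64_t x, int s)
    {
        return s >= 0 ? x << s : x >> -s;
    }

    /* Model of SSHL: the negative case is an arithmetic right shift
       (implementation-defined in C, arithmetic on common compilers). */
    static int64_t model_sshl(int64_t x, int s)
    {
        return s >= 0 ? (int64_t)((uint64_t)x << s) : x >> -s;
    }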
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_smin_vec:
case INDEX_op_umax_vec:
case INDEX_op_umin_vec:
+ case INDEX_op_shlv_vec:
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
+ case INDEX_op_aa64_sshl_vec:
return &w_w_w;
case INDEX_op_not_vec:
case INDEX_op_neg_vec:
+ case INDEX_op_abs_vec:
case INDEX_op_shli_vec:
case INDEX_op_shri_vec:
case INDEX_op_sari_vec:
return &w_w;
case INDEX_op_ld_vec:
case INDEX_op_st_vec:
+ case INDEX_op_dupm_vec:
return &w_r;
case INDEX_op_dup_vec:
return &w_wr;
/* Target-specific opcodes for host vector expansion. These will be
emitted by tcg_expand_vec_op. For those familiar with GCC internals,
consider these to be UNSPEC with names. */
+
+DEF(aa64_sshl_vec, 1, 2, 0, IMPLVEC)
return false;
}
-static inline void tcg_out_mov(TCGContext *s, TCGType type,
+static inline bool tcg_out_mov(TCGContext *s, TCGType type,
TCGReg ret, TCGReg arg)
{
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
+ tcg_out_mov_reg(s, COND_AL, ret, arg);
+ return true;
}
static inline void tcg_out_movi(TCGContext *s, TCGType type,
#define TCG_TARGET_HAS_orc_vec 0
#define TCG_TARGET_HAS_not_vec 0
#define TCG_TARGET_HAS_neg_vec 0
+#define TCG_TARGET_HAS_abs_vec 1
#define TCG_TARGET_HAS_shi_vec 1
-#define TCG_TARGET_HAS_shs_vec 0
-#define TCG_TARGET_HAS_shv_vec 0
+#define TCG_TARGET_HAS_shs_vec 1
+#define TCG_TARGET_HAS_shv_vec have_avx2
#define TCG_TARGET_HAS_cmp_vec 1
#define TCG_TARGET_HAS_mul_vec 1
#define TCG_TARGET_HAS_sat_vec 1
#define OPC_MOVBE_MyGy (0xf1 | P_EXT38)
#define OPC_MOVD_VyEy (0x6e | P_EXT | P_DATA16)
#define OPC_MOVD_EyVy (0x7e | P_EXT | P_DATA16)
-#define OPC_MOVDDUP (0x12 | P_EXT | P_SIMDF2)
#define OPC_MOVDQA_VxWx (0x6f | P_EXT | P_DATA16)
#define OPC_MOVDQA_WxVx (0x7f | P_EXT | P_DATA16)
#define OPC_MOVDQU_VxWx (0x6f | P_EXT | P_SIMDF3)
#define OPC_MOVSLQ (0x63 | P_REXW)
#define OPC_MOVZBL (0xb6 | P_EXT)
#define OPC_MOVZWL (0xb7 | P_EXT)
+#define OPC_PABSB (0x1c | P_EXT38 | P_DATA16)
+#define OPC_PABSW (0x1d | P_EXT38 | P_DATA16)
+#define OPC_PABSD (0x1e | P_EXT38 | P_DATA16)
#define OPC_PACKSSDW (0x6b | P_EXT | P_DATA16)
#define OPC_PACKSSWB (0x63 | P_EXT | P_DATA16)
#define OPC_PACKUSDW (0x2b | P_EXT38 | P_DATA16)
#define OPC_PSHIFTW_Ib (0x71 | P_EXT | P_DATA16) /* /2 /6 /4 */
#define OPC_PSHIFTD_Ib (0x72 | P_EXT | P_DATA16) /* /2 /6 /4 */
#define OPC_PSHIFTQ_Ib (0x73 | P_EXT | P_DATA16) /* /2 /6 /4 */
+#define OPC_PSLLW (0xf1 | P_EXT | P_DATA16)
+#define OPC_PSLLD (0xf2 | P_EXT | P_DATA16)
+#define OPC_PSLLQ (0xf3 | P_EXT | P_DATA16)
+#define OPC_PSRAW (0xe1 | P_EXT | P_DATA16)
+#define OPC_PSRAD (0xe2 | P_EXT | P_DATA16)
+#define OPC_PSRLW (0xd1 | P_EXT | P_DATA16)
+#define OPC_PSRLD (0xd2 | P_EXT | P_DATA16)
+#define OPC_PSRLQ (0xd3 | P_EXT | P_DATA16)
#define OPC_PSUBB (0xf8 | P_EXT | P_DATA16)
#define OPC_PSUBW (0xf9 | P_EXT | P_DATA16)
#define OPC_PSUBD (0xfa | P_EXT | P_DATA16)
#define OPC_UD2 (0x0b | P_EXT)
#define OPC_VPBLENDD (0x02 | P_EXT3A | P_DATA16)
#define OPC_VPBLENDVB (0x4c | P_EXT3A | P_DATA16)
+#define OPC_VPINSRB (0x20 | P_EXT3A | P_DATA16)
+#define OPC_VPINSRW (0xc4 | P_EXT | P_DATA16)
+#define OPC_VBROADCASTSS (0x18 | P_EXT38 | P_DATA16)
+#define OPC_VBROADCASTSD (0x19 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTB (0x78 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
#define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_REXW)
#define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
+#define OPC_VPSLLVD (0x47 | P_EXT38 | P_DATA16)
+#define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_REXW)
+#define OPC_VPSRAVD (0x46 | P_EXT38 | P_DATA16)
+#define OPC_VPSRLVD (0x45 | P_EXT38 | P_DATA16)
+#define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_REXW)
#define OPC_VZEROUPPER (0x77 | P_EXT)
#define OPC_XCHG_ax_r32 (0x90)
tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
}
-static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
+static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
int rexw = 0;
if (arg == ret) {
- return;
+ return true;
}
switch (type) {
case TCG_TYPE_I64:
default:
g_assert_not_reached();
}
+ return true;
}
-static void tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
+static const int avx2_dup_insn[4] = {
+ OPC_VPBROADCASTB, OPC_VPBROADCASTW,
+ OPC_VPBROADCASTD, OPC_VPBROADCASTQ,
+};
+
+static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg r, TCGReg a)
{
if (have_avx2) {
- static const int dup_insn[4] = {
- OPC_VPBROADCASTB, OPC_VPBROADCASTW,
- OPC_VPBROADCASTD, OPC_VPBROADCASTQ,
- };
int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
- tcg_out_vex_modrm(s, dup_insn[vece] + vex_l, r, 0, a);
+ tcg_out_vex_modrm(s, avx2_dup_insn[vece] + vex_l, r, 0, a);
} else {
switch (vece) {
case MO_8:
g_assert_not_reached();
}
}
+ return true;
+}
+
+static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg r, TCGReg base, intptr_t offset)
+{
+ if (have_avx2) {
+ int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
+ tcg_out_vex_modrm_offset(s, avx2_dup_insn[vece] + vex_l,
+ r, 0, base, offset);
+ } else {
+ switch (vece) {
+ case MO_64:
+ tcg_out_vex_modrm_offset(s, OPC_VBROADCASTSD, r, 0, base, offset);
+ break;
+ case MO_32:
+ tcg_out_vex_modrm_offset(s, OPC_VBROADCASTSS, r, 0, base, offset);
+ break;
+ case MO_16:
+ tcg_out_vex_modrm_offset(s, OPC_VPINSRW, r, r, base, offset);
+ tcg_out8(s, 0); /* imm8 */
+ tcg_out_dup_vec(s, type, vece, r, r);
+ break;
+ case MO_8:
+ tcg_out_vex_modrm_offset(s, OPC_VPINSRB, r, r, base, offset);
+ tcg_out8(s, 0); /* imm8 */
+ tcg_out_dup_vec(s, type, vece, r, r);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ return true;
}
static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
} else if (have_avx2) {
tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTQ + vex_l, ret);
} else {
- tcg_out_vex_modrm_pool(s, OPC_MOVDDUP, ret);
+ tcg_out_vex_modrm_pool(s, OPC_VBROADCASTSD, ret);
}
new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4);
- } else if (have_avx2) {
- tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTD + vex_l, ret);
- new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
} else {
- tcg_out_vex_modrm_pool(s, OPC_MOVD_VyEy, ret);
+ if (have_avx2) {
+ tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTD + vex_l, ret);
+ } else {
+ tcg_out_vex_modrm_pool(s, OPC_VBROADCASTSS, ret);
+ }
new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
- tcg_out_dup_vec(s, type, MO_32, ret, ret);
}
}
break;
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
- case INDEX_op_mov_vec:
case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
case INDEX_op_movi_i64:
- case INDEX_op_dupi_vec:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
default:
tcg_abort();
static int const umax_insn[4] = {
OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_UD2
};
+ static int const shlv_insn[4] = {
+ /* TODO: AVX512 adds support for MO_16. */
+ OPC_UD2, OPC_UD2, OPC_VPSLLVD, OPC_VPSLLVQ
+ };
+ static int const shrv_insn[4] = {
+ /* TODO: AVX512 adds support for MO_16. */
+ OPC_UD2, OPC_UD2, OPC_VPSRLVD, OPC_VPSRLVQ
+ };
+ static int const sarv_insn[4] = {
+ /* TODO: AVX512 adds support for MO_16, MO_64. */
+ OPC_UD2, OPC_UD2, OPC_VPSRAVD, OPC_UD2
+ };
+ static int const shls_insn[4] = {
+ OPC_UD2, OPC_PSLLW, OPC_PSLLD, OPC_PSLLQ
+ };
+ static int const shrs_insn[4] = {
+ OPC_UD2, OPC_PSRLW, OPC_PSRLD, OPC_PSRLQ
+ };
+ static int const sars_insn[4] = {
+ OPC_UD2, OPC_PSRAW, OPC_PSRAD, OPC_UD2
+ };
+ static int const abs_insn[4] = {
+ /* TODO: AVX512 adds support for MO_64. */
+ OPC_PABSB, OPC_PABSW, OPC_PABSD, OPC_UD2
+ };
TCGType type = vecl + TCG_TYPE_V64;
int insn, sub;
case INDEX_op_umax_vec:
insn = umax_insn[vece];
goto gen_simd;
+ case INDEX_op_shlv_vec:
+ insn = shlv_insn[vece];
+ goto gen_simd;
+ case INDEX_op_shrv_vec:
+ insn = shrv_insn[vece];
+ goto gen_simd;
+ case INDEX_op_sarv_vec:
+ insn = sarv_insn[vece];
+ goto gen_simd;
+ case INDEX_op_shls_vec:
+ insn = shls_insn[vece];
+ goto gen_simd;
+ case INDEX_op_shrs_vec:
+ insn = shrs_insn[vece];
+ goto gen_simd;
+ case INDEX_op_sars_vec:
+ insn = sars_insn[vece];
+ goto gen_simd;
case INDEX_op_x86_punpckl_vec:
insn = punpckl_insn[vece];
goto gen_simd;
insn = OPC_PUNPCKLDQ;
goto gen_simd;
#endif
+ case INDEX_op_abs_vec:
+ insn = abs_insn[vece];
+ a2 = a1;
+ a1 = 0;
+ goto gen_simd;
gen_simd:
tcg_debug_assert(insn != OPC_UD2);
if (type == TCG_TYPE_V256) {
case INDEX_op_st_vec:
tcg_out_st(s, type, a0, a1, a2);
break;
- case INDEX_op_dup_vec:
- tcg_out_dup_vec(s, type, vece, a0, a1);
+ case INDEX_op_dupm_vec:
+ tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
break;
case INDEX_op_x86_shufps_vec:
tcg_out8(s, a2);
break;
+ case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
+ case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */
+ case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
default:
g_assert_not_reached();
}
case INDEX_op_ld_vec:
case INDEX_op_st_vec:
+ case INDEX_op_dupm_vec:
return &x_r;
case INDEX_op_add_vec:
case INDEX_op_umin_vec:
case INDEX_op_smax_vec:
case INDEX_op_umax_vec:
+ case INDEX_op_shlv_vec:
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
+ case INDEX_op_shls_vec:
+ case INDEX_op_shrs_vec:
+ case INDEX_op_sars_vec:
case INDEX_op_cmp_vec:
case INDEX_op_x86_shufps_vec:
case INDEX_op_x86_blend_vec:
case INDEX_op_dup2_vec:
#endif
return &x_x_x;
+ case INDEX_op_abs_vec:
case INDEX_op_dup_vec:
case INDEX_op_shli_vec:
case INDEX_op_shri_vec:
}
return 1;
+ case INDEX_op_shls_vec:
+ case INDEX_op_shrs_vec:
+ return vece >= MO_16;
+ case INDEX_op_sars_vec:
+ return vece >= MO_16 && vece <= MO_32;
+
+ case INDEX_op_shlv_vec:
+ case INDEX_op_shrv_vec:
+ return have_avx2 && vece >= MO_32;
+ case INDEX_op_sarv_vec:
+ return have_avx2 && vece == MO_32;
+
case INDEX_op_mul_vec:
if (vece == MO_8) {
/* We can expand the operation for MO_8. */
case INDEX_op_umin_vec:
case INDEX_op_umax_vec:
return vece <= MO_32 ? 1 : -1;
+ case INDEX_op_abs_vec:
+ return vece <= MO_32;
default:
return 0;
tcg_out_opc_sa64(s, OPC_DSRA, OPC_DSRA32, rd, rt, sa);
}
-static inline void tcg_out_mov(TCGContext *s, TCGType type,
+static inline bool tcg_out_mov(TCGContext *s, TCGType type,
TCGReg ret, TCGReg arg)
{
/* Simple reg-reg move, optimising out the 'do nothing' case */
if (ret != arg) {
tcg_out_opc_reg(s, OPC_OR, ret, arg, TCG_REG_ZERO);
}
+ return true;
}
static void tcg_out_movi(TCGContext *s, TCGType type,
} else if (opc == INDEX_op_sub_i64) {
neg_op = INDEX_op_neg_i64;
have_neg = TCG_TARGET_HAS_neg_i64;
- } else {
+ } else if (TCG_TARGET_HAS_neg_vec) {
+ TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64;
+ unsigned vece = TCGOP_VECE(op);
neg_op = INDEX_op_neg_vec;
- have_neg = TCG_TARGET_HAS_neg_vec;
+ have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0;
+ } else {
+ break;
}
if (!have_neg) {
break;
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
TCGReg base, tcg_target_long offset);
-static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
+static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
if (ret != arg) {
tcg_out32(s, OR | SAB(arg, ret, arg));
}
+ return true;
}
static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
* TCG intrinsics
*/
-static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
+static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
if (ret == arg) {
- return;
+ return true;
}
switch (type) {
case TCG_TYPE_I32:
default:
g_assert_not_reached();
}
+ return true;
}
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}
-static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
+static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
if (src != dst) {
if (type == TCG_TYPE_I32) {
tcg_out_insn(s, RRE, LGR, dst, src);
}
}
+ return true;
}
static const S390Opcode lli_insns[4] = {
| (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}
-static inline void tcg_out_mov(TCGContext *s, TCGType type,
+static inline bool tcg_out_mov(TCGContext *s, TCGType type,
TCGReg ret, TCGReg arg)
{
if (ret != arg) {
tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
}
+ return true;
}
static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
#define MAX_UNROLL 4
+#ifdef CONFIG_DEBUG_TCG
+static const TCGOpcode vecop_list_empty[1] = { 0 };
+#else
+#define vecop_list_empty NULL
+#endif
+
/* Verify vector size and alignment rules. OFS should be the OR of all
of the operand offsets so that we can check them all at once. */
static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs)
* on elements of size VECE in the selected type. Do not select V64 if
* PREFER_I64 is true. Return 0 if no vector type is selected.
*/
-static TCGType choose_vector_type(TCGOpcode op, unsigned vece, uint32_t size,
- bool prefer_i64)
+static TCGType choose_vector_type(const TCGOpcode *list, unsigned vece,
+ uint32_t size, bool prefer_i64)
{
if (TCG_TARGET_HAS_v256 && check_size_impl(size, 32)) {
- if (op == 0) {
- return TCG_TYPE_V256;
- }
- /* Recall that ARM SVE allows vector sizes that are not a
+ /*
+ * Recall that ARM SVE allows vector sizes that are not a
* power of 2, but always a multiple of 16. The intent is
* that e.g. size == 80 would be expanded with 2x32 + 1x16.
* It is hard to imagine a case in which v256 is supported
* but v128 is not, but check anyway.
*/
- if (tcg_can_emit_vec_op(op, TCG_TYPE_V256, vece)
+ if (tcg_can_emit_vecop_list(list, TCG_TYPE_V256, vece)
&& (size % 32 == 0
- || tcg_can_emit_vec_op(op, TCG_TYPE_V128, vece))) {
+ || tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece))) {
return TCG_TYPE_V256;
}
}
if (TCG_TARGET_HAS_v128 && check_size_impl(size, 16)
- && (op == 0 || tcg_can_emit_vec_op(op, TCG_TYPE_V128, vece))) {
+ && tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece)) {
return TCG_TYPE_V128;
}
if (TCG_TARGET_HAS_v64 && !prefer_i64 && check_size_impl(size, 8)
- && (op == 0 || tcg_can_emit_vec_op(op, TCG_TYPE_V64, vece))) {
+ && tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)) {
return TCG_TYPE_V64;
}
return 0;
}
+static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
+ uint32_t maxsz, TCGv_vec t_vec)
+{
+ uint32_t i = 0;
+
+ switch (type) {
+ case TCG_TYPE_V256:
+ /*
+ * Recall that ARM SVE allows vector sizes that are not a
+ * power of 2, but always a multiple of 16. The intent is
+ * that e.g. size == 80 would be expanded with 2x32 + 1x16.
+ */
+ for (; i + 32 <= oprsz; i += 32) {
+ tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V256);
+ }
+ /* fallthru */
+ case TCG_TYPE_V128:
+ for (; i + 16 <= oprsz; i += 16) {
+ tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V128);
+ }
+ break;
+ case TCG_TYPE_V64:
+ for (; i < oprsz; i += 8) {
+ tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V64);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (oprsz < maxsz) {
+ expand_clr(dofs + oprsz, maxsz - oprsz);
+ }
+}
+
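As the comment notes, oprsz need not be a power of 2, only a multiple of 16, so an 80-byte operation is emitted as two 32-byte stores plus one 16-byte store. A standalone sketch of that byte accounting (hypothetical helper, sizes only):

    /* count_stores(80) == 3: two V256 pieces plus one V128 piece. */
    static unsigned count_stores(unsigned oprsz)
    {
        unsigned i = 0, n = 0;
        for (; i + 32 <= oprsz; i += 32) n++;   /* V256 stores */
        for (; i + 16 <= oprsz; i += 16) n++;   /* V128 stores */
        return n;
    }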
/* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C.
* Only one of IN_32 or IN_64 may be set;
* IN_C is used if IN_32 and IN_64 are unset.
/* Implement inline with a vector type, if possible.
* Prefer integer when 64-bit host and no variable dup.
*/
- type = choose_vector_type(0, vece, oprsz,
+ type = choose_vector_type(NULL, vece, oprsz,
(TCG_TARGET_REG_BITS == 64 && in_32 == NULL
&& (in_64 == NULL || vece == MO_64)));
if (type != 0) {
} else if (in_64) {
tcg_gen_dup_i64_vec(vece, t_vec, in_64);
} else {
- switch (vece) {
- case MO_8:
- tcg_gen_dup8i_vec(t_vec, in_c);
- break;
- case MO_16:
- tcg_gen_dup16i_vec(t_vec, in_c);
- break;
- case MO_32:
- tcg_gen_dup32i_vec(t_vec, in_c);
- break;
- default:
- tcg_gen_dup64i_vec(t_vec, in_c);
- break;
- }
- }
-
- i = 0;
- switch (type) {
- case TCG_TYPE_V256:
- /* Recall that ARM SVE allows vector sizes that are not a
- * power of 2, but always a multiple of 16. The intent is
- * that e.g. size == 80 would be expanded with 2x32 + 1x16.
- */
- for (; i + 32 <= oprsz; i += 32) {
- tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V256);
- }
- /* fallthru */
- case TCG_TYPE_V128:
- for (; i + 16 <= oprsz; i += 16) {
- tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V128);
- }
- break;
- case TCG_TYPE_V64:
- for (; i < oprsz; i += 8) {
- tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V64);
- }
- break;
- default:
- g_assert_not_reached();
+ tcg_gen_dupi_vec(vece, t_vec, in_c);
}
-
+ do_dup_store(type, dofs, oprsz, maxsz, t_vec);
tcg_temp_free_vec(t_vec);
- goto done;
+ return;
}
/* Otherwise, inline with an integer type, unless "large". */
tcg_temp_free_i32(t0);
}
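+/* Expand OPSZ bytes worth of three-vector operands and an immediate
+   operand using i32 elements. */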
+static void expand_3i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, int32_t c, bool load_dest,
+ void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t))
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ uint32_t i;
+
+ for (i = 0; i < oprsz; i += 4) {
+ tcg_gen_ld_i32(t0, cpu_env, aofs + i);
+ tcg_gen_ld_i32(t1, cpu_env, bofs + i);
+ if (load_dest) {
+ tcg_gen_ld_i32(t2, cpu_env, dofs + i);
+ }
+ fni(t2, t0, t1, c);
+ tcg_gen_st_i32(t2, cpu_env, dofs + i);
+ }
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+}
+
/* Expand OPSZ bytes worth of four-operand operations using i32 elements. */
static void expand_4_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t cofs, uint32_t oprsz, bool write_aofs,
tcg_temp_free_i64(t0);
}
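+/* Expand OPSZ bytes worth of three-vector operands and an immediate
+   operand using i64 elements. */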
+static void expand_3i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, int64_t c, bool load_dest,
+ void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t))
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ uint32_t i;
+
+ for (i = 0; i < oprsz; i += 8) {
+ tcg_gen_ld_i64(t0, cpu_env, aofs + i);
+ tcg_gen_ld_i64(t1, cpu_env, bofs + i);
+ if (load_dest) {
+ tcg_gen_ld_i64(t2, cpu_env, dofs + i);
+ }
+ fni(t2, t0, t1, c);
+ tcg_gen_st_i64(t2, cpu_env, dofs + i);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+
/* Expand OPSZ bytes worth of four-operand operations using i64 elements. */
static void expand_4_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t cofs, uint32_t oprsz, bool write_aofs,
tcg_temp_free_vec(t0);
}
+/*
+ * Expand OPSZ bytes worth of three-vector operands and an immediate operand
+ * using host vectors.
+ */
+static void expand_3i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t tysz,
+ TCGType type, int64_t c, bool load_dest,
+ void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec,
+ int64_t))
+{
+ TCGv_vec t0 = tcg_temp_new_vec(type);
+ TCGv_vec t1 = tcg_temp_new_vec(type);
+ TCGv_vec t2 = tcg_temp_new_vec(type);
+ uint32_t i;
+
+ for (i = 0; i < oprsz; i += tysz) {
+ tcg_gen_ld_vec(t0, cpu_env, aofs + i);
+ tcg_gen_ld_vec(t1, cpu_env, bofs + i);
+ if (load_dest) {
+ tcg_gen_ld_vec(t2, cpu_env, dofs + i);
+ }
+ fni(vece, t2, t0, t1, c);
+ tcg_gen_st_vec(t2, cpu_env, dofs + i);
+ }
+ tcg_temp_free_vec(t0);
+ tcg_temp_free_vec(t1);
+ tcg_temp_free_vec(t2);
+}
+
/* Expand OPSZ bytes worth of four-operand operations using host vectors. */
static void expand_4_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t cofs, uint32_t oprsz,
void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
{
+ const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
TCGType type;
uint32_t some;
type = 0;
if (g->fniv) {
- type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
+ type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
}
switch (type) {
case TCG_TYPE_V256:
} else {
assert(g->fno != NULL);
tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
- return;
+ oprsz = maxsz;
}
break;
default:
g_assert_not_reached();
}
+ tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
expand_clr(dofs + oprsz, maxsz - oprsz);
void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
uint32_t maxsz, int64_t c, const GVecGen2i *g)
{
+ const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
TCGType type;
uint32_t some;
type = 0;
if (g->fniv) {
- type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
+ type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
}
switch (type) {
case TCG_TYPE_V256:
maxsz, c, g->fnoi);
tcg_temp_free_i64(tcg_c);
}
- return;
+ oprsz = maxsz;
}
break;
default:
g_assert_not_reached();
}
+ tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
expand_clr(dofs + oprsz, maxsz - oprsz);
type = 0;
if (g->fniv) {
- type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
+ type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
}
if (type != 0) {
+ const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
TCGv_vec t_vec = tcg_temp_new_vec(type);
uint32_t some;
g_assert_not_reached();
}
tcg_temp_free_vec(t_vec);
+ tcg_swap_vecop_list(hold_list);
} else if (g->fni8 && check_size_impl(oprsz, 8)) {
TCGv_i64 t64 = tcg_temp_new_i64();
void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
{
+ const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
TCGType type;
uint32_t some;
type = 0;
if (g->fniv) {
- type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
+ type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
}
switch (type) {
case TCG_TYPE_V256:
assert(g->fno != NULL);
tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz,
maxsz, g->data, g->fno);
- return;
+ oprsz = maxsz;
+ }
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+ tcg_swap_vecop_list(hold_list);
+
+ if (oprsz < maxsz) {
+ expand_clr(dofs + oprsz, maxsz - oprsz);
+ }
+}
+
+/* Expand a vector operation with three vectors and an immediate. */
+void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, int64_t c,
+ const GVecGen3i *g)
+{
+ const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
+ TCGType type;
+ uint32_t some;
+
+ check_size_align(oprsz, maxsz, dofs | aofs | bofs);
+ check_overlap_3(dofs, aofs, bofs, maxsz);
+
+ type = 0;
+ if (g->fniv) {
+ type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
+ }
+ switch (type) {
+ case TCG_TYPE_V256:
+ /*
+ * Recall that ARM SVE allows vector sizes that are not a
+ * power of 2, but always a multiple of 16. The intent is
+ * that e.g. size == 80 would be expanded with 2x32 + 1x16.
+ */
+ some = QEMU_ALIGN_DOWN(oprsz, 32);
+ expand_3i_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
+ c, g->load_dest, g->fniv);
+ if (some == oprsz) {
+ break;
+ }
+ dofs += some;
+ aofs += some;
+ bofs += some;
+ oprsz -= some;
+ maxsz -= some;
+ /* fallthru */
+ case TCG_TYPE_V128:
+ expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
+ c, g->load_dest, g->fniv);
+ break;
+ case TCG_TYPE_V64:
+ expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
+ c, g->load_dest, g->fniv);
+ break;
+
+ case 0:
+ if (g->fni8 && check_size_impl(oprsz, 8)) {
+ expand_3i_i64(dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni8);
+ } else if (g->fni4 && check_size_impl(oprsz, 4)) {
+ expand_3i_i32(dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni4);
+ } else {
+ assert(g->fno != NULL);
+ tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, c, g->fno);
+ oprsz = maxsz;
}
break;
default:
g_assert_not_reached();
}
+ tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
expand_clr(dofs + oprsz, maxsz - oprsz);
void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
uint32_t oprsz, uint32_t maxsz, const GVecGen4 *g)
{
+ const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
TCGType type;
uint32_t some;
type = 0;
if (g->fniv) {
- type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
+ type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
}
switch (type) {
case TCG_TYPE_V256:
assert(g->fno != NULL);
tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs,
oprsz, maxsz, g->data, g->fno);
- return;
+ oprsz = maxsz;
}
break;
default:
g_assert_not_reached();
}
+ tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
expand_clr(dofs + oprsz, maxsz - oprsz);
void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t oprsz, uint32_t maxsz)
{
+ if (vece <= MO_64) {
+ TCGType type = choose_vector_type(NULL, vece, oprsz, 0);
+ if (type != 0) {
+ TCGv_vec t_vec = tcg_temp_new_vec(type);
+ tcg_gen_dup_mem_vec(vece, t_vec, cpu_env, aofs);
+ do_dup_store(type, dofs, oprsz, maxsz, t_vec);
+ tcg_temp_free_vec(t_vec);
+ return;
+ }
+ }
if (vece <= MO_32) {
TCGv_i32 in = tcg_temp_new_i32();
switch (vece) {
tcg_temp_free_i64(t2);
}
+static const TCGOpcode vecop_list_add[] = { INDEX_op_add_vec, 0 };
+
void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
{ .fni8 = tcg_gen_vec_add8_i64,
.fniv = tcg_gen_add_vec,
.fno = gen_helper_gvec_add8,
- .opc = INDEX_op_add_vec,
+ .opt_opc = vecop_list_add,
.vece = MO_8 },
{ .fni8 = tcg_gen_vec_add16_i64,
.fniv = tcg_gen_add_vec,
.fno = gen_helper_gvec_add16,
- .opc = INDEX_op_add_vec,
+ .opt_opc = vecop_list_add,
.vece = MO_16 },
{ .fni4 = tcg_gen_add_i32,
.fniv = tcg_gen_add_vec,
.fno = gen_helper_gvec_add32,
- .opc = INDEX_op_add_vec,
+ .opt_opc = vecop_list_add,
.vece = MO_32 },
{ .fni8 = tcg_gen_add_i64,
.fniv = tcg_gen_add_vec,
.fno = gen_helper_gvec_add64,
- .opc = INDEX_op_add_vec,
+ .opt_opc = vecop_list_add,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64 },
};
{ .fni8 = tcg_gen_vec_add8_i64,
.fniv = tcg_gen_add_vec,
.fno = gen_helper_gvec_adds8,
- .opc = INDEX_op_add_vec,
+ .opt_opc = vecop_list_add,
.vece = MO_8 },
{ .fni8 = tcg_gen_vec_add16_i64,
.fniv = tcg_gen_add_vec,
.fno = gen_helper_gvec_adds16,
- .opc = INDEX_op_add_vec,
+ .opt_opc = vecop_list_add,
.vece = MO_16 },
{ .fni4 = tcg_gen_add_i32,
.fniv = tcg_gen_add_vec,
.fno = gen_helper_gvec_adds32,
- .opc = INDEX_op_add_vec,
+ .opt_opc = vecop_list_add,
.vece = MO_32 },
{ .fni8 = tcg_gen_add_i64,
.fniv = tcg_gen_add_vec,
.fno = gen_helper_gvec_adds64,
- .opc = INDEX_op_add_vec,
+ .opt_opc = vecop_list_add,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64 },
};
tcg_temp_free_i64(tmp);
}
+static const TCGOpcode vecop_list_sub[] = { INDEX_op_sub_vec, 0 };
+
void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
{ .fni8 = tcg_gen_vec_sub8_i64,
.fniv = tcg_gen_sub_vec,
.fno = gen_helper_gvec_subs8,
- .opc = INDEX_op_sub_vec,
+ .opt_opc = vecop_list_sub,
.vece = MO_8 },
{ .fni8 = tcg_gen_vec_sub16_i64,
.fniv = tcg_gen_sub_vec,
.fno = gen_helper_gvec_subs16,
- .opc = INDEX_op_sub_vec,
+ .opt_opc = vecop_list_sub,
.vece = MO_16 },
{ .fni4 = tcg_gen_sub_i32,
.fniv = tcg_gen_sub_vec,
.fno = gen_helper_gvec_subs32,
- .opc = INDEX_op_sub_vec,
+ .opt_opc = vecop_list_sub,
.vece = MO_32 },
{ .fni8 = tcg_gen_sub_i64,
.fniv = tcg_gen_sub_vec,
.fno = gen_helper_gvec_subs64,
- .opc = INDEX_op_sub_vec,
+ .opt_opc = vecop_list_sub,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64 },
};
{ .fni8 = tcg_gen_vec_sub8_i64,
.fniv = tcg_gen_sub_vec,
.fno = gen_helper_gvec_sub8,
- .opc = INDEX_op_sub_vec,
+ .opt_opc = vecop_list_sub,
.vece = MO_8 },
{ .fni8 = tcg_gen_vec_sub16_i64,
.fniv = tcg_gen_sub_vec,
.fno = gen_helper_gvec_sub16,
- .opc = INDEX_op_sub_vec,
+ .opt_opc = vecop_list_sub,
.vece = MO_16 },
{ .fni4 = tcg_gen_sub_i32,
.fniv = tcg_gen_sub_vec,
.fno = gen_helper_gvec_sub32,
- .opc = INDEX_op_sub_vec,
+ .opt_opc = vecop_list_sub,
.vece = MO_32 },
{ .fni8 = tcg_gen_sub_i64,
.fniv = tcg_gen_sub_vec,
.fno = gen_helper_gvec_sub64,
- .opc = INDEX_op_sub_vec,
+ .opt_opc = vecop_list_sub,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64 },
};
tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
+static const TCGOpcode vecop_list_mul[] = { INDEX_op_mul_vec, 0 };
+
void tcg_gen_gvec_mul(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
static const GVecGen3 g[4] = {
{ .fniv = tcg_gen_mul_vec,
.fno = gen_helper_gvec_mul8,
- .opc = INDEX_op_mul_vec,
+ .opt_opc = vecop_list_mul,
.vece = MO_8 },
{ .fniv = tcg_gen_mul_vec,
.fno = gen_helper_gvec_mul16,
- .opc = INDEX_op_mul_vec,
+ .opt_opc = vecop_list_mul,
.vece = MO_16 },
{ .fni4 = tcg_gen_mul_i32,
.fniv = tcg_gen_mul_vec,
.fno = gen_helper_gvec_mul32,
- .opc = INDEX_op_mul_vec,
+ .opt_opc = vecop_list_mul,
.vece = MO_32 },
{ .fni8 = tcg_gen_mul_i64,
.fniv = tcg_gen_mul_vec,
.fno = gen_helper_gvec_mul64,
- .opc = INDEX_op_mul_vec,
+ .opt_opc = vecop_list_mul,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64 },
};
static const GVecGen2s g[4] = {
{ .fniv = tcg_gen_mul_vec,
.fno = gen_helper_gvec_muls8,
- .opc = INDEX_op_mul_vec,
+ .opt_opc = vecop_list_mul,
.vece = MO_8 },
{ .fniv = tcg_gen_mul_vec,
.fno = gen_helper_gvec_muls16,
- .opc = INDEX_op_mul_vec,
+ .opt_opc = vecop_list_mul,
.vece = MO_16 },
{ .fni4 = tcg_gen_mul_i32,
.fniv = tcg_gen_mul_vec,
.fno = gen_helper_gvec_muls32,
- .opc = INDEX_op_mul_vec,
+ .opt_opc = vecop_list_mul,
.vece = MO_32 },
{ .fni8 = tcg_gen_mul_i64,
.fniv = tcg_gen_mul_vec,
.fno = gen_helper_gvec_muls64,
- .opc = INDEX_op_mul_vec,
+ .opt_opc = vecop_list_mul,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64 },
};
void tcg_gen_gvec_ssadd(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
+ static const TCGOpcode vecop_list[] = { INDEX_op_ssadd_vec, 0 };
static const GVecGen3 g[4] = {
{ .fniv = tcg_gen_ssadd_vec,
.fno = gen_helper_gvec_ssadd8,
- .opc = INDEX_op_ssadd_vec,
+ .opt_opc = vecop_list,
.vece = MO_8 },
{ .fniv = tcg_gen_ssadd_vec,
.fno = gen_helper_gvec_ssadd16,
- .opc = INDEX_op_ssadd_vec,
+ .opt_opc = vecop_list,
.vece = MO_16 },
{ .fniv = tcg_gen_ssadd_vec,
.fno = gen_helper_gvec_ssadd32,
- .opc = INDEX_op_ssadd_vec,
+ .opt_opc = vecop_list,
.vece = MO_32 },
{ .fniv = tcg_gen_ssadd_vec,
.fno = gen_helper_gvec_ssadd64,
- .opc = INDEX_op_ssadd_vec,
+ .opt_opc = vecop_list,
.vece = MO_64 },
};
tcg_debug_assert(vece <= MO_64);
void tcg_gen_gvec_sssub(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
+ static const TCGOpcode vecop_list[] = { INDEX_op_sssub_vec, 0 };
static const GVecGen3 g[4] = {
{ .fniv = tcg_gen_sssub_vec,
.fno = gen_helper_gvec_sssub8,
- .opc = INDEX_op_sssub_vec,
+ .opt_opc = vecop_list,
.vece = MO_8 },
{ .fniv = tcg_gen_sssub_vec,
.fno = gen_helper_gvec_sssub16,
- .opc = INDEX_op_sssub_vec,
+ .opt_opc = vecop_list,
.vece = MO_16 },
{ .fniv = tcg_gen_sssub_vec,
.fno = gen_helper_gvec_sssub32,
- .opc = INDEX_op_sssub_vec,
+ .opt_opc = vecop_list,
.vece = MO_32 },
{ .fniv = tcg_gen_sssub_vec,
.fno = gen_helper_gvec_sssub64,
- .opc = INDEX_op_sssub_vec,
+ .opt_opc = vecop_list,
.vece = MO_64 },
};
tcg_debug_assert(vece <= MO_64);
void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
+ static const TCGOpcode vecop_list[] = { INDEX_op_usadd_vec, 0 };
static const GVecGen3 g[4] = {
{ .fniv = tcg_gen_usadd_vec,
.fno = gen_helper_gvec_usadd8,
- .opc = INDEX_op_usadd_vec,
+ .opt_opc = vecop_list,
.vece = MO_8 },
{ .fniv = tcg_gen_usadd_vec,
.fno = gen_helper_gvec_usadd16,
- .opc = INDEX_op_usadd_vec,
+ .opt_opc = vecop_list,
.vece = MO_16 },
{ .fni4 = tcg_gen_usadd_i32,
.fniv = tcg_gen_usadd_vec,
.fno = gen_helper_gvec_usadd32,
- .opc = INDEX_op_usadd_vec,
+ .opt_opc = vecop_list,
.vece = MO_32 },
{ .fni8 = tcg_gen_usadd_i64,
.fniv = tcg_gen_usadd_vec,
.fno = gen_helper_gvec_usadd64,
- .opc = INDEX_op_usadd_vec,
+ .opt_opc = vecop_list,
.vece = MO_64 }
};
tcg_debug_assert(vece <= MO_64);
void tcg_gen_gvec_ussub(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
+ static const TCGOpcode vecop_list[] = { INDEX_op_ussub_vec, 0 };
static const GVecGen3 g[4] = {
{ .fniv = tcg_gen_ussub_vec,
.fno = gen_helper_gvec_ussub8,
- .opc = INDEX_op_ussub_vec,
+ .opt_opc = vecop_list,
.vece = MO_8 },
{ .fniv = tcg_gen_ussub_vec,
.fno = gen_helper_gvec_ussub16,
- .opc = INDEX_op_ussub_vec,
+ .opt_opc = vecop_list,
.vece = MO_16 },
{ .fni4 = tcg_gen_ussub_i32,
.fniv = tcg_gen_ussub_vec,
.fno = gen_helper_gvec_ussub32,
- .opc = INDEX_op_ussub_vec,
+ .opt_opc = vecop_list,
.vece = MO_32 },
{ .fni8 = tcg_gen_ussub_i64,
.fniv = tcg_gen_ussub_vec,
.fno = gen_helper_gvec_ussub64,
- .opc = INDEX_op_ussub_vec,
+ .opt_opc = vecop_list,
.vece = MO_64 }
};
tcg_debug_assert(vece <= MO_64);
void tcg_gen_gvec_smin(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
+ static const TCGOpcode vecop_list[] = { INDEX_op_smin_vec, 0 };
static const GVecGen3 g[4] = {
{ .fniv = tcg_gen_smin_vec,
.fno = gen_helper_gvec_smin8,
- .opc = INDEX_op_smin_vec,
+ .opt_opc = vecop_list,
.vece = MO_8 },
{ .fniv = tcg_gen_smin_vec,
.fno = gen_helper_gvec_smin16,
- .opc = INDEX_op_smin_vec,
+ .opt_opc = vecop_list,
.vece = MO_16 },
{ .fni4 = tcg_gen_smin_i32,
.fniv = tcg_gen_smin_vec,
.fno = gen_helper_gvec_smin32,
- .opc = INDEX_op_smin_vec,
+ .opt_opc = vecop_list,
.vece = MO_32 },
{ .fni8 = tcg_gen_smin_i64,
.fniv = tcg_gen_smin_vec,
.fno = gen_helper_gvec_smin64,
- .opc = INDEX_op_smin_vec,
+ .opt_opc = vecop_list,
.vece = MO_64 }
};
tcg_debug_assert(vece <= MO_64);
void tcg_gen_gvec_umin(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
+ static const TCGOpcode vecop_list[] = { INDEX_op_umin_vec, 0 };
static const GVecGen3 g[4] = {
{ .fniv = tcg_gen_umin_vec,
.fno = gen_helper_gvec_umin8,
- .opc = INDEX_op_umin_vec,
+ .opt_opc = vecop_list,
.vece = MO_8 },
{ .fniv = tcg_gen_umin_vec,
.fno = gen_helper_gvec_umin16,
- .opc = INDEX_op_umin_vec,
+ .opt_opc = vecop_list,
.vece = MO_16 },
{ .fni4 = tcg_gen_umin_i32,
.fniv = tcg_gen_umin_vec,
.fno = gen_helper_gvec_umin32,
- .opc = INDEX_op_umin_vec,
+ .opt_opc = vecop_list,
.vece = MO_32 },
{ .fni8 = tcg_gen_umin_i64,
.fniv = tcg_gen_umin_vec,
.fno = gen_helper_gvec_umin64,
- .opc = INDEX_op_umin_vec,
+ .opt_opc = vecop_list,
.vece = MO_64 }
};
tcg_debug_assert(vece <= MO_64);
void tcg_gen_gvec_smax(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
+ static const TCGOpcode vecop_list[] = { INDEX_op_smax_vec, 0 };
static const GVecGen3 g[4] = {
{ .fniv = tcg_gen_smax_vec,
.fno = gen_helper_gvec_smax8,
- .opc = INDEX_op_smax_vec,
+ .opt_opc = vecop_list,
.vece = MO_8 },
{ .fniv = tcg_gen_smax_vec,
.fno = gen_helper_gvec_smax16,
- .opc = INDEX_op_smax_vec,
+ .opt_opc = vecop_list,
.vece = MO_16 },
{ .fni4 = tcg_gen_smax_i32,
.fniv = tcg_gen_smax_vec,
.fno = gen_helper_gvec_smax32,
- .opc = INDEX_op_smax_vec,
+ .opt_opc = vecop_list,
.vece = MO_32 },
{ .fni8 = tcg_gen_smax_i64,
.fniv = tcg_gen_smax_vec,
.fno = gen_helper_gvec_smax64,
- .opc = INDEX_op_smax_vec,
+ .opt_opc = vecop_list,
.vece = MO_64 }
};
tcg_debug_assert(vece <= MO_64);
void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
+ static const TCGOpcode vecop_list[] = { INDEX_op_umax_vec, 0 };
static const GVecGen3 g[4] = {
{ .fniv = tcg_gen_umax_vec,
.fno = gen_helper_gvec_umax8,
- .opc = INDEX_op_umax_vec,
+ .opt_opc = vecop_list,
.vece = MO_8 },
{ .fniv = tcg_gen_umax_vec,
.fno = gen_helper_gvec_umax16,
- .opc = INDEX_op_umax_vec,
+ .opt_opc = vecop_list,
.vece = MO_16 },
{ .fni4 = tcg_gen_umax_i32,
.fniv = tcg_gen_umax_vec,
.fno = gen_helper_gvec_umax32,
- .opc = INDEX_op_umax_vec,
+ .opt_opc = vecop_list,
.vece = MO_32 },
{ .fni8 = tcg_gen_umax_i64,
.fniv = tcg_gen_umax_vec,
.fno = gen_helper_gvec_umax64,
- .opc = INDEX_op_umax_vec,
+ .opt_opc = vecop_list,
.vece = MO_64 }
};
tcg_debug_assert(vece <= MO_64);
void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t oprsz, uint32_t maxsz)
{
+ static const TCGOpcode vecop_list[] = { INDEX_op_neg_vec, 0 };
static const GVecGen2 g[4] = {
{ .fni8 = tcg_gen_vec_neg8_i64,
.fniv = tcg_gen_neg_vec,
.fno = gen_helper_gvec_neg8,
- .opc = INDEX_op_neg_vec,
+ .opt_opc = vecop_list,
.vece = MO_8 },
{ .fni8 = tcg_gen_vec_neg16_i64,
.fniv = tcg_gen_neg_vec,
.fno = gen_helper_gvec_neg16,
- .opc = INDEX_op_neg_vec,
+ .opt_opc = vecop_list,
.vece = MO_16 },
{ .fni4 = tcg_gen_neg_i32,
.fniv = tcg_gen_neg_vec,
.fno = gen_helper_gvec_neg32,
- .opc = INDEX_op_neg_vec,
+ .opt_opc = vecop_list,
.vece = MO_32 },
{ .fni8 = tcg_gen_neg_i64,
.fniv = tcg_gen_neg_vec,
.fno = gen_helper_gvec_neg64,
- .opc = INDEX_op_neg_vec,
+ .opt_opc = vecop_list,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .vece = MO_64 },
+ };
+
+ tcg_debug_assert(vece <= MO_64);
+ tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g[vece]);
+}
+
+static void gen_absv_mask(TCGv_i64 d, TCGv_i64 b, unsigned vece)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+ int nbit = 8 << vece;
+
+ /* Create -1 for each negative element. */
+ tcg_gen_shri_i64(t, b, nbit - 1);
+ tcg_gen_andi_i64(t, t, dup_const(vece, 1));
+ tcg_gen_muli_i64(t, t, (1 << nbit) - 1);
+
+ /*
+ * Invert (via xor -1) and add one (via sub -1).
+ * Because of the ordering the msb is cleared,
+ * so we never have carry into the next element.
+ */
+ tcg_gen_xor_i64(d, b, t);
+ tcg_gen_sub_i64(d, d, t);
+
+ tcg_temp_free_i64(t);
+}
+
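The mask trick above computes |x| per element without a compare: t is 0 for non-negative elements and all-ones for negative ones, so the xor conditionally inverts and subtracting -1 conditionally adds one. A standalone worked instance for a single 8-bit element (a model only; the code above builds the mask with shri/andi/muli because an i64 has no per-element arithmetic shift):

    #include <stdint.h>

    /* abs(x) = (x ^ m) - m with m = x >> 7 (0 or -1).
       E.g. x = -5: m = -1, (-5 ^ -1) - (-1) = 4 + 1 = 5.
       INT8_MIN stays 0x80, matching hardware abs semantics. */
    static uint8_t abs8(int8_t x)
    {
        int m = x >> 7;     /* promoted to int: 0 or -1 */
        return (uint8_t)((x ^ m) - m);
    }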
+static void tcg_gen_vec_abs8_i64(TCGv_i64 d, TCGv_i64 b)
+{
+ gen_absv_mask(d, b, MO_8);
+}
+
+static void tcg_gen_vec_abs16_i64(TCGv_i64 d, TCGv_i64 b)
+{
+ gen_absv_mask(d, b, MO_16);
+}
+
+void tcg_gen_gvec_abs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = { INDEX_op_abs_vec, 0 };
+ static const GVecGen2 g[4] = {
+ { .fni8 = tcg_gen_vec_abs8_i64,
+ .fniv = tcg_gen_abs_vec,
+ .fno = gen_helper_gvec_abs8,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = tcg_gen_vec_abs16_i64,
+ .fniv = tcg_gen_abs_vec,
+ .fno = gen_helper_gvec_abs16,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = tcg_gen_abs_i32,
+ .fniv = tcg_gen_abs_vec,
+ .fno = gen_helper_gvec_abs32,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = tcg_gen_abs_i64,
+ .fniv = tcg_gen_abs_vec,
+ .fno = gen_helper_gvec_abs64,
+ .opt_opc = vecop_list,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64 },
};
.fni8 = tcg_gen_and_i64,
.fniv = tcg_gen_and_vec,
.fno = gen_helper_gvec_and,
- .opc = INDEX_op_and_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
};
.fni8 = tcg_gen_or_i64,
.fniv = tcg_gen_or_vec,
.fno = gen_helper_gvec_or,
- .opc = INDEX_op_or_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
};
.fni8 = tcg_gen_xor_i64,
.fniv = tcg_gen_xor_vec,
.fno = gen_helper_gvec_xor,
- .opc = INDEX_op_xor_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
};
.fni8 = tcg_gen_andc_i64,
.fniv = tcg_gen_andc_vec,
.fno = gen_helper_gvec_andc,
- .opc = INDEX_op_andc_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
};
.fni8 = tcg_gen_orc_i64,
.fniv = tcg_gen_orc_vec,
.fno = gen_helper_gvec_orc,
- .opc = INDEX_op_orc_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
};
.fni8 = tcg_gen_and_i64,
.fniv = tcg_gen_and_vec,
.fno = gen_helper_gvec_ands,
- .opc = INDEX_op_and_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64
};
.fni8 = tcg_gen_xor_i64,
.fniv = tcg_gen_xor_vec,
.fno = gen_helper_gvec_xors,
- .opc = INDEX_op_xor_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64
};
.fni8 = tcg_gen_or_i64,
.fniv = tcg_gen_or_vec,
.fno = gen_helper_gvec_ors,
- .opc = INDEX_op_or_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64
};
void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
+ static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
static const GVecGen2i g[4] = {
{ .fni8 = tcg_gen_vec_shl8i_i64,
.fniv = tcg_gen_shli_vec,
.fno = gen_helper_gvec_shl8i,
- .opc = INDEX_op_shli_vec,
+ .opt_opc = vecop_list,
.vece = MO_8 },
{ .fni8 = tcg_gen_vec_shl16i_i64,
.fniv = tcg_gen_shli_vec,
.fno = gen_helper_gvec_shl16i,
- .opc = INDEX_op_shli_vec,
+ .opt_opc = vecop_list,
.vece = MO_16 },
{ .fni4 = tcg_gen_shli_i32,
.fniv = tcg_gen_shli_vec,
.fno = gen_helper_gvec_shl32i,
- .opc = INDEX_op_shli_vec,
+ .opt_opc = vecop_list,
.vece = MO_32 },
{ .fni8 = tcg_gen_shli_i64,
.fniv = tcg_gen_shli_vec,
.fno = gen_helper_gvec_shl64i,
- .opc = INDEX_op_shli_vec,
+ .opt_opc = vecop_list,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64 },
};
void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
+ static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 };
static const GVecGen2i g[4] = {
{ .fni8 = tcg_gen_vec_shr8i_i64,
.fniv = tcg_gen_shri_vec,
.fno = gen_helper_gvec_shr8i,
- .opc = INDEX_op_shri_vec,
+ .opt_opc = vecop_list,
.vece = MO_8 },
{ .fni8 = tcg_gen_vec_shr16i_i64,
.fniv = tcg_gen_shri_vec,
.fno = gen_helper_gvec_shr16i,
- .opc = INDEX_op_shri_vec,
+ .opt_opc = vecop_list,
.vece = MO_16 },
{ .fni4 = tcg_gen_shri_i32,
.fniv = tcg_gen_shri_vec,
.fno = gen_helper_gvec_shr32i,
- .opc = INDEX_op_shri_vec,
+ .opt_opc = vecop_list,
.vece = MO_32 },
{ .fni8 = tcg_gen_shri_i64,
.fniv = tcg_gen_shri_vec,
.fno = gen_helper_gvec_shr64i,
- .opc = INDEX_op_shri_vec,
+ .opt_opc = vecop_list,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64 },
};
void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
+ static const TCGOpcode vecop_list[] = { INDEX_op_sari_vec, 0 };
static const GVecGen2i g[4] = {
{ .fni8 = tcg_gen_vec_sar8i_i64,
.fniv = tcg_gen_sari_vec,
.fno = gen_helper_gvec_sar8i,
- .opc = INDEX_op_sari_vec,
+ .opt_opc = vecop_list,
.vece = MO_8 },
{ .fni8 = tcg_gen_vec_sar16i_i64,
.fniv = tcg_gen_sari_vec,
.fno = gen_helper_gvec_sar16i,
- .opc = INDEX_op_sari_vec,
+ .opt_opc = vecop_list,
.vece = MO_16 },
{ .fni4 = tcg_gen_sari_i32,
.fniv = tcg_gen_sari_vec,
.fno = gen_helper_gvec_sar32i,
- .opc = INDEX_op_sari_vec,
+ .opt_opc = vecop_list,
.vece = MO_32 },
{ .fni8 = tcg_gen_sari_i64,
.fniv = tcg_gen_sari_vec,
.fno = gen_helper_gvec_sar64i,
- .opc = INDEX_op_sari_vec,
+ .opt_opc = vecop_list,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64 },
};
}
}
+/*
+ * Specialized generation of vector shifts by a non-constant scalar.
+ */
+
+typedef struct {
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
+ void (*fniv_s)(unsigned, TCGv_vec, TCGv_vec, TCGv_i32);
+ void (*fniv_v)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
+ gen_helper_gvec_2 *fno[4];
+ TCGOpcode s_list[2];
+ TCGOpcode v_list[2];
+} GVecGen2sh;
+
+static void expand_2sh_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t tysz, TCGType type,
+ TCGv_i32 shift,
+ void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_i32))
+{
+ TCGv_vec t0 = tcg_temp_new_vec(type);
+ uint32_t i;
+
+ for (i = 0; i < oprsz; i += tysz) {
+ tcg_gen_ld_vec(t0, cpu_env, aofs + i);
+ fni(vece, t0, t0, shift);
+ tcg_gen_st_vec(t0, cpu_env, dofs + i);
+ }
+ tcg_temp_free_vec(t0);
+}
+
+static void
+do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen2sh *g)
+{
+ TCGType type;
+ uint32_t some;
+
+ check_size_align(oprsz, maxsz, dofs | aofs);
+ check_overlap_2(dofs, aofs, maxsz);
+
+ /* If the backend has a scalar expansion, great. */
+ type = choose_vector_type(g->s_list, vece, oprsz, vece == MO_64);
+ if (type) {
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
+ switch (type) {
+ case TCG_TYPE_V256:
+ some = QEMU_ALIGN_DOWN(oprsz, 32);
+ expand_2sh_vec(vece, dofs, aofs, some, 32,
+ TCG_TYPE_V256, shift, g->fniv_s);
+ if (some == oprsz) {
+ break;
+ }
+ dofs += some;
+ aofs += some;
+ oprsz -= some;
+ maxsz -= some;
+ /* fallthru */
+ case TCG_TYPE_V128:
+ expand_2sh_vec(vece, dofs, aofs, oprsz, 16,
+ TCG_TYPE_V128, shift, g->fniv_s);
+ break;
+ case TCG_TYPE_V64:
+ expand_2sh_vec(vece, dofs, aofs, oprsz, 8,
+ TCG_TYPE_V64, shift, g->fniv_s);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_swap_vecop_list(hold_list);
+ goto clear_tail;
+ }
+
+ /* If the backend supports variable vector shifts, also cool. */
+ type = choose_vector_type(g->v_list, vece, oprsz, vece == MO_64);
+ if (type) {
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
+ TCGv_vec v_shift = tcg_temp_new_vec(type);
+
+ if (vece == MO_64) {
+ TCGv_i64 sh64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(sh64, shift);
+ tcg_gen_dup_i64_vec(MO_64, v_shift, sh64);
+ tcg_temp_free_i64(sh64);
+ } else {
+ tcg_gen_dup_i32_vec(vece, v_shift, shift);
+ }
+
+ switch (type) {
+ case TCG_TYPE_V256:
+ some = QEMU_ALIGN_DOWN(oprsz, 32);
+ expand_2s_vec(vece, dofs, aofs, some, 32, TCG_TYPE_V256,
+ v_shift, false, g->fniv_v);
+ if (some == oprsz) {
+ break;
+ }
+ dofs += some;
+ aofs += some;
+ oprsz -= some;
+ maxsz -= some;
+ /* fallthru */
+ case TCG_TYPE_V128:
+ expand_2s_vec(vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
+ v_shift, false, g->fniv_v);
+ break;
+ case TCG_TYPE_V64:
+ expand_2s_vec(vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
+ v_shift, false, g->fniv_v);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_vec(v_shift);
+ tcg_swap_vecop_list(hold_list);
+ goto clear_tail;
+ }
+
+ /* Otherwise fall back to integral... */
+ if (vece == MO_32 && check_size_impl(oprsz, 4)) {
+ expand_2s_i32(dofs, aofs, oprsz, shift, false, g->fni4);
+ } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
+ TCGv_i64 sh64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(sh64, shift);
+ expand_2s_i64(dofs, aofs, oprsz, sh64, false, g->fni8);
+ tcg_temp_free_i64(sh64);
+ } else {
+ TCGv_ptr a0 = tcg_temp_new_ptr();
+ TCGv_ptr a1 = tcg_temp_new_ptr();
+ TCGv_i32 desc = tcg_temp_new_i32();
+
+ tcg_gen_shli_i32(desc, shift, SIMD_DATA_SHIFT);
+ tcg_gen_ori_i32(desc, desc, simd_desc(oprsz, maxsz, 0));
+ tcg_gen_addi_ptr(a0, cpu_env, dofs);
+ tcg_gen_addi_ptr(a1, cpu_env, aofs);
+
+ g->fno[vece](a0, a1, desc);
+
+ tcg_temp_free_ptr(a0);
+ tcg_temp_free_ptr(a1);
+ tcg_temp_free_i32(desc);
+ return;
+ }
+
+ clear_tail:
+ if (oprsz < maxsz) {
+ expand_clr(dofs + oprsz, maxsz - oprsz);
+ }
+}
+
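do_gvec_shifts therefore tries three strategies in order: a native shift-by-scalar (s_list), a broadcast of the count followed by shift-by-vector (v_list), and finally integer or out-of-line code. A hedged usage fragment for the new entry points (assumes QEMU translator context; shift_ofs, dofs and aofs are hypothetical CPU-state offsets, and the count is assumed below the element width):

    TCGv_i32 sh = tcg_temp_new_i32();
    tcg_gen_ld_i32(sh, cpu_env, shift_ofs);   /* hypothetical offset */
    /* d[i] = a[i] << sh for four 32-bit elements */
    tcg_gen_gvec_shls(MO_32, dofs, aofs, sh, 16, 16);
    tcg_temp_free_i32(sh);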
+void tcg_gen_gvec_shls(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen2sh g = {
+ .fni4 = tcg_gen_shl_i32,
+ .fni8 = tcg_gen_shl_i64,
+ .fniv_s = tcg_gen_shls_vec,
+ .fniv_v = tcg_gen_shlv_vec,
+ .fno = {
+ gen_helper_gvec_shl8i,
+ gen_helper_gvec_shl16i,
+ gen_helper_gvec_shl32i,
+ gen_helper_gvec_shl64i,
+ },
+ .s_list = { INDEX_op_shls_vec, 0 },
+ .v_list = { INDEX_op_shlv_vec, 0 },
+ };
+
+ tcg_debug_assert(vece <= MO_64);
+ do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
+}
+
+void tcg_gen_gvec_shrs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen2sh g = {
+ .fni4 = tcg_gen_shr_i32,
+ .fni8 = tcg_gen_shr_i64,
+ .fniv_s = tcg_gen_shrs_vec,
+ .fniv_v = tcg_gen_shrv_vec,
+ .fno = {
+ gen_helper_gvec_shr8i,
+ gen_helper_gvec_shr16i,
+ gen_helper_gvec_shr32i,
+ gen_helper_gvec_shr64i,
+ },
+ .s_list = { INDEX_op_shrs_vec, 0 },
+ .v_list = { INDEX_op_shrv_vec, 0 },
+ };
+
+ tcg_debug_assert(vece <= MO_64);
+ do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
+}
+
+void tcg_gen_gvec_sars(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen2sh g = {
+ .fni4 = tcg_gen_sar_i32,
+ .fni8 = tcg_gen_sar_i64,
+ .fniv_s = tcg_gen_sars_vec,
+ .fniv_v = tcg_gen_sarv_vec,
+ .fno = {
+ gen_helper_gvec_sar8i,
+ gen_helper_gvec_sar16i,
+ gen_helper_gvec_sar32i,
+ gen_helper_gvec_sar64i,
+ },
+ .s_list = { INDEX_op_sars_vec, 0 },
+ .v_list = { INDEX_op_sarv_vec, 0 },
+ };
+
+ tcg_debug_assert(vece <= MO_64);
+ do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
+}
+
+/*
+ * Expand D = A << (B % element bits)
+ *
+ * Unlike scalar shifts, where it is easy for the target front end
+ * to include the modulo as part of the expansion, vector shifts
+ * need the modulo folded in here. If the target naturally includes
+ * the modulo as part of the operation, great! If the target has
+ * some other behaviour for out-of-range shifts, then it could not
+ * use this function anyway, and would need to do its own expansion
+ * with custom functions.
+ */
+static void tcg_gen_shlv_mod_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
+ tcg_gen_and_vec(vece, t, t, b);
+ tcg_gen_shlv_vec(vece, d, a, t);
+ tcg_temp_free_vec(t);
+}
+
+static void tcg_gen_shl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_andi_i32(t, b, 31);
+ tcg_gen_shl_i32(d, a, t);
+ tcg_temp_free_i32(t);
+}
+
+static void tcg_gen_shl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_andi_i64(t, b, 63);
+ tcg_gen_shl_i64(d, a, t);
+ tcg_temp_free_i64(t);
+}
+
+void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = { INDEX_op_shlv_vec, 0 };
+ static const GVecGen3 g[4] = {
+ { .fniv = tcg_gen_shlv_mod_vec,
+ .fno = gen_helper_gvec_shl8v,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fniv = tcg_gen_shlv_mod_vec,
+ .fno = gen_helper_gvec_shl16v,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = tcg_gen_shl_mod_i32,
+ .fniv = tcg_gen_shlv_mod_vec,
+ .fno = gen_helper_gvec_shl32v,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = tcg_gen_shl_mod_i64,
+ .fniv = tcg_gen_shlv_mod_vec,
+ .fno = gen_helper_gvec_shl64v,
+ .opt_opc = vecop_list,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .vece = MO_64 },
+ };
+
+ tcg_debug_assert(vece <= MO_64);
+ tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
+}
+
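The wrappers above give the variable shift its modulo semantics, D[i] = A[i] << (B[i] % (8 << vece)), by masking the count before shifting. A standalone scalar model for 32-bit elements:

    #include <stdint.h>

    /* Per-element model of tcg_gen_gvec_shlv at vece == MO_32. */
    static uint32_t shlv_elem32(uint32_t a, uint32_t b)
    {
        return a << (b & 31);   /* count taken modulo 32 */
    }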
+/*
+ * Similarly for logical right shifts.
+ */
+
+static void tcg_gen_shrv_mod_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
+ tcg_gen_and_vec(vece, t, t, b);
+ tcg_gen_shrv_vec(vece, d, a, t);
+ tcg_temp_free_vec(t);
+}
+
+static void tcg_gen_shr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_andi_i32(t, b, 31);
+ tcg_gen_shr_i32(d, a, t);
+ tcg_temp_free_i32(t);
+}
+
+static void tcg_gen_shr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_andi_i64(t, b, 63);
+ tcg_gen_shr_i64(d, a, t);
+ tcg_temp_free_i64(t);
+}
+
+void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = { INDEX_op_shrv_vec, 0 };
+ static const GVecGen3 g[4] = {
+ { .fniv = tcg_gen_shrv_mod_vec,
+ .fno = gen_helper_gvec_shr8v,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fniv = tcg_gen_shrv_mod_vec,
+ .fno = gen_helper_gvec_shr16v,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = tcg_gen_shr_mod_i32,
+ .fniv = tcg_gen_shrv_mod_vec,
+ .fno = gen_helper_gvec_shr32v,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = tcg_gen_shr_mod_i64,
+ .fniv = tcg_gen_shrv_mod_vec,
+ .fno = gen_helper_gvec_shr64v,
+ .opt_opc = vecop_list,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .vece = MO_64 },
+ };
+
+ tcg_debug_assert(vece <= MO_64);
+ tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
+}
+
+/*
+ * Similarly for arithmetic right shifts.
+ */
+
+static void tcg_gen_sarv_mod_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
+ tcg_gen_and_vec(vece, t, t, b);
+ tcg_gen_sarv_vec(vece, d, a, t);
+ tcg_temp_free_vec(t);
+}
+
+static void tcg_gen_sar_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_andi_i32(t, b, 31);
+ tcg_gen_sar_i32(d, a, t);
+ tcg_temp_free_i32(t);
+}
+
+static void tcg_gen_sar_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_andi_i64(t, b, 63);
+ tcg_gen_sar_i64(d, a, t);
+ tcg_temp_free_i64(t);
+}
+
+void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = { INDEX_op_sarv_vec, 0 };
+ static const GVecGen3 g[4] = {
+ { .fniv = tcg_gen_sarv_mod_vec,
+ .fno = gen_helper_gvec_sar8v,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fniv = tcg_gen_sarv_mod_vec,
+ .fno = gen_helper_gvec_sar16v,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = tcg_gen_sar_mod_i32,
+ .fniv = tcg_gen_sarv_mod_vec,
+ .fno = gen_helper_gvec_sar32v,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = tcg_gen_sar_mod_i64,
+ .fniv = tcg_gen_sarv_mod_vec,
+ .fno = gen_helper_gvec_sar64v,
+ .opt_opc = vecop_list,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .vece = MO_64 },
+ };
+
+ tcg_debug_assert(vece <= MO_64);
+ tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
+}
+
/* Expand OPSZ bytes worth of three-operand operations using i32 elements. */
static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t oprsz, TCGCond cond)
uint32_t aofs, uint32_t bofs,
uint32_t oprsz, uint32_t maxsz)
{
+ static const TCGOpcode cmp_list[] = { INDEX_op_cmp_vec, 0 };
static gen_helper_gvec_3 * const eq_fn[4] = {
gen_helper_gvec_eq8, gen_helper_gvec_eq16,
gen_helper_gvec_eq32, gen_helper_gvec_eq64
[TCG_COND_LTU] = ltu_fn,
[TCG_COND_LEU] = leu_fn,
};
+
+ const TCGOpcode *hold_list;
TCGType type;
uint32_t some;
return;
}
- /* Implement inline with a vector type, if possible.
+ /*
+ * Implement inline with a vector type, if possible.
* Prefer integer when 64-bit host and 64-bit comparison.
*/
- type = choose_vector_type(INDEX_op_cmp_vec, vece, oprsz,
+ hold_list = tcg_swap_vecop_list(cmp_list);
+ type = choose_vector_type(cmp_list, vece, oprsz,
TCG_TARGET_REG_BITS == 64 && vece == MO_64);
switch (type) {
case TCG_TYPE_V256:
assert(fn != NULL);
}
tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, 0, fn[vece]);
- return;
+ oprsz = maxsz;
}
break;
default:
g_assert_not_reached();
}
+ tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
expand_clr(dofs + oprsz, maxsz - oprsz);
void (*fniv)(unsigned, TCGv_vec, TCGv_vec);
/* Expand out-of-line helper w/descriptor. */
gen_helper_gvec_2 *fno;
- /* The opcode, if any, to which this corresponds. */
- TCGOpcode opc;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
/* The data argument to the out-of-line helper. */
int32_t data;
/* The vector element size, if applicable. */
gen_helper_gvec_2 *fno;
/* Expand out-of-line helper w/descriptor, data as argument. */
gen_helper_gvec_2i *fnoi;
- /* The opcode, if any, to which this corresponds. */
- TCGOpcode opc;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
/* The vector element size, if applicable. */
uint8_t vece;
/* Prefer i64 to v64. */
void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
/* Expand out-of-line helper w/descriptor. */
gen_helper_gvec_2i *fno;
- /* The opcode, if any, to which this corresponds. */
- TCGOpcode opc;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
/* The data argument to the out-of-line helper. */
uint32_t data;
/* The vector element size, if applicable. */
void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
/* Expand out-of-line helper w/descriptor. */
gen_helper_gvec_3 *fno;
- /* The opcode, if any, to which this corresponds. */
- TCGOpcode opc;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
/* The data argument to the out-of-line helper. */
int32_t data;
/* The vector element size, if applicable. */
bool load_dest;
} GVecGen3;
+typedef struct {
+ /*
+ * Expand inline as a 64-bit or 32-bit integer. Only one of these will be
+ * non-NULL.
+ */
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t);
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, int64_t);
+ /* Expand out-of-line helper w/descriptor, data in descriptor. */
+ gen_helper_gvec_3 *fno;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+ /* Load dest as a 3rd source operand. */
+ bool load_dest;
+} GVecGen3i;
+
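A front end fills in such a descriptor and hands it to tcg_gen_gvec_3i()
(declared below). A minimal sketch, assuming hypothetical gen_foo_*
expanders and a foo_opc_list (none of these names exist in the tree):

    static const TCGOpcode foo_opc_list[] = { INDEX_op_shli_vec, 0 };

    static const GVecGen3i g_foo = {
        .fni4 = gen_foo_i32,          /* inline 32-bit integer expansion */
        .fniv = gen_foo_vec,          /* inline host-vector expansion */
        .fno  = gen_helper_gvec_foo,  /* out-of-line helper fallback */
        .opt_opc = foo_opc_list,      /* opcodes that .fniv may emit */
        .vece = MO_32,
    };

    tcg_gen_gvec_3i(dofs, aofs, bofs, oprsz, maxsz, c, &g_foo);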
typedef struct {
/* Expand inline as a 64-bit or 32-bit integer.
Only one of these will be non-NULL. */
void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec);
/* Expand out-of-line helper w/descriptor. */
gen_helper_gvec_4 *fno;
- /* The opcode, if any, to which this corresponds. */
- TCGOpcode opc;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
/* The data argument to the out-of-line helper. */
int32_t data;
/* The vector element size, if applicable. */
uint32_t maxsz, TCGv_i64 c, const GVecGen2s *);
void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t oprsz, uint32_t maxsz, const GVecGen3 *);
+void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, int64_t c,
+ const GVecGen3i *);
void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
uint32_t oprsz, uint32_t maxsz, const GVecGen4 *);
uint32_t oprsz, uint32_t maxsz);
void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_abs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz);
void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
int64_t shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_shls(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_shrs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sars(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
+
+/*
+ * Perform vector shift by vector element, modulo the element size.
+ * E.g. D[i] = A[i] << (B[i] % (8 << vece)).
+ */
+void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+
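Written out as plain C for the MO_32 case, the semantics documented above
amount to the following (an illustrative model only, not how the expansion
is actually generated):

    /* d, a, b view the data at dofs/aofs/bofs; n = oprsz / 4. */
    for (size_t i = 0; i < n; i++) {
        d[i] = a[i] << (b[i] & 31);   /* shift count taken modulo 32 */
    }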
void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
uint32_t aofs, uint32_t bofs,
uint32_t oprsz, uint32_t maxsz);
#define TCGV_HIGH TCGV_HIGH_link_error
#endif
+/*
+ * Vector optional opcode tracking.
+ * Except for the basic logical operations (and, or, xor), and
+ * data movement (mov, ld, st, dupi), many vector opcodes are
+ * optional and may not be supported on the host. Thank Intel
+ * for the irregularity in their instruction set.
+ *
+ * The gvec expanders allow custom vector operations to be composed,
+ * generally via the .fniv callback in the GVecGen* structures. At
+ * the same time, in deciding whether to use this hook we need to
+ * know if the host supports the required operations. This is
+ * presented as an array of opcodes, terminated by 0. Each opcode
+ * is assumed to be expanded with the given VECE.
+ *
+ * For debugging, we want to validate this array. Therefore, when
+ * tcg_ctx->vecop_list is non-NULL, the tcg_gen_*_vec expanders
+ * will validate that their opcode is present in the list.
+ */
+#ifdef CONFIG_DEBUG_TCG
+void tcg_assert_listed_vecop(TCGOpcode op)
+{
+ const TCGOpcode *p = tcg_ctx->vecop_list;
+ if (p) {
+ for (; *p; ++p) {
+ if (*p == op) {
+ return;
+ }
+ }
+ g_assert_not_reached();
+ }
+}
+#endif
+
+bool tcg_can_emit_vecop_list(const TCGOpcode *list,
+ TCGType type, unsigned vece)
+{
+ if (list == NULL) {
+ return true;
+ }
+
+ for (; *list; ++list) {
+ TCGOpcode opc = *list;
+
+#ifdef CONFIG_DEBUG_TCG
+ switch (opc) {
+ case INDEX_op_and_vec:
+ case INDEX_op_or_vec:
+ case INDEX_op_xor_vec:
+ case INDEX_op_mov_vec:
+ case INDEX_op_dup_vec:
+ case INDEX_op_dupi_vec:
+ case INDEX_op_dup2_vec:
+ case INDEX_op_ld_vec:
+ case INDEX_op_st_vec:
+ /* These opcodes are mandatory and should not be listed. */
+ g_assert_not_reached();
+ default:
+ break;
+ }
+#endif
+
+ if (tcg_can_emit_vec_op(opc, type, vece)) {
+ continue;
+ }
+
+ /*
+ * The opcode list is created by front ends based on what they
+         * actually invoke.  We must mirror the logic of the expanders
+         * below, which may implement an opcode in terms of other opcodes.
+ */
+ switch (opc) {
+ case INDEX_op_neg_vec:
+ if (tcg_can_emit_vec_op(INDEX_op_sub_vec, type, vece)) {
+ continue;
+ }
+ break;
+ case INDEX_op_abs_vec:
+ if (tcg_can_emit_vec_op(INDEX_op_sub_vec, type, vece)
+ && (tcg_can_emit_vec_op(INDEX_op_smax_vec, type, vece) > 0
+ || tcg_can_emit_vec_op(INDEX_op_sari_vec, type, vece) > 0
+ || tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece))) {
+ continue;
+ }
+ break;
+ default:
+ break;
+ }
+ return false;
+ }
+ return true;
+}
+
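Taken together with tcg_swap_vecop_list() (added to tcg.h below), the
intended discipline for a caller is: declare the opcodes an expansion may
emit, check that the host can handle them, and bracket the expansion with
the list swapped in so the debug assertions can fire. A sketch, with an
illustrative single-opcode list:

    static const TCGOpcode vecop_list[] = { INDEX_op_shlv_vec, 0 };

    if (tcg_can_emit_vecop_list(vecop_list, type, vece)) {
        const TCGOpcode *hold = tcg_swap_vecop_list(vecop_list);
        /* ... emit tcg_gen_shlv_vec() and friends here ... */
        tcg_swap_vecop_list(hold);
    }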
void vec_gen_2(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a)
{
TCGOp *op = tcg_emit_op(opc);
vec_gen_2(INDEX_op_dup_vec, type, vece, ri, ai);
}
+void tcg_gen_dup_mem_vec(unsigned vece, TCGv_vec r, TCGv_ptr b,
+ tcg_target_long ofs)
+{
+ TCGArg ri = tcgv_vec_arg(r);
+ TCGArg bi = tcgv_ptr_arg(b);
+ TCGTemp *rt = arg_temp(ri);
+ TCGType type = rt->base_type;
+
+ vec_gen_3(INDEX_op_dupm_vec, type, vece, ri, bi, ofs);
+}
+
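For reference, dupm_vec loads one (8 << vece)-bit element from b + ofs and
replicates it into every lane of r. As plain C for vece = MO_16 (an
illustrative model; base, d and lanes are stand-ins):

    uint16_t elt;
    memcpy(&elt, (const char *)base + ofs, sizeof(elt));  /* one element */
    for (size_t i = 0; i < lanes; i++) {
        d[i] = elt;                                       /* broadcast */
    }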
static void vec_gen_ldst(TCGOpcode opc, TCGv_vec r, TCGv_ptr b, TCGArg o)
{
TCGArg ri = tcgv_vec_arg(r);
vec_gen_3(INDEX_op_st_vec, low_type, 0, ri, bi, o);
}
-void tcg_gen_add_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
-{
- vec_gen_op3(INDEX_op_add_vec, vece, r, a, b);
-}
-
-void tcg_gen_sub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
-{
- vec_gen_op3(INDEX_op_sub_vec, vece, r, a, b);
-}
-
void tcg_gen_and_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
vec_gen_op3(INDEX_op_and_vec, 0, r, a, b);
tcg_gen_not_vec(0, r, r);
}
-void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
+static bool do_op2(unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc)
{
- if (TCG_TARGET_HAS_not_vec) {
- vec_gen_op2(INDEX_op_not_vec, 0, r, a);
+ TCGTemp *rt = tcgv_vec_temp(r);
+ TCGTemp *at = tcgv_vec_temp(a);
+ TCGArg ri = temp_arg(rt);
+ TCGArg ai = temp_arg(at);
+ TCGType type = rt->base_type;
+ int can;
+
+ tcg_debug_assert(at->base_type >= type);
+ tcg_assert_listed_vecop(opc);
+ can = tcg_can_emit_vec_op(opc, type, vece);
+ if (can > 0) {
+ vec_gen_2(opc, type, vece, ri, ai);
+ } else if (can < 0) {
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
+ tcg_expand_vec_op(opc, type, vece, ri, ai);
+ tcg_swap_vecop_list(hold_list);
} else {
+ return false;
+ }
+ return true;
+}
+
+void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
+{
+ if (!TCG_TARGET_HAS_not_vec || !do_op2(vece, r, a, INDEX_op_not_vec)) {
TCGv_vec t = tcg_const_ones_vec_matching(r);
tcg_gen_xor_vec(0, r, a, t);
tcg_temp_free_vec(t);
void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
{
- if (TCG_TARGET_HAS_neg_vec) {
- vec_gen_op2(INDEX_op_neg_vec, vece, r, a);
- } else {
+ const TCGOpcode *hold_list;
+
+ tcg_assert_listed_vecop(INDEX_op_neg_vec);
+ hold_list = tcg_swap_vecop_list(NULL);
+
+ if (!TCG_TARGET_HAS_neg_vec || !do_op2(vece, r, a, INDEX_op_neg_vec)) {
TCGv_vec t = tcg_const_zeros_vec_matching(r);
tcg_gen_sub_vec(vece, r, t, a);
tcg_temp_free_vec(t);
}
+ tcg_swap_vecop_list(hold_list);
+}
+
+void tcg_gen_abs_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
+{
+ const TCGOpcode *hold_list;
+
+ tcg_assert_listed_vecop(INDEX_op_abs_vec);
+ hold_list = tcg_swap_vecop_list(NULL);
+
+ if (!do_op2(vece, r, a, INDEX_op_abs_vec)) {
+ TCGType type = tcgv_vec_temp(r)->base_type;
+ TCGv_vec t = tcg_temp_new_vec(type);
+
+ tcg_debug_assert(tcg_can_emit_vec_op(INDEX_op_sub_vec, type, vece));
+ if (tcg_can_emit_vec_op(INDEX_op_smax_vec, type, vece) > 0) {
+ tcg_gen_neg_vec(vece, t, a);
+ tcg_gen_smax_vec(vece, r, a, t);
+ } else {
+ if (tcg_can_emit_vec_op(INDEX_op_sari_vec, type, vece) > 0) {
+ tcg_gen_sari_vec(vece, t, a, (8 << vece) - 1);
+ } else {
+ do_dupi_vec(t, MO_REG, 0);
+ tcg_gen_cmp_vec(TCG_COND_LT, vece, t, a, t);
+ }
+ tcg_gen_xor_vec(vece, r, a, t);
+ tcg_gen_sub_vec(vece, r, r, t);
+ }
+
+ tcg_temp_free_vec(t);
+ }
+ tcg_swap_vecop_list(hold_list);
}
static void do_shifti(TCGOpcode opc, unsigned vece,
tcg_debug_assert(at->base_type == type);
tcg_debug_assert(i >= 0 && i < (8 << vece));
+ tcg_assert_listed_vecop(opc);
if (i == 0) {
tcg_gen_mov_vec(r, a);
/* We leave the choice of expansion via scalar or vector shift
to the target. Often, but not always, dupi can feed a vector
shift easier than a scalar. */
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
tcg_debug_assert(can < 0);
tcg_expand_vec_op(opc, type, vece, ri, ai, i);
+ tcg_swap_vecop_list(hold_list);
}
}
tcg_debug_assert(at->base_type >= type);
tcg_debug_assert(bt->base_type >= type);
+ tcg_assert_listed_vecop(INDEX_op_cmp_vec);
can = tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece);
if (can > 0) {
vec_gen_4(INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond);
} else {
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
tcg_debug_assert(can < 0);
tcg_expand_vec_op(INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond);
+ tcg_swap_vecop_list(hold_list);
}
}
tcg_debug_assert(at->base_type >= type);
tcg_debug_assert(bt->base_type >= type);
+ tcg_assert_listed_vecop(opc);
can = tcg_can_emit_vec_op(opc, type, vece);
if (can > 0) {
vec_gen_3(opc, type, vece, ri, ai, bi);
} else {
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
tcg_debug_assert(can < 0);
tcg_expand_vec_op(opc, type, vece, ri, ai, bi);
+ tcg_swap_vecop_list(hold_list);
}
}
+void tcg_gen_add_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+ do_op3(vece, r, a, b, INDEX_op_add_vec);
+}
+
+void tcg_gen_sub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+ do_op3(vece, r, a, b, INDEX_op_sub_vec);
+}
+
void tcg_gen_mul_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
do_op3(vece, r, a, b, INDEX_op_mul_vec);
{
do_op3(vece, r, a, b, INDEX_op_umax_vec);
}
+
+void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+ do_op3(vece, r, a, b, INDEX_op_shlv_vec);
+}
+
+void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+ do_op3(vece, r, a, b, INDEX_op_shrv_vec);
+}
+
+void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+ do_op3(vece, r, a, b, INDEX_op_sarv_vec);
+}
+
+static void do_shifts(unsigned vece, TCGv_vec r, TCGv_vec a,
+ TCGv_i32 s, TCGOpcode opc_s, TCGOpcode opc_v)
+{
+ TCGTemp *rt = tcgv_vec_temp(r);
+ TCGTemp *at = tcgv_vec_temp(a);
+ TCGTemp *st = tcgv_i32_temp(s);
+ TCGArg ri = temp_arg(rt);
+ TCGArg ai = temp_arg(at);
+ TCGArg si = temp_arg(st);
+ TCGType type = rt->base_type;
+ const TCGOpcode *hold_list;
+ int can;
+
+ tcg_debug_assert(at->base_type >= type);
+ tcg_assert_listed_vecop(opc_s);
+ hold_list = tcg_swap_vecop_list(NULL);
+
+ can = tcg_can_emit_vec_op(opc_s, type, vece);
+ if (can > 0) {
+ vec_gen_3(opc_s, type, vece, ri, ai, si);
+ } else if (can < 0) {
+ tcg_expand_vec_op(opc_s, type, vece, ri, ai, si);
+ } else {
+ TCGv_vec vec_s = tcg_temp_new_vec(type);
+
+ if (vece == MO_64) {
+ TCGv_i64 s64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(s64, s);
+ tcg_gen_dup_i64_vec(MO_64, vec_s, s64);
+ tcg_temp_free_i64(s64);
+ } else {
+ tcg_gen_dup_i32_vec(vece, vec_s, s);
+ }
+ do_op3(vece, r, a, vec_s, opc_v);
+ tcg_temp_free_vec(vec_s);
+ }
+ tcg_swap_vecop_list(hold_list);
+}
+
+void tcg_gen_shls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b)
+{
+ do_shifts(vece, r, a, b, INDEX_op_shls_vec, INDEX_op_shlv_vec);
+}
+
+void tcg_gen_shrs_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b)
+{
+ do_shifts(vece, r, a, b, INDEX_op_shrs_vec, INDEX_op_shrv_vec);
+}
+
+void tcg_gen_sars_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b)
+{
+ do_shifts(vece, r, a, b, INDEX_op_sars_vec, INDEX_op_sarv_vec);
+}
tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, b, a);
}
+void tcg_gen_abs_i32(TCGv_i32 ret, TCGv_i32 a)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_sari_i32(t, a, 31);
+ tcg_gen_xor_i32(ret, a, t);
+ tcg_gen_sub_i32(ret, ret, t);
+ tcg_temp_free_i32(t);
+}
+
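This is the classic branchless absolute value: t = a >> 31 (arithmetic
shift) is 0 when a >= 0 and -1 when a < 0, so (a ^ t) - t leaves
non-negative values unchanged and turns a negative a into ~a + 1 = -a.
For example, a = -5 gives t = -1, a ^ t = 4, and 4 - (-1) = 5. The same
identity backs the sari/xor/sub fallback in tcg_gen_abs_vec() above.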
/* 64-bit ops */
#if TCG_TARGET_REG_BITS == 32
tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, b, a);
}
+void tcg_gen_abs_i64(TCGv_i64 ret, TCGv_i64 a)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_sari_i64(t, a, 63);
+ tcg_gen_xor_i64(ret, a, t);
+ tcg_gen_sub_i64(ret, ret, t);
+ tcg_temp_free_i64(t);
+}
+
/* Size changing operations. */
void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
void tcg_gen_smax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_umin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_umax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_abs_i32(TCGv_i32, TCGv_i32);
static inline void tcg_gen_discard_i32(TCGv_i32 arg)
{
void tcg_gen_smax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_umin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_umax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_abs_i64(TCGv_i64, TCGv_i64);
#if TCG_TARGET_REG_BITS == 64
static inline void tcg_gen_discard_i64(TCGv_i64 arg)
void tcg_gen_mov_vec(TCGv_vec, TCGv_vec);
void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32);
void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec, TCGv_i64);
+void tcg_gen_dup_mem_vec(unsigned vece, TCGv_vec, TCGv_ptr, tcg_target_long);
void tcg_gen_dup8i_vec(TCGv_vec, uint32_t);
void tcg_gen_dup16i_vec(TCGv_vec, uint32_t);
void tcg_gen_dup32i_vec(TCGv_vec, uint32_t);
void tcg_gen_eqv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
+void tcg_gen_abs_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
void tcg_gen_ssadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
void tcg_gen_usadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
void tcg_gen_shri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
void tcg_gen_sari_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
+void tcg_gen_shls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
+void tcg_gen_shrs_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
+void tcg_gen_sars_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
+
+void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+
void tcg_gen_cmp_vec(TCGCond cond, unsigned vece, TCGv_vec r,
TCGv_vec a, TCGv_vec b);
#define tcg_gen_addi_tl tcg_gen_addi_i64
#define tcg_gen_sub_tl tcg_gen_sub_i64
#define tcg_gen_neg_tl tcg_gen_neg_i64
+#define tcg_gen_abs_tl tcg_gen_abs_i64
#define tcg_gen_subfi_tl tcg_gen_subfi_i64
#define tcg_gen_subi_tl tcg_gen_subi_i64
#define tcg_gen_and_tl tcg_gen_and_i64
#define tcg_gen_addi_tl tcg_gen_addi_i32
#define tcg_gen_sub_tl tcg_gen_sub_i32
#define tcg_gen_neg_tl tcg_gen_neg_i32
+#define tcg_gen_abs_tl tcg_gen_abs_i32
#define tcg_gen_subfi_tl tcg_gen_subfi_i32
#define tcg_gen_subi_tl tcg_gen_subi_i32
#define tcg_gen_and_tl tcg_gen_and_i32
DEF(ld_vec, 1, 1, 1, IMPLVEC)
DEF(st_vec, 0, 2, 1, IMPLVEC)
+DEF(dupm_vec, 1, 1, 1, IMPLVEC)
DEF(add_vec, 1, 2, 0, IMPLVEC)
DEF(sub_vec, 1, 2, 0, IMPLVEC)
DEF(mul_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_mul_vec))
DEF(neg_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_neg_vec))
+DEF(abs_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_abs_vec))
DEF(ssadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
DEF(usadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
DEF(sssub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
const char *ct_str, TCGType type);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
intptr_t arg2);
-static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
+static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
const int *const_args);
#if TCG_TARGET_MAYBE_vec
+static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg dst, TCGReg src);
+static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg dst, TCGReg base, intptr_t offset);
+static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
+ TCGReg dst, tcg_target_long arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
unsigned vece, const TCGArg *args,
const int *const_args);
#else
+static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg dst, TCGReg src)
+{
+ g_assert_not_reached();
+}
+static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg dst, TCGReg base, intptr_t offset)
+{
+ g_assert_not_reached();
+}
+static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type,
+ TCGReg dst, tcg_target_long arg)
+{
+ g_assert_not_reached();
+}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
unsigned vece, const TCGArg *args,
const int *const_args)
case INDEX_op_mov_vec:
case INDEX_op_dup_vec:
case INDEX_op_dupi_vec:
+ case INDEX_op_dupm_vec:
case INDEX_op_ld_vec:
case INDEX_op_st_vec:
case INDEX_op_add_vec:
return have_vec && TCG_TARGET_HAS_not_vec;
case INDEX_op_neg_vec:
return have_vec && TCG_TARGET_HAS_neg_vec;
+ case INDEX_op_abs_vec:
+ return have_vec && TCG_TARGET_HAS_abs_vec;
case INDEX_op_andc_vec:
return have_vec && TCG_TARGET_HAS_andc_vec;
case INDEX_op_orc_vec:
save_globals(s, allocated_regs);
}
+/*
+ * Specialized code generation for INDEX_op_movi_*.
+ */
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
tcg_target_ulong val, TCGLifeData arg_life,
TCGRegSet preferred_regs)
{
- if (ots->fixed_reg) {
- /* For fixed registers, we do not do any constant propagation. */
- tcg_out_movi(s, ots->type, ots->reg, val);
- return;
- }
+ /* ENV should not be modified. */
+ tcg_debug_assert(!ots->fixed_reg);
/* The movi is not explicitly generated here. */
if (ots->val_type == TEMP_VAL_REG) {
tcg_reg_alloc_do_movi(s, ots, val, op->life, op->output_pref[0]);
}
+/*
+ * Specialized code generation for INDEX_op_mov_*.
+ */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
const TCGLifeData arg_life = op->life;
ots = arg_temp(op->args[0]);
ts = arg_temp(op->args[1]);
+ /* ENV should not be modified. */
+ tcg_debug_assert(!ots->fixed_reg);
+
/* Note that otype != itype for no-op truncation. */
otype = ots->type;
itype = ts->type;
}
tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
- if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
+ if (IS_DEAD_ARG(0)) {
/* mov to a non-saved dead register makes no sense (even with
liveness analysis disabled). */
tcg_debug_assert(NEED_SYNC_ARG(0));
}
temp_dead(s, ots);
} else {
- if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
+ if (IS_DEAD_ARG(1) && !ts->fixed_reg) {
/* the mov can be suppressed */
if (ots->val_type == TEMP_VAL_REG) {
s->reg_to_temp[ots->reg] = NULL;
allocated_regs, preferred_regs,
ots->indirect_base);
}
- tcg_out_mov(s, otype, ots->reg, ts->reg);
+ if (!tcg_out_mov(s, otype, ots->reg, ts->reg)) {
+ /*
+ * Cross register class move not supported.
+ * Store the source register into the destination slot
+ * and leave the destination temp as TEMP_VAL_MEM.
+ */
+ assert(!ots->fixed_reg);
+ if (!ts->mem_allocated) {
+ temp_allocate_frame(s, ots);
+ }
+ tcg_out_st(s, ts->type, ts->reg,
+ ots->mem_base->reg, ots->mem_offset);
+ ots->mem_coherent = 1;
+ temp_free_or_dead(s, ots, -1);
+ return;
+ }
}
ots->val_type = TEMP_VAL_REG;
ots->mem_coherent = 0;
}
}
+/*
+ * Specialized code generation for INDEX_op_dup_vec.
+ */
+static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
+{
+ const TCGLifeData arg_life = op->life;
+ TCGRegSet dup_out_regs, dup_in_regs;
+ TCGTemp *its, *ots;
+ TCGType itype, vtype;
+ intptr_t endian_fixup;
+ unsigned vece;
+ bool ok;
+
+ ots = arg_temp(op->args[0]);
+ its = arg_temp(op->args[1]);
+
+ /* ENV should not be modified. */
+ tcg_debug_assert(!ots->fixed_reg);
+
+ itype = its->type;
+ vece = TCGOP_VECE(op);
+ vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
+
+ if (its->val_type == TEMP_VAL_CONST) {
+ /* Propagate constant via movi -> dupi. */
+ tcg_target_ulong val = its->val;
+ if (IS_DEAD_ARG(1)) {
+ temp_dead(s, its);
+ }
+ tcg_reg_alloc_do_movi(s, ots, val, arg_life, op->output_pref[0]);
+ return;
+ }
+
+ dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].u.regs;
+ dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].u.regs;
+
+ /* Allocate the output register now. */
+ if (ots->val_type != TEMP_VAL_REG) {
+ TCGRegSet allocated_regs = s->reserved_regs;
+
+ if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
+ /* Make sure to not spill the input register. */
+ tcg_regset_set_reg(allocated_regs, its->reg);
+ }
+ ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
+ op->output_pref[0], ots->indirect_base);
+ ots->val_type = TEMP_VAL_REG;
+ ots->mem_coherent = 0;
+ s->reg_to_temp[ots->reg] = ots;
+ }
+
+ switch (its->val_type) {
+ case TEMP_VAL_REG:
+ /*
+         * The dup constraints must be broad, covering all possible VECE.
+         * However, tcg_out_dup_vec() gets to see the VECE and we allow it
+ * to fail, indicating that extra moves are required for that case.
+ */
+ if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
+ if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
+ goto done;
+ }
+ /* Try again from memory or a vector input register. */
+ }
+ if (!its->mem_coherent) {
+ /*
+ * The input register is not synced, and so an extra store
+ * would be required to use memory. Attempt an integer-vector
+ * register move first. We do not have a TCGRegSet for this.
+ */
+ if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
+ break;
+ }
+ /* Sync the temp back to its slot and load from there. */
+ temp_sync(s, its, s->reserved_regs, 0, 0);
+ }
+ /* fall through */
+
+ case TEMP_VAL_MEM:
+#ifdef HOST_WORDS_BIGENDIAN
+ endian_fixup = itype == TCG_TYPE_I32 ? 4 : 8;
+ endian_fixup -= 1 << vece;
+#else
+ endian_fixup = 0;
+#endif
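+        /*
+         * Example: for an I64 input with vece = MO_16 on a big-endian
+         * host, the low 16 bits that dup broadcasts sit in the last
+         * two bytes of the slot, so endian_fixup = 8 - 2 = 6.
+         */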
+ if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
+ its->mem_offset + endian_fixup)) {
+ goto done;
+ }
+ tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ /* We now have a vector input register, so dup must succeed. */
+ ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
+ tcg_debug_assert(ok);
+
+ done:
+ if (IS_DEAD_ARG(1)) {
+ temp_dead(s, its);
+ }
+ if (NEED_SYNC_ARG(0)) {
+ temp_sync(s, ots, s->reserved_regs, 0, 0);
+ }
+ if (IS_DEAD_ARG(0)) {
+ temp_dead(s, ots);
+ }
+}
+
static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
{
const TCGLifeData arg_life = op->life;
i_allocated_regs, 0);
reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
o_preferred_regs, ts->indirect_base);
- tcg_out_mov(s, ts->type, reg, ts->reg);
+ if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
+ /*
+ * Cross register class move not supported. Sync the
+ * temp back to its slot and load from there.
+ */
+ temp_sync(s, ts, i_allocated_regs, 0, 0);
+ tcg_out_ld(s, ts->type, reg,
+ ts->mem_base->reg, ts->mem_offset);
+ }
}
new_args[i] = reg;
const_args[i] = 0;
arg = op->args[i];
arg_ct = &def->args_ct[i];
ts = arg_temp(arg);
+
+ /* ENV should not be modified. */
+ tcg_debug_assert(!ts->fixed_reg);
+
if ((arg_ct->ct & TCG_CT_ALIAS)
&& !const_args[arg_ct->alias_index]) {
reg = new_args[arg_ct->alias_index];
i_allocated_regs | o_allocated_regs,
op->output_pref[k], ts->indirect_base);
} else {
- /* if fixed register, we try to use it */
- reg = ts->reg;
- if (ts->fixed_reg &&
- tcg_regset_test_reg(arg_ct->u.regs, reg)) {
- goto oarg_end;
- }
reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
op->output_pref[k], ts->indirect_base);
}
tcg_regset_set_reg(o_allocated_regs, reg);
- /* if a fixed register is used, then a move will be done afterwards */
- if (!ts->fixed_reg) {
- if (ts->val_type == TEMP_VAL_REG) {
- s->reg_to_temp[ts->reg] = NULL;
- }
- ts->val_type = TEMP_VAL_REG;
- ts->reg = reg;
- /* temp value is modified, so the value kept in memory is
- potentially not the same */
- ts->mem_coherent = 0;
- s->reg_to_temp[reg] = ts;
+ if (ts->val_type == TEMP_VAL_REG) {
+ s->reg_to_temp[ts->reg] = NULL;
}
- oarg_end:
+ ts->val_type = TEMP_VAL_REG;
+ ts->reg = reg;
+ /*
+ * Temp value is modified, so the value kept in memory is
+ * potentially not the same.
+ */
+ ts->mem_coherent = 0;
+ s->reg_to_temp[reg] = ts;
new_args[i] = reg;
}
}
/* move the outputs in the correct register if needed */
for(i = 0; i < nb_oargs; i++) {
ts = arg_temp(op->args[i]);
- reg = new_args[i];
- if (ts->fixed_reg && ts->reg != reg) {
- tcg_out_mov(s, ts->type, ts->reg, reg);
- }
+
+ /* ENV should not be modified. */
+ tcg_debug_assert(!ts->fixed_reg);
+
if (NEED_SYNC_ARG(i)) {
temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
} else if (IS_DEAD_ARG(i)) {
if (ts->val_type == TEMP_VAL_REG) {
if (ts->reg != reg) {
tcg_reg_free(s, reg, allocated_regs);
- tcg_out_mov(s, ts->type, reg, ts->reg);
+ if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
+ /*
+ * Cross register class move not supported. Sync the
+ * temp back to its slot and load from there.
+ */
+ temp_sync(s, ts, allocated_regs, 0, 0);
+ tcg_out_ld(s, ts->type, reg,
+ ts->mem_base->reg, ts->mem_offset);
+ }
}
} else {
TCGRegSet arg_set = 0;
for(i = 0; i < nb_oargs; i++) {
arg = op->args[i];
ts = arg_temp(arg);
+
+ /* ENV should not be modified. */
+ tcg_debug_assert(!ts->fixed_reg);
+
reg = tcg_target_call_oarg_regs[i];
tcg_debug_assert(s->reg_to_temp[reg] == NULL);
-
- if (ts->fixed_reg) {
- if (ts->reg != reg) {
- tcg_out_mov(s, ts->type, ts->reg, reg);
- }
- } else {
- if (ts->val_type == TEMP_VAL_REG) {
- s->reg_to_temp[ts->reg] = NULL;
- }
- ts->val_type = TEMP_VAL_REG;
- ts->reg = reg;
- ts->mem_coherent = 0;
- s->reg_to_temp[reg] = ts;
- if (NEED_SYNC_ARG(i)) {
- temp_sync(s, ts, allocated_regs, 0, IS_DEAD_ARG(i));
- } else if (IS_DEAD_ARG(i)) {
- temp_dead(s, ts);
- }
+ if (ts->val_type == TEMP_VAL_REG) {
+ s->reg_to_temp[ts->reg] = NULL;
+ }
+ ts->val_type = TEMP_VAL_REG;
+ ts->reg = reg;
+ ts->mem_coherent = 0;
+ s->reg_to_temp[reg] = ts;
+ if (NEED_SYNC_ARG(i)) {
+ temp_sync(s, ts, allocated_regs, 0, IS_DEAD_ARG(i));
+ } else if (IS_DEAD_ARG(i)) {
+ temp_dead(s, ts);
}
}
}
case INDEX_op_dupi_vec:
tcg_reg_alloc_movi(s, op);
break;
+ case INDEX_op_dup_vec:
+ tcg_reg_alloc_dup(s, op);
+ break;
case INDEX_op_insn_start:
if (num_insns >= 0) {
size_t off = tcg_current_code_size(s);
&& !defined(TCG_TARGET_HAS_v128) \
&& !defined(TCG_TARGET_HAS_v256)
#define TCG_TARGET_MAYBE_vec 0
+#define TCG_TARGET_HAS_abs_vec 0
#define TCG_TARGET_HAS_neg_vec 0
#define TCG_TARGET_HAS_not_vec 0
#define TCG_TARGET_HAS_andc_vec 0
#ifdef CONFIG_DEBUG_TCG
int temps_in_use;
int goto_tb_issue_mask;
+ const TCGOpcode *vecop_list;
#endif
/* Code generation. Note that we specifically do not use tcg_insn_unit
void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
TCGMemOpIdx oi, uintptr_t retaddr);
+#ifdef CONFIG_DEBUG_TCG
+void tcg_assert_listed_vecop(TCGOpcode);
+#else
+static inline void tcg_assert_listed_vecop(TCGOpcode op) { }
+#endif
+
+static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
+{
+#ifdef CONFIG_DEBUG_TCG
+ const TCGOpcode *o = tcg_ctx->vecop_list;
+ tcg_ctx->vecop_list = n;
+ return o;
+#else
+ return NULL;
+#endif
+}
+
+bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned);
+
#endif /* TCG_H */
old_code_ptr[1] = s->code_ptr - old_code_ptr;
}
-static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
+static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
uint8_t *old_code_ptr = s->code_ptr;
tcg_debug_assert(ret != arg);
tcg_out_r(s, ret);
tcg_out_r(s, arg);
old_code_ptr[1] = s->code_ptr - old_code_ptr;
+ return true;
}
static void tcg_out_movi(TCGContext *s, TCGType type,
check-unit-y += tests/check-qdict$(EXESUF)
check-unit-y += tests/check-block-qdict$(EXESUF)
-check-unit-y += tests/test-char$(EXESUF)
+check-unit-$(CONFIG_SOFTMMU) += tests/test-char$(EXESUF)
check-unit-y += tests/check-qnum$(EXESUF)
check-unit-y += tests/check-qstring$(EXESUF)
check-unit-y += tests/check-qlist$(EXESUF)
check-unit-y += tests/test-string-output-visitor$(EXESUF)
check-unit-y += tests/test-qmp-event$(EXESUF)
check-unit-y += tests/test-opts-visitor$(EXESUF)
-check-unit-y += tests/test-coroutine$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-coroutine$(EXESUF)
check-unit-y += tests/test-visitor-serialization$(EXESUF)
check-unit-y += tests/test-iov$(EXESUF)
-check-unit-y += tests/test-aio$(EXESUF)
-check-unit-y += tests/test-aio-multithread$(EXESUF)
-check-unit-y += tests/test-throttle$(EXESUF)
-check-unit-y += tests/test-thread-pool$(EXESUF)
-check-unit-y += tests/test-hbitmap$(EXESUF)
-check-unit-y += tests/test-bdrv-drain$(EXESUF)
-check-unit-y += tests/test-bdrv-graph-mod$(EXESUF)
-check-unit-y += tests/test-blockjob$(EXESUF)
-check-unit-y += tests/test-blockjob-txn$(EXESUF)
-check-unit-y += tests/test-block-backend$(EXESUF)
-check-unit-y += tests/test-block-iothread$(EXESUF)
-check-unit-y += tests/test-image-locking$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-aio$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-aio-multithread$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-throttle$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-thread-pool$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-hbitmap$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-bdrv-drain$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-bdrv-graph-mod$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-blockjob$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-blockjob-txn$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-block-backend$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-block-iothread$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-image-locking$(EXESUF)
check-unit-y += tests/test-x86-cpuid$(EXESUF)
# all code tested by test-x86-cpuid is inside topology.h
ifeq ($(CONFIG_SOFTMMU),y)
check-unit-y += tests/check-qom-proplist$(EXESUF)
check-unit-y += tests/test-qemu-opts$(EXESUF)
check-unit-y += tests/test-keyval$(EXESUF)
-check-unit-y += tests/test-write-threshold$(EXESUF)
-check-unit-y += tests/test-crypto-hash$(EXESUF)
-check-speed-y += tests/benchmark-crypto-hash$(EXESUF)
-check-unit-y += tests/test-crypto-hmac$(EXESUF)
-check-speed-y += tests/benchmark-crypto-hmac$(EXESUF)
-check-unit-y += tests/test-crypto-cipher$(EXESUF)
-check-speed-y += tests/benchmark-crypto-cipher$(EXESUF)
-check-unit-y += tests/test-crypto-secret$(EXESUF)
-check-unit-$(CONFIG_GNUTLS) += tests/test-crypto-tlscredsx509$(EXESUF)
-check-unit-$(CONFIG_GNUTLS) += tests/test-crypto-tlssession$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-write-threshold$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-crypto-hash$(EXESUF)
+check-speed-$(CONFIG_BLOCK) += tests/benchmark-crypto-hash$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-crypto-hmac$(EXESUF)
+check-speed-$(CONFIG_BLOCK) += tests/benchmark-crypto-hmac$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-crypto-cipher$(EXESUF)
+check-speed-$(CONFIG_BLOCK) += tests/benchmark-crypto-cipher$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-crypto-secret$(EXESUF)
+check-unit-$(call land,$(CONFIG_BLOCK),$(CONFIG_GNUTLS)) += tests/test-crypto-tlscredsx509$(EXESUF)
+check-unit-$(call land,$(CONFIG_BLOCK),$(CONFIG_GNUTLS)) += tests/test-crypto-tlssession$(EXESUF)
ifneq (,$(findstring qemu-ga,$(TOOLS)))
check-unit-$(call land,$(CONFIG_LINUX),$(CONFIG_VIRTIO_SERIAL)) += tests/test-qga$(EXESUF)
endif
check-unit-y += tests/test-timed-average$(EXESUF)
check-unit-$(CONFIG_INOTIFY1) += tests/test-util-filemonitor$(EXESUF)
check-unit-y += tests/test-util-sockets$(EXESUF)
-check-unit-y += tests/test-authz-simple$(EXESUF)
-check-unit-y += tests/test-authz-list$(EXESUF)
-check-unit-y += tests/test-authz-listfile$(EXESUF)
-check-unit-$(CONFIG_AUTH_PAM) += tests/test-authz-pam$(EXESUF)
-check-unit-y += tests/test-io-task$(EXESUF)
-check-unit-y += tests/test-io-channel-socket$(EXESUF)
-check-unit-y += tests/test-io-channel-file$(EXESUF)
-check-unit-$(CONFIG_GNUTLS) += tests/test-io-channel-tls$(EXESUF)
-check-unit-y += tests/test-io-channel-command$(EXESUF)
-check-unit-y += tests/test-io-channel-buffer$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-authz-simple$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-authz-list$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-authz-listfile$(EXESUF)
+check-unit-$(call land,$(CONFIG_BLOCK),$(CONFIG_AUTH_PAM)) += tests/test-authz-pam$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-io-task$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-io-channel-socket$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-io-channel-file$(EXESUF)
+check-unit-$(call land,$(CONFIG_BLOCK),$(CONFIG_GNUTLS)) += tests/test-io-channel-tls$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-io-channel-command$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-io-channel-buffer$(EXESUF)
check-unit-y += tests/test-base64$(EXESUF)
-check-unit-$(if $(CONFIG_NETTLE),y,$(CONFIG_GCRYPT)) += tests/test-crypto-pbkdf$(EXESUF)
-check-unit-y += tests/test-crypto-ivgen$(EXESUF)
-check-unit-y += tests/test-crypto-afsplit$(EXESUF)
-check-unit-y += tests/test-crypto-xts$(EXESUF)
-check-unit-y += tests/test-crypto-block$(EXESUF)
+check-unit-$(call land,$(CONFIG_BLOCK),$(if $(CONFIG_NETTLE),y,$(CONFIG_GCRYPT))) += tests/test-crypto-pbkdf$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-crypto-ivgen$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-crypto-afsplit$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-crypto-xts$(EXESUF)
+check-unit-$(CONFIG_BLOCK) += tests/test-crypto-block$(EXESUF)
check-unit-y += tests/test-logging$(EXESUF)
-check-unit-$(CONFIG_REPLICATION) += tests/test-replication$(EXESUF)
+check-unit-$(call land,$(CONFIG_BLOCK),$(CONFIG_REPLICATION)) += tests/test-replication$(EXESUF)
check-unit-y += tests/test-bufferiszero$(EXESUF)
check-unit-y += tests/test-uuid$(EXESUF)
check-unit-y += tests/ptimer-test$(EXESUF)
check-qapi-schema-y := $(addprefix tests/qapi-schema/, $(qapi-schema))
-GENERATED_FILES += tests/test-qapi-types.h \
- tests/include/test-qapi-types-sub-module.h \
- tests/test-qapi-types-sub-sub-module.h \
- tests/test-qapi-visit.h \
- tests/include/test-qapi-visit-sub-module.h \
- tests/test-qapi-visit-sub-sub-module.h \
- tests/test-qapi-commands.h \
- tests/include/test-qapi-commands-sub-module.h \
- tests/test-qapi-commands-sub-sub-module.h \
- tests/test-qapi-events.h \
- tests/include/test-qapi-events-sub-module.h \
- tests/test-qapi-events-sub-sub-module.h \
- tests/test-qapi-introspect.h
+generated-files-y += tests/test-qapi-types.h
+generated-files-y += tests/include/test-qapi-types-sub-module.h
+generated-files-y += tests/test-qapi-types-sub-sub-module.h
+generated-files-y += tests/test-qapi-visit.h
+generated-files-y += tests/include/test-qapi-visit-sub-module.h
+generated-files-y += tests/test-qapi-visit-sub-sub-module.h
+generated-files-y += tests/test-qapi-commands.h
+generated-files-y += tests/include/test-qapi-commands-sub-module.h
+generated-files-y += tests/test-qapi-commands-sub-sub-module.h
+generated-files-y += tests/test-qapi-events.h
+generated-files-y += tests/include/test-qapi-events-sub-module.h
+generated-files-y += tests/test-qapi-events-sub-sub-module.h
+generated-files-y += tests/test-qapi-introspect.h
QEMU_CFLAGS += -I$(SRC_PATH)/tests
tests/test-qapi-visit-sub-sub-module.o \
tests/test-qapi-introspect.o \
$(test-qom-obj-y)
-benchmark-crypto-obj-y = $(authz-obj-y) $(crypto-obj-y) $(test-qom-obj-y)
-test-crypto-obj-y = $(authz-obj-y) $(crypto-obj-y) $(test-qom-obj-y)
-test-io-obj-y = $(io-obj-y) $(test-crypto-obj-y)
-test-authz-obj-y = $(test-qom-obj-y) $(authz-obj-y)
-test-block-obj-y = $(block-obj-y) $(test-io-obj-y) tests/iothread.o
+benchmark-crypto-obj-$(CONFIG_BLOCK) = $(authz-obj-y) $(crypto-obj-y) $(test-qom-obj-y)
+test-crypto-obj-$(CONFIG_BLOCK) = $(authz-obj-y) $(crypto-obj-y) $(test-qom-obj-y)
+test-io-obj-$(CONFIG_BLOCK) = $(io-obj-y) $(test-crypto-obj-y)
+test-authz-obj-$(CONFIG_BLOCK) = $(test-qom-obj-y) $(authz-obj-y)
+test-block-obj-$(CONFIG_BLOCK) = $(block-obj-y) $(test-io-obj-y) tests/iothread.o
tests/check-qnum$(EXESUF): tests/check-qnum.o $(test-util-obj-y)
tests/check-qstring$(EXESUF): tests/check-qstring.o $(test-util-obj-y)
const uint8_t *addr_ptr, const char *sig,
bool verify_checksum);
-#endif /* TEST_ACPI_UTILS_H */
+#endif /* TEST_ACPI_UTILS_H */
* License along with this library; if not, see <http://www.gnu.org/licenses/>
*/
-#ifndef QGRAPH_E1000E
-#define QGRAPH_E1000E
+#ifndef QGRAPH_E1000E_H
+#define QGRAPH_E1000E_H
#include "libqos/qgraph.h"
#include "pci.h"
#ifndef QGRAPH_H
#define QGRAPH_H
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdbool.h>
#include <gmodule.h>
-#include <glib.h>
#include "qemu/module.h"
#include "malloc.h"
* License along with this library; if not, see <http://www.gnu.org/licenses/>
*/
-#ifndef QGRAPH_EXTRA_H
-#define QGRAPH_EXTRA_H
+#ifndef QGRAPH_INTERNAL_H
+#define QGRAPH_INTERNAL_H
/* This header is declaring additional helper functions defined in
* libqos/qgraph.c
* License along with this library; if not, see <http://www.gnu.org/licenses/>
*/
-#ifndef QGRAPH_QSDHCI
-#define QGRAPH_QSDHCI
+#ifndef QGRAPH_QSDHCI_H
+#define QGRAPH_QSDHCI_H
#include "libqos/qgraph.h"
#include "pci.h"
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
-#ifndef _TEST_MIGRATION_H_
-#define _TEST_MIGRATION_H_
+
+#ifndef MIGRATION_TEST_H
+#define MIGRATION_TEST_H
/* Common */
#define TEST_MEM_PAGE_SIZE 4096
*/
#define ARM_TEST_MAX_KERNEL_SIZE (512 * 1024)
-#endif /* _TEST_MIGRATION_H_ */
+#endif /* MIGRATION_TEST_H */
event = self.vm_a.event_wait('MIGRATION')
if event['data']['status'] == 'completed':
break
+ while True:
+ result = self.vm_a.qmp('query-status')
+            if result['return']['status'] == 'postmigrate':
+ break
# test that bitmap is still here
removed = (not migrate_bitmaps) and persistent
self.check_bitmap(self.vm_a, False if removed else sha256)
- self.vm_a.qmp('cont')
+ result = self.vm_a.qmp('cont')
+ self.assert_qmp(result, 'return', {})
# test that bitmap is still here after invalidation
self.check_bitmap(self.vm_a, sha256)
_cleanup()
{
- _cleanup_test_img
+ _cleanup_qemu
+ _cleanup_test_img
+ rm -f "$TEST_DIR/nbd"
}
trap "_cleanup; exit \$status" 0 1 2 3 15
#!/usr/bin/env bash
#
# Test qemu-img vs. unaligned images
+# (See also 253, which is the O_DIRECT version)
#
# Copyright (C) 2018-2019 Red Hat, Inc.
#
_supported_proto file
_supported_os Linux
+_default_cache_mode writeback
+_supported_cache_modes writeback writethrough unsafe
+
echo
echo "=== Check mapping of unaligned raw image ==="
echo
# hd2 <- hd0
result = self.vm.qmp('block-stream', conv_keys = True, job_id = 'stream0',
- device = 'hd0', base_node = 'hd2', speed = 512 * 1024)
+ device = 'hd0', base_node = 'hd2',
+ auto_finalize = False)
self.assert_qmp(result, 'return', {})
# We can't remove hd2 while the stream job is ongoing
opts['backing'] = None
self.reopen(opts, {}, "Cannot change 'backing' link from 'hd0' to 'hd1'")
- self.wait_until_completed(drive = 'stream0')
+ self.vm.run_job('stream0', auto_finalize = False, auto_dismiss = True)
# Reopen the chain during a block-stream job (from hd2 to hd1)
def test_block_stream_4(self):
# hd1 <- hd0
result = self.vm.qmp('block-stream', conv_keys = True, job_id = 'stream0',
- device = 'hd1', speed = 512 * 1024)
+ device = 'hd1', auto_finalize = False)
self.assert_qmp(result, 'return', {})
# We can't reopen with the original options because that would
# make hd1 read-only and block-stream requires it to be read-write
- self.reopen(opts, {}, "Can't set node 'hd1' to r/o with copy-on-read enabled")
+ # (Which error message appears depends on whether the stream job is
+ # already done with copying at this point.)
+ self.reopen(opts, {},
+ ["Can't set node 'hd1' to r/o with copy-on-read enabled",
+ "Cannot make block node read-only, there is a writer on it"])
# We can't remove hd2 while the stream job is ongoing
opts['backing']['backing'] = None
opts['backing'] = None
self.reopen(opts)
- self.wait_until_completed(drive = 'stream0')
+ self.vm.run_job('stream0', auto_finalize = False, auto_dismiss = True)
# Reopen the chain during a block-commit job (from hd0 to hd2)
def test_block_commit_1(self):
self.assert_qmp(result, 'return', {})
result = self.vm.qmp('block-commit', conv_keys = True, job_id = 'commit0',
- device = 'hd0', speed = 1024 * 1024)
+ device = 'hd0')
self.assert_qmp(result, 'return', {})
# We can't remove hd2 while the commit job is ongoing
self.assert_qmp(result, 'return', {})
result = self.vm.qmp('block-commit', conv_keys = True, job_id = 'commit0',
- device = 'hd0', top_node = 'hd1', speed = 1024 * 1024)
+ device = 'hd0', top_node = 'hd1',
+ auto_finalize = False)
self.assert_qmp(result, 'return', {})
# We can't remove hd2 while the commit job is ongoing
self.reopen(opts, {}, "Cannot change backing link if 'hd0' has an implicit backing file")
# hd2 <- hd0
- self.wait_until_completed(drive = 'commit0')
+ self.vm.run_job('commit0', auto_finalize = False, auto_dismiss = True)
self.assert_qmp(self.get_node('hd0'), 'ro', False)
self.assertEqual(self.get_node('hd1'), None)
Ran 18 tests
OK
+{"execute": "job-finalize", "arguments": {"id": "commit0"}}
+{"return": {}}
+{"data": {"id": "commit0", "type": "commit"}, "event": "BLOCK_JOB_PENDING", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"device": "commit0", "len": 3145728, "offset": 3145728, "speed": 0, "type": "commit"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"execute": "job-finalize", "arguments": {"id": "stream0"}}
+{"return": {}}
+{"data": {"id": "stream0", "type": "stream"}, "event": "BLOCK_JOB_PENDING", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"device": "stream0", "len": 3145728, "offset": 3145728, "speed": 0, "type": "stream"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"execute": "job-finalize", "arguments": {"id": "stream0"}}
+{"return": {}}
+{"data": {"id": "stream0", "type": "stream"}, "event": "BLOCK_JOB_PENDING", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"device": "stream0", "len": 3145728, "offset": 3145728, "speed": 0, "type": "stream"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
--- /dev/null
+#!/usr/bin/env bash
+#
+# Tests for rebasing COW images that require zero cluster support
+#
+# Copyright (C) 2019 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=mreitz@redhat.com
+
+seq=$(basename $0)
+echo "QA output created by $seq"
+
+status=1 # failure is the default!
+
+_cleanup()
+{
+ _cleanup_test_img
+ rm -f "$TEST_IMG.base_new"
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+. ./common.pattern
+
+# Currently only qcow2 and qed support rebasing, and only qcow2 v3 has
+# zero cluster support
+_supported_fmt qcow2
+_unsupported_imgopts 'compat=0.10'
+_supported_proto file
+_supported_os Linux
+
+CLUSTER_SIZE=65536
+
+echo
+echo "=== Test rebase without input base ==="
+echo
+
+# Cluster allocations to be tested:
+#
+# Backing (new) 11 -- 11 -- 11 --
+# COW image 22 22 11 11 -- --
+#
+# Expected result:
+#
+# COW image 22 22 11 11 00 --
+#
+# (Cluster 2 might be "--" after the rebase, too, but rebase just
+# compares the new backing file to the old one and disregards the
+# overlay. Therefore, it will never discard overlay clusters.)
+
+_make_test_img $((6 * CLUSTER_SIZE))
+TEST_IMG="$TEST_IMG.base_new" _make_test_img $((6 * CLUSTER_SIZE))
+
+echo
+
+$QEMU_IO "$TEST_IMG" \
+ -c "write -P 0x22 $((0 * CLUSTER_SIZE)) $((2 * CLUSTER_SIZE))" \
+ -c "write -P 0x11 $((2 * CLUSTER_SIZE)) $((2 * CLUSTER_SIZE))" \
+ | _filter_qemu_io
+
+$QEMU_IO "$TEST_IMG.base_new" \
+ -c "write -P 0x11 $((0 * CLUSTER_SIZE)) $CLUSTER_SIZE" \
+ -c "write -P 0x11 $((2 * CLUSTER_SIZE)) $CLUSTER_SIZE" \
+ -c "write -P 0x11 $((4 * CLUSTER_SIZE)) $CLUSTER_SIZE" \
+ | _filter_qemu_io
+
+echo
+
+# This should be a no-op
+$QEMU_IMG rebase -b "" "$TEST_IMG"
+
+# Verify the data is correct
+$QEMU_IO "$TEST_IMG" \
+ -c "read -P 0x22 $((0 * CLUSTER_SIZE)) $((2 * CLUSTER_SIZE))" \
+ -c "read -P 0x11 $((2 * CLUSTER_SIZE)) $((2 * CLUSTER_SIZE))" \
+ -c "read -P 0x00 $((4 * CLUSTER_SIZE)) $((2 * CLUSTER_SIZE))" \
+ | _filter_qemu_io
+
+echo
+
+# Verify the allocation status (first four clusters should be allocated
+# in TEST_IMG, clusters 4 and 5 should be unallocated (marked as zero
+# clusters here because there is no backing file))
+$QEMU_IMG map --output=json "$TEST_IMG" | _filter_qemu_img_map
+
+echo
+
+$QEMU_IMG rebase -b "$TEST_IMG.base_new" "$TEST_IMG"
+
+# Verify the data is correct
+$QEMU_IO "$TEST_IMG" \
+ -c "read -P 0x22 $((0 * CLUSTER_SIZE)) $((2 * CLUSTER_SIZE))" \
+ -c "read -P 0x11 $((2 * CLUSTER_SIZE)) $((2 * CLUSTER_SIZE))" \
+ -c "read -P 0x00 $((4 * CLUSTER_SIZE)) $((2 * CLUSTER_SIZE))" \
+ | _filter_qemu_io
+
+echo
+
+# Verify the allocation status (first four clusters should be allocated
+# in TEST_IMG, cluster 4 should be zero, and cluster 5 should be
+# unallocated (signified by '"depth": 1'))
+$QEMU_IMG map --output=json "$TEST_IMG" | _filter_qemu_img_map
+
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
--- /dev/null
+QA output created by 252
+
+=== Test rebase without input base ===
+
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=393216
+Formatting 'TEST_DIR/t.IMGFMT.base_new', fmt=IMGFMT size=393216
+
+wrote 131072/131072 bytes at offset 0
+128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 131072/131072 bytes at offset 131072
+128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 65536/65536 bytes at offset 131072
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 65536/65536 bytes at offset 262144
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+read 131072/131072 bytes at offset 0
+128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 131072/131072 bytes at offset 131072
+128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 131072/131072 bytes at offset 262144
+128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+[{ "start": 0, "length": 262144, "depth": 0, "zero": false, "data": true, "offset": OFFSET},
+{ "start": 262144, "length": 131072, "depth": 0, "zero": true, "data": false}]
+
+read 131072/131072 bytes at offset 0
+128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 131072/131072 bytes at offset 131072
+128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 131072/131072 bytes at offset 262144
+128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+[{ "start": 0, "length": 262144, "depth": 0, "zero": false, "data": true, "offset": OFFSET},
+{ "start": 262144, "length": 65536, "depth": 0, "zero": true, "data": false},
+{ "start": 327680, "length": 65536, "depth": 1, "zero": true, "data": false}]
+*** done
--- /dev/null
+#!/usr/bin/env bash
+#
+# Test qemu-img vs. unaligned images; O_DIRECT version
+# (Originates from 221)
+#
+# Copyright (C) 2019 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+seq="$(basename $0)"
+echo "QA output created by $seq"
+
+status=1 # failure is the default!
+
+_cleanup()
+{
+ _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+_supported_fmt raw
+_supported_proto file
+_supported_os Linux
+
+_default_cache_mode none
+_supported_cache_modes none directsync
+
+echo
+echo "=== Check mapping of unaligned raw image ==="
+echo
+
+# We do not know how large a physical sector is, but it is certainly
+# going to be a factor of 1 MB
+size=$((1 * 1024 * 1024 - 1))
+
+# qemu-img create rounds size up to BDRV_SECTOR_SIZE
+_make_test_img $size
+$QEMU_IMG map --output=json --image-opts \
+ "driver=$IMGFMT,file.driver=file,file.filename=$TEST_IMG,cache.direct=on" \
+ | _filter_qemu_img_map
+
+# so we resize it and check again
+truncate --size=$size "$TEST_IMG"
+$QEMU_IMG map --output=json --image-opts \
+ "driver=$IMGFMT,file.driver=file,file.filename=$TEST_IMG,cache.direct=on" \
+ | _filter_qemu_img_map
+
+# qemu-io with O_DIRECT always writes whole physical sectors. Again,
+# we do not know how large a physical sector is, so we just start
+# writing from a 64 kB boundary, which should always be aligned.
+offset=$((1 * 1024 * 1024 - 64 * 1024))
+$QEMU_IO -c "w $offset $((size - offset))" "$TEST_IMG" | _filter_qemu_io
+$QEMU_IMG map --output=json --image-opts \
+ "driver=$IMGFMT,file.driver=file,file.filename=$TEST_IMG,cache.direct=on" \
+ | _filter_qemu_img_map
+
+# Resize it and check again -- contrary to 221, we may not get partial
+# sectors here, so there should be only two areas (one zero, one
+# data).
+truncate --size=$size "$TEST_IMG"
+$QEMU_IMG map --output=json --image-opts \
+ "driver=$IMGFMT,file.driver=file,file.filename=$TEST_IMG,cache.direct=on" \
+ | _filter_qemu_img_map
+
+# success, all done
+echo '*** done'
+rm -f $seq.full
+status=0
--- /dev/null
+QA output created by 253
+
+=== Check mapping of unaligned raw image ===
+
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048575
+[{ "start": 0, "length": 1048576, "depth": 0, "zero": true, "data": false, "offset": OFFSET}]
+[{ "start": 0, "length": 1048576, "depth": 0, "zero": true, "data": false, "offset": OFFSET}]
+wrote 65535/65535 bytes at offset 983040
+63.999 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+[{ "start": 0, "length": 983040, "depth": 0, "zero": true, "data": false, "offset": OFFSET},
+{ "start": 983040, "length": 65536, "depth": 0, "zero": false, "data": true, "offset": OFFSET}]
+[{ "start": 0, "length": 983040, "depth": 0, "zero": true, "data": false, "offset": OFFSET},
+{ "start": 983040, "length": 65536, "depth": 0, "zero": false, "data": true, "offset": OFFSET}]
+*** done
247 rw auto quick
248 rw auto quick
249 rw auto quick
+252 rw auto backing quick
+253 rw auto quick
elif status == 'null':
return error
else:
- iotests.log(ev)
+ log(ev)
def node_info(self, node_name):
nodes = self.qmp('query-named-block-nodes')
self.fail('path "%s" has value "%s"' % (path, str(result)))
def assert_qmp(self, d, path, value):
- '''Assert that the value for a specific path in a QMP dict matches'''
+ '''Assert that the value for a specific path in a QMP dict
+ matches. When given a list of values, assert that any of
+ them matches.'''
+
result = self.dictpath(d, path)
- self.assertEqual(result, value, 'values not equal "%s" and "%s"' % (str(result), str(value)))
+
+ # [] makes no sense as a list of valid values, so treat it as
+ # an actual single value.
+ if isinstance(value, list) and value != []:
+ for v in value:
+ if result == v:
+ return
+ self.fail('no match for "%s" in %s' % (str(result), str(value)))
+ else:
+ self.assertEqual(result, value,
+ 'values not equal "%s" and "%s"'
+ % (str(result), str(value)))
def assert_no_active_block_jobs(self):
result = self.vm.qmp('query-block-jobs')
* License along with this library; if not, see <http://www.gnu.org/licenses/>
*/
-#include <getopt.h>
#include "qemu/osdep.h"
+#include <getopt.h>
#include "libqtest.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qbool.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
+#include "qapi/qmp/qdict.h"
#include "iothread.h"
static int coroutine_fn bdrv_test_co_prwv(BlockDriverState *bs,
blk_unref(blk);
}
+typedef struct TestBlockJob {
+ BlockJob common;
+ bool should_complete;
+ int n;
+} TestBlockJob;
+
+static int test_job_prepare(Job *job)
+{
+ g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
+ return 0;
+}
+
+static int coroutine_fn test_job_run(Job *job, Error **errp)
+{
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
+
+ job_transition_to_ready(&s->common.job);
+ while (!s->should_complete) {
+ s->n++;
+ g_assert(qemu_get_current_aio_context() == job->aio_context);
+
+ /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
+ * emulate some actual activity (probably some I/O) here so that the
+ * drain involved in AioContext switches has to wait for this activity
+ * to stop. */
+ qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);
+
+ job_pause_point(&s->common.job);
+ }
+
+ g_assert(qemu_get_current_aio_context() == job->aio_context);
+ return 0;
+}
+
+static void test_job_complete(Job *job, Error **errp)
+{
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
+ s->should_complete = true;
+}
+
+BlockJobDriver test_job_driver = {
+ .job_driver = {
+ .instance_size = sizeof(TestBlockJob),
+ .free = block_job_free,
+ .user_resume = block_job_user_resume,
+ .drain = block_job_drain,
+ .run = test_job_run,
+ .complete = test_job_complete,
+ .prepare = test_job_prepare,
+ },
+};
+
+static void test_attach_blockjob(void)
+{
+ IOThread *iothread = iothread_new();
+ AioContext *ctx = iothread_get_aio_context(iothread);
+ BlockBackend *blk;
+ BlockDriverState *bs;
+ TestBlockJob *tjob;
+
+ blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+ bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
+ blk_insert_bs(blk, bs, &error_abort);
+
+ tjob = block_job_create("job0", &test_job_driver, NULL, bs,
+ 0, BLK_PERM_ALL,
+ 0, 0, NULL, NULL, &error_abort);
+ job_start(&tjob->common.job);
+
+ while (tjob->n == 0) {
+ aio_poll(qemu_get_aio_context(), false);
+ }
+
+ blk_set_aio_context(blk, ctx);
+
+ tjob->n = 0;
+ while (tjob->n == 0) {
+ aio_poll(qemu_get_aio_context(), false);
+ }
+
+ aio_context_acquire(ctx);
+ blk_set_aio_context(blk, qemu_get_aio_context());
+ aio_context_release(ctx);
+
+ tjob->n = 0;
+ while (tjob->n == 0) {
+ aio_poll(qemu_get_aio_context(), false);
+ }
+
+ blk_set_aio_context(blk, ctx);
+
+ tjob->n = 0;
+ while (tjob->n == 0) {
+ aio_poll(qemu_get_aio_context(), false);
+ }
+
+ aio_context_acquire(ctx);
+ job_complete_sync(&tjob->common.job, &error_abort);
+ blk_set_aio_context(blk, qemu_get_aio_context());
+ aio_context_release(ctx);
+
+ bdrv_unref(bs);
+ blk_unref(blk);
+}
+
+/*
+ * Test that changing the AioContext for one node in a tree (here through blk)
+ * changes all other nodes as well:
+ *
+ * blk
+ * |
+ * | bs_verify [blkverify]
+ * | / \
+ * | / \
+ * bs_a [bdrv_test] bs_b [bdrv_test]
+ *
+ */
+static void test_propagate_basic(void)
+{
+ IOThread *iothread = iothread_new();
+ AioContext *ctx = iothread_get_aio_context(iothread);
+ BlockBackend *blk;
+ BlockDriverState *bs_a, *bs_b, *bs_verify;
+ QDict *options;
+
+ /* Create bs_a and its BlockBackend */
+ blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+ bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
+ blk_insert_bs(blk, bs_a, &error_abort);
+
+ /* Create bs_b */
+ bs_b = bdrv_new_open_driver(&bdrv_test, "bs_b", BDRV_O_RDWR, &error_abort);
+
+ /* Create blkverify filter that references both bs_a and bs_b */
+ options = qdict_new();
+ qdict_put_str(options, "driver", "blkverify");
+ qdict_put_str(options, "test", "bs_a");
+ qdict_put_str(options, "raw", "bs_b");
+
+ bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
+
+ /* Switch the AioContext */
+ blk_set_aio_context(blk, ctx);
+ g_assert(blk_get_aio_context(blk) == ctx);
+ g_assert(bdrv_get_aio_context(bs_a) == ctx);
+ g_assert(bdrv_get_aio_context(bs_verify) == ctx);
+ g_assert(bdrv_get_aio_context(bs_b) == ctx);
+
+ /* Switch the AioContext back */
+ ctx = qemu_get_aio_context();
+ blk_set_aio_context(blk, ctx);
+ g_assert(blk_get_aio_context(blk) == ctx);
+ g_assert(bdrv_get_aio_context(bs_a) == ctx);
+ g_assert(bdrv_get_aio_context(bs_verify) == ctx);
+ g_assert(bdrv_get_aio_context(bs_b) == ctx);
+
+ bdrv_unref(bs_verify);
+ bdrv_unref(bs_b);
+ bdrv_unref(bs_a);
+ blk_unref(blk);
+}
+
+/*
+ * Test that diamonds in the graph don't lead to endless recursion:
+ *
+ *              blk
+ *               |
+ *      bs_verify [blkverify]
+ *       /               \
+ *      /                 \
+ *   bs_b [raw]         bs_c [raw]
+ *      \                 /
+ *       \               /
+ *       bs_a [bdrv_test]
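+ *
+ * bs_a is reachable twice (via bs_b and via bs_c), so the AioContext switch
+ * must visit each node exactly once instead of recursing forever.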
+ */
+static void test_propagate_diamond(void)
+{
+ IOThread *iothread = iothread_new();
+ AioContext *ctx = iothread_get_aio_context(iothread);
+ BlockBackend *blk;
+ BlockDriverState *bs_a, *bs_b, *bs_c, *bs_verify;
+ QDict *options;
+
+ /* Create bs_a */
+ bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
+
+    /* Create bs_b and bs_c */
+ options = qdict_new();
+ qdict_put_str(options, "driver", "raw");
+ qdict_put_str(options, "file", "bs_a");
+ qdict_put_str(options, "node-name", "bs_b");
+ bs_b = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
+
+ options = qdict_new();
+ qdict_put_str(options, "driver", "raw");
+ qdict_put_str(options, "file", "bs_a");
+ qdict_put_str(options, "node-name", "bs_c");
+ bs_c = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
+
+ /* Create blkverify filter that references both bs_b and bs_c */
+ options = qdict_new();
+ qdict_put_str(options, "driver", "blkverify");
+ qdict_put_str(options, "test", "bs_b");
+ qdict_put_str(options, "raw", "bs_c");
+
+ bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
+ blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+ blk_insert_bs(blk, bs_verify, &error_abort);
+
+ /* Switch the AioContext */
+ blk_set_aio_context(blk, ctx);
+ g_assert(blk_get_aio_context(blk) == ctx);
+ g_assert(bdrv_get_aio_context(bs_verify) == ctx);
+ g_assert(bdrv_get_aio_context(bs_a) == ctx);
+ g_assert(bdrv_get_aio_context(bs_b) == ctx);
+ g_assert(bdrv_get_aio_context(bs_c) == ctx);
+
+ /* Switch the AioContext back */
+ ctx = qemu_get_aio_context();
+ blk_set_aio_context(blk, ctx);
+ g_assert(blk_get_aio_context(blk) == ctx);
+ g_assert(bdrv_get_aio_context(bs_verify) == ctx);
+ g_assert(bdrv_get_aio_context(bs_a) == ctx);
+ g_assert(bdrv_get_aio_context(bs_b) == ctx);
+ g_assert(bdrv_get_aio_context(bs_c) == ctx);
+
+ blk_unref(blk);
+ bdrv_unref(bs_verify);
+ bdrv_unref(bs_c);
+ bdrv_unref(bs_b);
+ bdrv_unref(bs_a);
+}
+
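+/*
+ * Test that attaching a mirror job keeps src, target and the job itself in
+ * one AioContext, and that a BlockBackend on src blocks context changes
+ * until blk_set_allow_aio_context_change() permits them.
+ */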
+static void test_propagate_mirror(void)
+{
+ IOThread *iothread = iothread_new();
+ AioContext *ctx = iothread_get_aio_context(iothread);
+ AioContext *main_ctx = qemu_get_aio_context();
+ BlockDriverState *src, *target;
+ BlockBackend *blk;
+ Job *job;
+ Error *local_err = NULL;
+
+    /* Create src and target */
+ src = bdrv_new_open_driver(&bdrv_test, "src", BDRV_O_RDWR, &error_abort);
+ target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
+ &error_abort);
+
+ /* Start a mirror job */
+ mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
+ MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN,
+ BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
+ false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
+ &error_abort);
+ job = job_get("job0");
+
+ /* Change the AioContext of src */
+ bdrv_try_set_aio_context(src, ctx, &error_abort);
+ g_assert(bdrv_get_aio_context(src) == ctx);
+ g_assert(bdrv_get_aio_context(target) == ctx);
+ g_assert(job->aio_context == ctx);
+
+ /* Change the AioContext of target */
+ aio_context_acquire(ctx);
+ bdrv_try_set_aio_context(target, main_ctx, &error_abort);
+ aio_context_release(ctx);
+ g_assert(bdrv_get_aio_context(src) == main_ctx);
+ g_assert(bdrv_get_aio_context(target) == main_ctx);
+
+ /* With a BlockBackend on src, changing target must fail */
+ blk = blk_new(0, BLK_PERM_ALL);
+ blk_insert_bs(blk, src, &error_abort);
+
+ bdrv_try_set_aio_context(target, ctx, &local_err);
+ g_assert(local_err);
+ error_free(local_err);
+
+ g_assert(blk_get_aio_context(blk) == main_ctx);
+ g_assert(bdrv_get_aio_context(src) == main_ctx);
+ g_assert(bdrv_get_aio_context(target) == main_ctx);
+
+ /* ...unless we explicitly allow it */
+ aio_context_acquire(ctx);
+ blk_set_allow_aio_context_change(blk, true);
+ bdrv_try_set_aio_context(target, ctx, &error_abort);
+ aio_context_release(ctx);
+
+ g_assert(blk_get_aio_context(blk) == ctx);
+ g_assert(bdrv_get_aio_context(src) == ctx);
+ g_assert(bdrv_get_aio_context(target) == ctx);
+
+ job_cancel_sync_all();
+
+ aio_context_acquire(ctx);
+ blk_set_aio_context(blk, main_ctx);
+ bdrv_try_set_aio_context(target, main_ctx, &error_abort);
+ aio_context_release(ctx);
+
+ blk_unref(blk);
+ bdrv_unref(src);
+ bdrv_unref(target);
+}
+
int main(int argc, char **argv)
{
int i;
g_test_add_data_func(t->name, t, test_sync_op);
}
+ g_test_add_func("/attach/blockjob", test_attach_blockjob);
+ g_test_add_func("/propagate/basic", test_propagate_basic);
+ g_test_add_func("/propagate/diamond", test_propagate_diamond);
+ g_test_add_func("/propagate/mirror", test_propagate_mirror);
+
return g_test_run();
}
void tpm_emu_test_wait_cond(TestState *s);
void *tpm_emu_ctrl_thread(void *data);
-#endif /* TEST_TPM_EMU_H */
+#endif /* TESTS_TPM_EMU_H */
assert(gls);
- glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT,
- surface_stride(surface) / surface_bytes_per_pixel(surface));
- glTexSubImage2D(GL_TEXTURE_2D, 0,
- x, y, w, h,
- surface->glformat, surface->gltype,
- data + surface_stride(surface) * y
- + surface_bytes_per_pixel(surface) * x);
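+    /* Nothing to do if no texture has been created for this surface yet */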
+ if (surface->texture) {
+ glBindTexture(GL_TEXTURE_2D, surface->texture);
+ glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT,
+ surface_stride(surface)
+ / surface_bytes_per_pixel(surface));
+ glTexSubImage2D(GL_TEXTURE_2D, 0,
+ x, y, w, h,
+ surface->glformat, surface->gltype,
+ data + surface_stride(surface) * y
+ + surface_bytes_per_pixel(surface) * x);
+ }
}
void surface_gl_render_texture(QemuGLShader *gls,
{
console_ch_t *line;
cchar_t curses_line[width];
+ wchar_t wch[CCHARW_MAX];
+ attr_t attrs;
+ short colors;
+ int ret;
line = screen + y * width;
for (h += y; y < h; y ++, line += width) {
for (x = 0; x < width; x++) {
chtype ch = line[x] & 0xff;
chtype at = line[x] & ~0xff;
- if (vga_to_curses[ch].chars[0]) {
- curses_line[x] = vga_to_curses[ch];
- } else {
- curses_line[x] = (cchar_t) {
- .chars[0] = ch,
- };
+ ret = getcchar(&vga_to_curses[ch], wch, &attrs, &colors, NULL);
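+            /* No mapping for this glyph (or an empty one): fall back to
+             * the raw VGA character code */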
+ if (ret == ERR || wch[0] == 0) {
+ wch[0] = ch;
+ wch[1] = 0;
}
- curses_line[x].attr |= at;
+ setcchar(&curses_line[x], wch, at, 0, NULL);
}
mvwadd_wchnstr(screenpad, y, 0, curses_line, width);
}
endwin();
}
+/*
+ * In the following:
+ * - fch is the font glyph number
+ * - uch is the unicode value
+ * - wch is the wchar_t value (may not be Unicode, e.g. on BSD/Solaris)
+ * - mbch is the native local-dependent multibyte representation
+ */
+
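+/*
+ * Conversions go through the locale's multibyte encoding rather than iconv's
+ * "WCHAR_T" pseudo-encoding, because the latter is not supported by every
+ * iconv implementation.
+ */
+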
/* Setup wchar glyph for one UCS-2 char */
-static void convert_ucs(int glyph, uint16_t ch, iconv_t conv)
+static void convert_ucs(unsigned char fch, uint16_t uch, iconv_t conv)
{
- wchar_t wch;
- char *pch, *pwch;
- size_t sch, swch;
-
- pch = (char *) &ch;
- pwch = (char *) &wch;
- sch = sizeof(ch);
- swch = sizeof(wch);
-
- if (iconv(conv, &pch, &sch, &pwch, &swch) == (size_t) -1) {
- fprintf(stderr, "Could not convert 0x%04x from UCS-2 to WCHAR_T: %s\n",
- ch, strerror(errno));
- } else {
- vga_to_curses[glyph].chars[0] = wch;
+ char mbch[MB_LEN_MAX];
+ wchar_t wch[2];
+ char *puch, *pmbch;
+ size_t such, smbch;
+ mbstate_t ps;
+
+ puch = (char *) &uch;
+ pmbch = (char *) mbch;
+ such = sizeof(uch);
+ smbch = sizeof(mbch);
+
+ if (iconv(conv, &puch, &such, &pmbch, &smbch) == (size_t) -1) {
+ fprintf(stderr, "Could not convert 0x%04x "
+ "from UCS-2 to a multibyte character: %s\n",
+ uch, strerror(errno));
+ return;
+ }
+
+ memset(&ps, 0, sizeof(ps));
+    if (mbrtowc(&wch[0], mbch, sizeof(mbch) - smbch, &ps) == (size_t) -1) {
+ fprintf(stderr, "Could not convert 0x%04x "
+ "from a multibyte character to wchar_t: %s\n",
+ uch, strerror(errno));
+ return;
}
+
+ wch[1] = 0;
+ setcchar(&vga_to_curses[fch], wch, 0, 0, NULL);
}
/* Setup wchar glyph for one font character */
-static void convert_font(unsigned char ch, iconv_t conv)
+static void convert_font(unsigned char fch, iconv_t conv)
{
- wchar_t wch;
- char *pch, *pwch;
- size_t sch, swch;
-
- pch = (char *) &ch;
- pwch = (char *) &wch;
- sch = sizeof(ch);
- swch = sizeof(wch);
-
- if (iconv(conv, &pch, &sch, &pwch, &swch) == (size_t) -1) {
- fprintf(stderr, "Could not convert 0x%02x from %s to WCHAR_T: %s\n",
- ch, font_charset, strerror(errno));
- } else {
- vga_to_curses[ch].chars[0] = wch;
+ char mbch[MB_LEN_MAX];
+ wchar_t wch[2];
+ char *pfch, *pmbch;
+ size_t sfch, smbch;
+ mbstate_t ps;
+
+ pfch = (char *) &fch;
+ pmbch = (char *) &mbch;
+ sfch = sizeof(fch);
+ smbch = sizeof(mbch);
+
+ if (iconv(conv, &pfch, &sfch, &pmbch, &smbch) == (size_t) -1) {
+ fprintf(stderr, "Could not convert font glyph 0x%02x "
+ "from %s to a multibyte character: %s\n",
+ fch, font_charset, strerror(errno));
+ return;
+ }
+
+ memset(&ps, 0, sizeof(ps));
+    if (mbrtowc(&wch[0], mbch, sizeof(mbch) - smbch, &ps) == (size_t) -1) {
+ fprintf(stderr, "Could not convert font glyph 0x%02x "
+ "from a multibyte character to wchar_t: %s\n",
+ fch, strerror(errno));
+ return;
}
+
+ wch[1] = 0;
+ setcchar(&vga_to_curses[fch], wch, 0, 0, NULL);
}
/* Convert one wchar to UCS-2 */
static uint16_t get_ucs(wchar_t wch, iconv_t conv)
{
- uint16_t ch;
- char *pch, *pwch;
- size_t sch, swch;
-
- pch = (char *) &ch;
- pwch = (char *) &wch;
- sch = sizeof(ch);
- swch = sizeof(wch);
-
- if (iconv(conv, &pwch, &swch, &pch, &sch) == (size_t) -1) {
- fprintf(stderr, "Could not convert 0x%02lx from WCHAR_T to UCS-2: %s\n",
- (unsigned long)wch, strerror(errno));
+ char mbch[MB_LEN_MAX];
+ uint16_t uch;
+ char *pmbch, *puch;
+ size_t smbch, such;
+ mbstate_t ps;
+ int ret;
+
+ memset(&ps, 0, sizeof(ps));
+ ret = wcrtomb(mbch, wch, &ps);
+ if (ret == -1) {
+        fprintf(stderr, "Could not convert 0x%04lx "
+                        "from wchar_t to a multibyte character: %s\n",
+                        (unsigned long)wch, strerror(errno));
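+        /* fall back to U+FFFD, the Unicode replacement character */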
return 0xFFFD;
}
- return ch;
+ pmbch = (char *) mbch;
+ puch = (char *) &uch;
+ smbch = ret;
+ such = sizeof(uch);
+
+ if (iconv(conv, &pmbch, &smbch, &puch, &such) == (size_t) -1) {
+        fprintf(stderr, "Could not convert 0x%04lx "
+                        "from a multibyte character to UCS-2: %s\n",
+                        (unsigned long)wch, strerror(errno));
+ return 0xFFFD;
+ }
+
+ return uch;
}
/*
*/
static void font_setup(void)
{
+ iconv_t ucs2_to_nativecharset;
+ iconv_t nativecharset_to_ucs2;
+ iconv_t font_conv;
+ int i;
+
/*
* Control characters are normally non-printable, but VGA does have
* well-known glyphs for them.
0x25bc
};
- iconv_t ucs_to_wchar_conv;
- iconv_t wchar_to_ucs_conv;
- iconv_t font_conv;
- int i;
-
- ucs_to_wchar_conv = iconv_open("WCHAR_T", "UCS-2");
- if (ucs_to_wchar_conv == (iconv_t) -1) {
+ ucs2_to_nativecharset = iconv_open(nl_langinfo(CODESET), "UCS-2");
+ if (ucs2_to_nativecharset == (iconv_t) -1) {
fprintf(stderr, "Could not convert font glyphs from UCS-2: '%s'\n",
strerror(errno));
exit(1);
}
- wchar_to_ucs_conv = iconv_open("UCS-2", "WCHAR_T");
- if (wchar_to_ucs_conv == (iconv_t) -1) {
- iconv_close(ucs_to_wchar_conv);
+ nativecharset_to_ucs2 = iconv_open("UCS-2", nl_langinfo(CODESET));
+ if (nativecharset_to_ucs2 == (iconv_t) -1) {
+ iconv_close(ucs2_to_nativecharset);
fprintf(stderr, "Could not convert font glyphs to UCS-2: '%s'\n",
strerror(errno));
exit(1);
}
- font_conv = iconv_open("WCHAR_T", font_charset);
+ font_conv = iconv_open(nl_langinfo(CODESET), font_charset);
if (font_conv == (iconv_t) -1) {
- iconv_close(ucs_to_wchar_conv);
- iconv_close(wchar_to_ucs_conv);
+ iconv_close(ucs2_to_nativecharset);
+ iconv_close(nativecharset_to_ucs2);
fprintf(stderr, "Could not convert font glyphs from %s: '%s'\n",
font_charset, strerror(errno));
exit(1);
/* Control characters */
for (i = 0; i <= 0x1F; i++) {
- convert_ucs(i, control_characters[i], ucs_to_wchar_conv);
+ convert_ucs(i, control_characters[i], ucs2_to_nativecharset);
}
for (i = 0x20; i <= 0xFF; i++) {
}
/* DEL */
- convert_ucs(0x7F, 0x2302, ucs_to_wchar_conv);
+ convert_ucs(0x7F, 0x2302, ucs2_to_nativecharset);
if (strcmp(nl_langinfo(CODESET), "UTF-8")) {
/* Non-Unicode capable, use termcap equivalents for those available */
for (i = 0; i <= 0xFF; i++) {
- switch (get_ucs(vga_to_curses[i].chars[0], wchar_to_ucs_conv)) {
+ wchar_t wch[CCHARW_MAX];
+ attr_t attr;
+ short color;
+ int ret;
+
+ ret = getcchar(&vga_to_curses[i], wch, &attr, &color, NULL);
+            if (ret == ERR) {
+                continue;
+            }
+
+ switch (get_ucs(wch[0], nativecharset_to_ucs2)) {
case 0x00a3:
vga_to_curses[i] = *WACS_STERLING;
break;
}
}
}
- iconv_close(ucs_to_wchar_conv);
- iconv_close(wchar_to_ucs_conv);
+ iconv_close(ucs2_to_nativecharset);
+ iconv_close(nativecharset_to_ucs2);
iconv_close(font_conv);
}
}
/* update key and modifier state */
- change_bit(qcode, kbd->keys);
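+    /* Set or clear the bit explicitly: toggling it with change_bit() could
+     * get out of sync if a key-down arrives while the key is already down
+     * (e.g. auto-repeat). */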
+ if (down) {
+ set_bit(qcode, kbd->keys);
+ } else {
+ clear_bit(qcode, kbd->keys);
+ }
switch (qcode) {
case Q_KEY_CODE_SHIFT:
case Q_KEY_CODE_SHIFT_R:
/* compatibility wrapper */
int unix_listen(const char *str, Error **errp)
{
- char *path, *optstr;
- int sock, len;
UnixSocketAddress *saddr;
+ int sock;
saddr = g_new0(UnixSocketAddress, 1);
-
- optstr = strchr(str, ',');
- if (optstr) {
- len = optstr - str;
- if (len) {
- path = g_malloc(len+1);
- snprintf(path, len+1, "%.*s", len, str);
- saddr->path = path;
- }
- } else {
- saddr->path = g_strdup(str);
- }
-
+ saddr->path = g_strdup(str);
sock = unix_listen_saddr(saddr, errp);
-
qapi_free_UnixSocketAddress(saddr);
return sock;
}
static const VGAInterfaceInfo vga_interfaces[VGA_TYPE_MAX] = {
[VGA_NONE] = {
.opt_name = "none",
+        .name = "no graphics card",
},
[VGA_STD] = {
.opt_name = "std",
},
[VGA_XENFB] = {
.opt_name = "xenfb",
+ .name = "Xen paravirtualized framebuffer",
},
};
QemuOpts *fsdev;
QemuOpts *device;
+ warn_report("'-virtfs_synth' is deprecated, please use "
+ "'-fsdev synth' and '-device virtio-9p-...' "
+ "instead");
+
fsdev = qemu_opts_create(qemu_find_opts("fsdev"), "v_synth",
1, NULL);
if (!fsdev) {
qtest_log = optarg;
break;
case QEMU_OPTION_sandbox:
-#ifdef CONFIG_SECCOMP
- opts = qemu_opts_parse_noisily(qemu_find_opts("sandbox"),
- optarg, true);
+ olist = qemu_find_opts("sandbox");
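+            /*
+             * The "sandbox" option group is only registered when seccomp
+             * support is built in, so a failed lookup means this binary was
+             * built without CONFIG_SECCOMP.
+             */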
+ if (!olist) {
+#ifndef CONFIG_SECCOMP
+ error_report("-sandbox support is not enabled "
+ "in this QEMU binary");
+#endif
+ exit(1);
+ }
+
+ opts = qemu_opts_parse_noisily(olist, optarg, true);
if (!opts) {
exit(1);
}
-#else
- error_report("-sandbox support is not enabled "
- "in this QEMU binary");
- exit(1);
-#endif
break;
case QEMU_OPTION_add_fd:
#ifndef _WIN32
}
break;
case QEMU_OPTION_realtime:
+ warn_report("'-realtime mlock=...' is deprecated, please use "
+ "'-overcommit mem-lock=...' instead");
opts = qemu_opts_parse_noisily(qemu_find_opts("realtime"),
optarg, false);
if (!opts) {