Unicode dirPath = NULL;
Unicode canonPath = NULL; /* result */
+ /*
+ * XXX PR 983286 XXX
+ * Posix_Statfs call seems to take non-trivial time (~50msec for each call)
+ * which is introducing delays in "many vm" tests. Short circuiting this
+ * function till we come up with a better alternate.
+ */
+ goto use_same_path;
+
/*
* absVMDirName should start with /vmfs/volumes/.
*/
}
ret = ioctl(fd, IOCTLCMD_VMFS_FS_GET_ATTR, (char *) *fsAttrs);
-
- close(fd);
if (ret == -1) {
Log(LGPFX" %s: Could not get volume attributes (ret = %d): %s\n",
__func__, ret, Err_Errno2String(errno));
*fsAttrs = NULL;
}
+ close(fd);
+
bail:
Unicode_Free(fullPath);
Unicode_Free(parentPath);
}
+/*
+ *----------------------------------------------------------------------
+ *
+ * File_GetVMFSFSType --
+ *
+ * Get the filesystem type number of the file system on which the
+ * given file/directory resides.
+ *
+ * Caller can specify either a pathname or an already opened fd of
+ * the file/dir whose filesystem type is to be determined.
+ * 'fd' takes precedence over 'pathName' so 'pathName' is used only
+ * if 'fd' is -1.
+ *
+ * Results:
+ * On success : return value 0 and filesystem type number in 'fsTypeNum'.
+ * On failure : return value -1 (errno will be set appropriately).
+ *
+ * Side effects:
+ * On failure errno will be set.
+ *
+ *----------------------------------------------------------------------
+ */
+
+int
+File_GetVMFSFSType(ConstUnicode pathName, // IN: File name to test
+ int fd, // IN: fd of an already opened file
+ uint16 *fsTypeNum) // OUT: Filesystem type number
+{
+ int ret, savedErrno;
+ Bool fdArg = (fd >= 0); /* fd or pathname ? */
+
+ if (!fsTypeNum || (!fdArg && !pathName)) {
+ savedErrno = EINVAL;
+ goto exit;
+ }
+
+ if (!fdArg) {
+ fd = Posix_Open(pathName, O_RDONLY, 0);
+ if (fd < 0) {
+ savedErrno = errno;
+ Log(LGPFX" %s : Could not open %s : %s\n", __func__, UTF8(pathName),
+ Err_Errno2String(savedErrno));
+ goto exit;
+ }
+ }
+
+ ret = ioctl(fd, IOCTLCMD_VMFS_GET_FSTYPE, fsTypeNum);
+ /*
+ * Save errno to avoid close() affecting it.
+ */
+ savedErrno = errno;
+ if (!fdArg) {
+ close(fd);
+ }
+
+ if (ret == -1) {
+ Log(LGPFX" %s : Could not get filesystem type for %s (fd %d) : %s\n",
+ __func__, (!fdArg ? UTF8(pathName) : "__na__"), fd,
+ Err_Errno2String(savedErrno));
+ goto exit;
+ }
+
+ return 0;
+
+exit:
+ errno = savedErrno;
+ ASSERT(errno != 0);
+ return -1;
+}
+
+
/*
*----------------------------------------------------------------------
*
int File_GetVMFSAttributes(ConstUnicode pathName,
struct FS_PartitionListResult **fsAttrs);
+int File_GetVMFSFSType(ConstUnicode pathName,
+ int fd,
+ uint16 *fsTypeNum);
int File_GetVMFSVersion(ConstUnicode pathName,
uint32 *versionNum);
int File_GetVMFSBlockSize(ConstUnicode pathName,
#define ATOMIC_USE_FENCE
#endif
+/*
+ * Starting with vSphere 2014, we no longer support ESX on AMD Rev F.
+ * Thus, we can eliminate all dynamic checks for whether to enable
+ * the Errata 147 work-around when compiling many of our binaries.
+ * However, we use an opt-in approach here rather than assuming all
+ * parts of our builds are safe. For example, the "fdm" binary from
+ * a new build may time travel back to hosts running older versions
+ * of ESX on Rev F, so "fdm" continues to require the ability to
+ * dynamically enable the errata work-around. With vSphere 2017,
+ * this will no longer be required as the oldest version of ESX that
+ * VC 2017 will support is ESX 2014 (which won't run on Rev F).
+ */
+#if (!defined(VMX86_SERVER) || \
+ (!defined(VMX86_VMX) && !defined(VMKERNEL) && \
+ !defined(VMM) && !defined(VMCORE)))
+#define MAY_NEED_AMD_REVF_WORKAROUND 1
+#else
+#define MAY_NEED_AMD_REVF_WORKAROUND 0
+#endif
+
+#if MAY_NEED_AMD_REVF_WORKAROUND
#if defined(VMATOMIC_IMPORT_DLLDATA)
VMX86_EXTERN_DATA Bool AtomicUseFence;
#else
EXTERN Bool AtomicUseFence;
#endif
-
EXTERN Bool atomicFenceInitialized;
+#else /* MAY_NEED_AMD_REVF_WORKAROUND */
+#define AtomicUseFence FALSE
+#define atomicFenceInitialized TRUE
+#endif /* MAY_NEED_AMD_REVF_WORKAROUND */
+
void AtomicInitFence(void);
Atomic_Init(void)
{
#ifdef ATOMIC_USE_FENCE
- if (!atomicFenceInitialized) {
+ if (MAY_NEED_AMD_REVF_WORKAROUND && !atomicFenceInitialized) {
AtomicInitFence();
}
#endif
}
static INLINE void
-Atomic_SetFence(Bool fenceAfterLock) /* IN: TRUE to enable lfence */
- /* FALSE to disable. */
+Atomic_SetFence(Bool fenceAfterLock)
{
+ (void)fenceAfterLock; /* Work around unused parameter. */
+#if MAY_NEED_AMD_REVF_WORKAROUND
AtomicUseFence = fenceAfterLock;
-#if defined(__VMKERNEL__)
- extern void Atomic_SetFenceVMKAPI(Bool fenceAfterLock);
- Atomic_SetFenceVMKAPI(fenceAfterLock);
-#endif
atomicFenceInitialized = TRUE;
+#endif
}
static INLINE void
AtomicEpilogue(void)
{
-#ifdef ATOMIC_USE_FENCE
+#if MAY_NEED_AMD_REVF_WORKAROUND && defined(ATOMIC_USE_FENCE)
#ifdef VMM
/* The monitor conditionally patches out the lfence when not needed.*/
/* Construct a MonitorPatchTextEntry in the .patchtext section. */
- asm volatile ("1:\n\t"
- "lfence\n\t"
- "2:\n\t"
- ".pushsection .patchtext\n\t"
- ".quad 1b\n\t"
- ".quad 2b\n\t"
- ".popsection\n\t" ::: "memory");
+ asm volatile ("1:\n\t"
+ "lfence\n\t"
+ "2:\n\t"
+ ".pushsection .patchtext\n\t"
+ ".quad 1b\n\t"
+ ".quad 2b\n\t"
+ ".popsection\n\t" ::: "memory");
#else
- if (UNLIKELY(AtomicUseFence)) {
- asm volatile ("lfence" ::: "memory");
- }
+ if (UNLIKELY(AtomicUseFence)) {
+ asm volatile ("lfence" ::: "memory");
+ }
#endif
#endif
}
#define FUSION_VERSION "e.x.p"
#define BOOMERANG_VERSION "e.x.p"
-#define HBR_SERVER_VERSION "e.x.p"
// These must match VIE_FILEVERSION above
#define SYSIMAGE_VERSION "6.0.0"
#elif defined(VMX86_BOOMERANG)
# define PRODUCT_VERSION_NUMBER BOOMERANG_VERSION
#elif defined(VMX86_HBR_SERVER)
-# define PRODUCT_VERSION_NUMBER HBR_SERVER_VERSION
+# define PRODUCT_VERSION_NUMBER ESX_VERSION
#elif defined(VMX86_VIEW)
# define PRODUCT_VERSION_NUMBER VIEW_VERSION
#endif
}
}
- if (vmx86_debug && !MXRecLockIsOwner(&lock->recursiveLock)) {
- int lockCount = MXRecLockCount(&lock->recursiveLock);
+ if (vmx86_debug) {
+ if (MXRecLockCount(&lock->recursiveLock) == 0) {
+ MXUserDumpAndPanic(&lock->header,
+ "%s: Release of an unacquired exclusive lock\n",
+ __FUNCTION__);
+ }
- MXUserDumpAndPanic(&lock->header,
- "%s: Non-owner release of an %s exclusive lock\n",
- __FUNCTION__,
- lockCount == 0 ? "unacquired" : "acquired");
+ if (!MXRecLockIsOwner(&lock->recursiveLock)) {
+ MXUserDumpAndPanic(&lock->header,
+ "%s: Non-owner release of a exclusive lock\n",
+ __FUNCTION__);
+ }
}
MXUserReleaseTracking(&lock->header);
}
if (UNLIKELY(myContext->state == RW_UNLOCKED)) {
- uint32 lockCount = Atomic_Read(&lock->holderCount);
-
MXUserDumpAndPanic(&lock->header,
- "%s: Non-owner release of an %s read-write lock\n",
- __FUNCTION__,
- lockCount == 0 ? "unacquired" : "acquired");
+ "%s: Release of an unacquired read-write lock\n",
+ __FUNCTION__);
}
MXUserReleaseTracking(&lock->header);
}
}
- if (vmx86_debug && !MXRecLockIsOwner(&lock->recursiveLock)) {
- int lockCount = MXRecLockCount(&lock->recursiveLock);
+ if (vmx86_debug) {
+ if (MXRecLockCount(&lock->recursiveLock) == 0) {
+ MXUserDumpAndPanic(&lock->header,
+ "%s: Release of an unacquired recursive lock\n",
+ __FUNCTION__);
+ }
- MXUserDumpAndPanic(&lock->header,
- "%s: Non-owner release of an %s recursive lock\n",
- __FUNCTION__,
- lockCount == 0 ? "unacquired" : "acquired");
+ if (!MXRecLockIsOwner(&lock->recursiveLock)) {
+ MXUserDumpAndPanic(&lock->header,
+ "%s: Non-owner release of an recursive lock\n",
+ __FUNCTION__);
+ }
}
MXUserReleaseTracking(&lock->header);
*
* Support for atomic instructions.
*
- * This is the user-level version.
+ * This is the user-level and vmkernel version.
* The monitor-only version is in vmcore/vmm/main.
*/
#include "vmk_exports.h"
+/*
+ * AMD Rev E/F CPUs suffer from erratum 147 (see AMD docs). Our work-around
+ * is to execute a "fence" after every atomic instruction. Since this is
+ * expensive we conditionalize on "AtomicUseFence".
+ * ESX no longer supports any of the CPUs, so for SERVER builds neither
+ * the vmx nor the vmkernel define these variables in order to force all
+ * code in these (performance critical) components to use the constant
+ * version of AtomicUseFence from vm_atomic.h.
+ * For other components, we continue to define the variables to allow the
+ * code to work whether it is compiled with VMX86_SERVER set or not. This
+ * is conservative but the performance penalty should be minimal. And it
+ * is a (longer term) temporary situation: when we eventually remove Rev F
+ * support from our hosted products, this will all go away.
+ */
+#if !defined(VMKERNEL) && !(defined(VMX86_VMX) && defined(VMX86_SERVER))
+#undef AtomicUseFence
+#undef atomicFenceInitialized
Bool AtomicUseFence;
-VMK_KERNEL_EXPORT(AtomicUseFence);
-
Bool atomicFenceInitialized;
+#endif
/*
AtomicInitFence(void)
{
Bool needFence = FALSE;
-#if defined(__i386__) || defined(__x86_64__)
+#if MAY_NEED_AMD_REVF_WORKAROUND && (defined(__i386__) || defined(__x86_64__))
{
CPUIDRegs regs;
/*
* capabilities - not all of these are implemented in the virtual HW
- * (eg VLAN support is in the virtual switch) so even vlance
+ * (eg VLAN support is in the virtual switch) so even vlance
* can use them
*/
-#define VMNET_CAP_SG 0x0001 /* Can do scatter-gather transmits. */
-#define VMNET_CAP_IP4_CSUM 0x0002 /* Can checksum only TCP/UDP over IPv4. */
-#define VMNET_CAP_HW_CSUM 0x0004 /* Can checksum all packets. */
-#define VMNET_CAP_HIGH_DMA 0x0008 /* Can DMA to high memory. */
-#define VMNET_CAP_TOE 0x0010 /* Supports TCP/IP offload. */
-#define VMNET_CAP_TSO 0x0020 /* Supports TCP Segmentation offload */
-#define VMNET_CAP_SW_TSO 0x0040 /* Supports SW TCP Segmentation */
-#define VMNET_CAP_VMXNET_APROM 0x0080 /* Vmxnet APROM support */
-#define VMNET_CAP_HW_TX_VLAN 0x0100 /* Can we do VLAN tagging in HW */
-#define VMNET_CAP_HW_RX_VLAN 0x0200 /* Can we do VLAN untagging in HW */
-#define VMNET_CAP_SW_VLAN 0x0400 /* Can we do VLAN tagging/untagging in SW */
-#define VMNET_CAP_WAKE_PCKT_RCV 0x0800 /* Can wake on network packet recv? */
-#define VMNET_CAP_ENABLE_INT_INLINE 0x1000 /* Enable Interrupt Inline */
-#define VMNET_CAP_ENABLE_HEADER_COPY 0x2000 /* copy header for vmkernel */
-#define VMNET_CAP_TX_CHAIN 0x4000 /* Guest can use multiple tx entries for a pkt */
-#define VMNET_CAP_RX_CHAIN 0x8000 /* a pkt can span multiple rx entries */
-#define VMNET_CAP_LPD 0x10000 /* large pkt delivery */
-#define VMNET_CAP_BPF 0x20000 /* BPF Support in VMXNET Virtual Hardware */
-#define VMNET_CAP_SG_SPAN_PAGES 0x40000 /* Can do scatter-gather span multiple pages transmits. */
-#define VMNET_CAP_IP6_CSUM 0x80000 /* Can do IPv6 csum offload. */
-#define VMNET_CAP_TSO6 0x100000 /* Can do TSO segmentation offload for IPv6 pkts. */
-#define VMNET_CAP_TSO256k 0x200000 /* Can do TSO segmentation offload for pkts up to 256kB. */
-#define VMNET_CAP_UPT 0x400000 /* Support UPT */
-#define VMNET_CAP_RDONLY_INETHDRS 0x800000 /* Modifies inet headers for TSO/CSUm */
-#define VMNET_CAP_ENCAP 0x1000000 /* NPA not used, so redefining for ENCAP support */
-#define VMNET_CAP_DCB 0x2000000 /* Support DCB */
-#define VMNET_CAP_OFFLOAD_8OFFSET 0x4000000 /* supports 8bit parameterized offsets */
-#define VMNET_CAP_OFFLOAD_16OFFSET 0x8000000 /* supports 16bit parameterized offsets */
-#define VMNET_CAP_IP6_CSUM_EXT_HDRS 0x10000000 /* support csum of ip6 ext hdrs */
-#define VMNET_CAP_TSO6_EXT_HDRS 0x20000000 /* support TSO for ip6 ext hdrs */
-#define VMNET_CAP_SCHED 0x40000000 /* compliant with network scheduling */
-#define VMNET_CAP_SRIOV 0x80000000 /* Supports SR-IOV */
+#define VMNET_CAP_SG CONST64U(0x0001) /* Can do scatter-gather transmits. */
+#define VMNET_CAP_IP4_CSUM CONST64U(0x0002) /* Can checksum only TCP/UDP over IPv4. */
+#define VMNET_CAP_HW_CSUM CONST64U(0x0004) /* Can checksum all packets. */
+#define VMNET_CAP_HIGH_DMA CONST64U(0x0008) /* Can DMA to high memory. */
+#define VMNET_CAP_TOE CONST64U(0x0010) /* Supports TCP/IP offload. */
+#define VMNET_CAP_TSO CONST64U(0x0020) /* Supports TCP Segmentation offload */
+#define VMNET_CAP_SW_TSO CONST64U(0x0040) /* Supports SW TCP Segmentation */
+#define VMNET_CAP_VMXNET_APROM CONST64U(0x0080) /* Vmxnet APROM support */
+#define VMNET_CAP_HW_TX_VLAN CONST64U(0x0100) /* Can we do VLAN tagging in HW */
+#define VMNET_CAP_HW_RX_VLAN CONST64U(0x0200) /* Can we do VLAN untagging in HW */
+#define VMNET_CAP_SW_VLAN CONST64U(0x0400) /* Can we do VLAN tagging/untagging in SW */
+#define VMNET_CAP_WAKE_PCKT_RCV CONST64U(0x0800) /* Can wake on network packet recv? */
+#define VMNET_CAP_ENABLE_INT_INLINE CONST64U(0x1000) /* Enable Interrupt Inline */
+#define VMNET_CAP_ENABLE_HEADER_COPY CONST64U(0x2000) /* copy header for vmkernel */
+#define VMNET_CAP_TX_CHAIN CONST64U(0x4000) /* Guest can use multiple tx entries for a pkt */
+#define VMNET_CAP_RX_CHAIN CONST64U(0x8000) /* a pkt can span multiple rx entries */
+#define VMNET_CAP_LPD CONST64U(0x10000) /* large pkt delivery */
+#define VMNET_CAP_BPF CONST64U(0x20000) /* BPF Support in VMXNET Virtual Hardware */
+#define VMNET_CAP_SG_SPAN_PAGES CONST64U(0x40000) /* Can do scatter-gather span multiple pages transmits. */
+#define VMNET_CAP_IP6_CSUM CONST64U(0x80000) /* Can do IPv6 csum offload. */
+#define VMNET_CAP_TSO6 CONST64U(0x100000) /* Can do TSO segmentation offload for IPv6 pkts. */
+#define VMNET_CAP_TSO256k CONST64U(0x200000) /* Can do TSO segmentation offload for pkts up to 256kB. */
+#define VMNET_CAP_UPT CONST64U(0x400000) /* Support UPT */
+#define VMNET_CAP_RDONLY_INETHDRS CONST64U(0x800000) /* Modifies inet headers for TSO/CSUm */
+#define VMNET_CAP_ENCAP CONST64U(0x1000000) /* NPA not used, so redefining for ENCAP support */
+#define VMNET_CAP_DCB CONST64U(0x2000000) /* Support DCB */
+#define VMNET_CAP_OFFLOAD_8OFFSET CONST64U(0x4000000) /* supports 8bit parameterized offsets */
+#define VMNET_CAP_OFFLOAD_16OFFSET CONST64U(0x8000000) /* supports 16bit parameterized offsets */
+#define VMNET_CAP_IP6_CSUM_EXT_HDRS CONST64U(0x10000000) /* support csum of ip6 ext hdrs */
+#define VMNET_CAP_TSO6_EXT_HDRS CONST64U(0x20000000) /* support TSO for ip6 ext hdrs */
+#define VMNET_CAP_SCHED CONST64U(0x40000000) /* compliant with network scheduling */
+#define VMNET_CAP_SRIOV CONST64U(0x80000000) /* Supports SR-IOV */
#define VMNET_CAP_SG_TX VMNET_CAP_SG
-#define VMNET_CAP_SG_RX 0x200000000UL /* Scatter-gather receive capability */
-#define VMNET_CAP_PRIV_STATS 0x400000000UL /* Driver supports accessing private stats */
-#define VMNET_CAP_LINK_STATUS_SET 0x800000000UL /* Driver supports changing link status */
-#define VMNET_CAP_MAC_ADDR_SET 0x1000000000UL /* Driver supports changing the interface MAC address */
-#define VMNET_CAP_COALESCE_PARAMS 0x2000000000UL /* Driver supports changing interrupt coalescing parameters */
-#define VMNET_CAP_VLAN_FILTER 0x4000000000UL /* VLAN Filtering capability */
-#define VMNET_CAP_WAKE_ON_LAN 0x8000000000UL /* Wake-On-LAN capability */
-#define VMNET_CAP_NETWORK_DUMP 0x10000000000UL /* Network core dumping capability */
-#define VMNET_CAP_MULTI_QUEUE 0x20000000000UL /* Multiple queue capability */
-#define VMNET_CAP_EEPROM 0x40000000000UL /* EEPROM dump capability */
-#define VMNET_CAP_REGDUMP 0x80000000000UL /* Register dump capability */
-#define VMNET_CAP_SELF_TEST 0x100000000000UL /* Self-test capability */
-#define VMNET_CAP_PAUSE_PARAMS 0x200000000000UL /* Pause frame parameter adjusting */
-#define VMNET_CAP_RESTART_NEG 0x400000000000UL /* Ability to restart negotiation of link speed/duplexity */
-#define VMNET_CAP_LRO 0x800000000000UL /* Hardware supported LRO */
-#define VMNET_CAP_LEGACY 0x8000000000000000UL /* Uplink is compatible with vmklinux drivers */
+#define VMNET_CAP_SG_RX CONST64U(0x200000000) /* Scatter-gather receive capability */
+#define VMNET_CAP_PRIV_STATS CONST64U(0x400000000) /* Driver supports accessing private stats */
+#define VMNET_CAP_LINK_STATUS_SET CONST64U(0x800000000) /* Driver supports changing link status */
+#define VMNET_CAP_MAC_ADDR_SET CONST64U(0x1000000000) /* Driver supports changing the interface MAC address */
+#define VMNET_CAP_COALESCE_PARAMS CONST64U(0x2000000000) /* Driver supports changing interrupt coalescing parameters */
+#define VMNET_CAP_VLAN_FILTER CONST64U(0x4000000000) /* VLAN Filtering capability */
+#define VMNET_CAP_WAKE_ON_LAN CONST64U(0x8000000000) /* Wake-On-LAN capability */
+#define VMNET_CAP_NETWORK_DUMP CONST64U(0x10000000000) /* Network core dumping capability */
+#define VMNET_CAP_MULTI_QUEUE CONST64U(0x20000000000) /* Multiple queue capability */
+#define VMNET_CAP_EEPROM CONST64U(0x40000000000) /* EEPROM dump capability */
+#define VMNET_CAP_REGDUMP CONST64U(0x80000000000) /* Register dump capability */
+#define VMNET_CAP_SELF_TEST CONST64U(0x100000000000) /* Self-test capability */
+#define VMNET_CAP_PAUSE_PARAMS CONST64U(0x200000000000) /* Pause frame parameter adjusting */
+#define VMNET_CAP_RESTART_NEG CONST64U(0x400000000000) /* Ability to restart negotiation of link speed/duplexity */
+#define VMNET_CAP_LRO CONST64U(0x800000000000) /* Hardware supported LRO */
+#define VMNET_CAP_OFFLOAD_ALIGN_ANY CONST64U(0x1000000000000) /* Nic requires no header alignment */
+#define VMNET_CAP_LEGACY CONST64U(0x8000000000000000) /* Uplink is compatible with vmklinux drivers */
#endif // _VMNET_DEF_H_