From: VMware, Inc <> Date: Tue, 26 Apr 2011 21:14:32 +0000 (-0700) Subject: On running queuepairAllocTest24_l test on Ubuntu9 guest, the guest goes unresponsive X-Git-Tag: 2011.04.25-402641~20 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=6dbefb2f893af326414b4cf71aae1100597e08b7;p=thirdparty%2Fopen-vm-tools.git On running queuepairAllocTest24_l test on Ubuntu9 guest, the guest goes unresponsive We try to allocate the queuepair pages before we make the hypercall (which would eventually hit the restriction in the device). On Windows and Mac OS, we can't allocate a single contiguous block this big, so the allocation fails immediately. On Linux, we allocate each page separately, so what happens is that the system starts thrashing and becomes unresponsive. Signed-off-by: Marcelo Vanzin --- diff --git a/open-vm-tools/modules/linux/vmci/common/vmciQPair.c b/open-vm-tools/modules/linux/vmci/common/vmciQPair.c index 5c4bc92bf..6be0b6a1e 100644 --- a/open-vm-tools/modules/linux/vmci/common/vmciQPair.c +++ b/open-vm-tools/modules/linux/vmci/common/vmciQPair.c @@ -128,6 +128,27 @@ VMCIQPair_Alloc(VMCIQPair **qpair, // OUT VMCIHandle dst = VMCI_MAKE_HANDLE(peer, VMCI_INVALID_ID); VMCIRoute route; + /* + * Restrict the size of a queuepair. The device already enforces a limit + * on the total amount of memory that can be allocated to queuepairs for a + * guest. However, we try to allocate this memory before we make the + * queuepair allocation hypercall. On Windows and Mac OS, we request a + * single, contiguous block, and it will fail if the OS cannot satisfy the + * request. On Linux, we allocate each page separately, which means rather + * than fail, the guest will thrash while it tries to allocate, and will + * become increasingly unresponsive to the point where it appears to be hung. + * So we place a limit on the size of an individual queuepair here, and + * leave the device to enforce the restriction on total queuepair memory. 
+ * (Note that this doesn't prevent all cases; a user with only this much + * physical memory could still get into trouble.) The error used by the + * device is NO_RESOURCES, so use that here too. + */ + + if (produceQSize + consumeQSize < MAX(produceQSize, consumeQSize) || + produceQSize + consumeQSize > VMCI_MAX_GUEST_QP_MEMORY) { + return VMCI_ERROR_NO_RESOURCES; + } + myQPair = VMCI_AllocKernelMem(sizeof *myQPair, VMCI_MEMORY_NONPAGED); if (!myQPair) { return VMCI_ERROR_NO_MEM; diff --git a/open-vm-tools/modules/linux/vmci/linux/vmciKernelIf.c b/open-vm-tools/modules/linux/vmci/linux/vmciKernelIf.c index a4aa6e1f9..ea414a9b0 100644 --- a/open-vm-tools/modules/linux/vmci/linux/vmciKernelIf.c +++ b/open-vm-tools/modules/linux/vmci/linux/vmciKernelIf.c @@ -888,6 +888,17 @@ VMCI_AllocQueue(uint64 size) // IN: size of queue (not including header) sizeof *queue + sizeof *(queue->kernelIf) + numDataPages * sizeof *(queue->kernelIf->page); + /* + * Size should be enforced by VMCIQPair_Alloc(), double-check here. + * Allocating too much on Linux can cause the system to become + * unresponsive, because we allocate page-by-page, and we allow the + * system to wait for pages rather than fail. + */ + if (size > VMCI_MAX_GUEST_QP_MEMORY) { + ASSERT(FALSE); + return NULL; + } + qHeader = (VMCIQueueHeader *)vmalloc(queueSize); if (!qHeader) { return NULL; diff --git a/open-vm-tools/modules/linux/vmci/linux/vmci_version.h b/open-vm-tools/modules/linux/vmci/linux/vmci_version.h index 27ed5b328..6884259eb 100644 --- a/open-vm-tools/modules/linux/vmci/linux/vmci_version.h +++ b/open-vm-tools/modules/linux/vmci/linux/vmci_version.h @@ -25,8 +25,8 @@ #ifndef _VMCI_VERSION_H_ #define _VMCI_VERSION_H_ -#define VMCI_DRIVER_VERSION 9.1.15.0 -#define VMCI_DRIVER_VERSION_COMMAS 9,1,15,0 -#define VMCI_DRIVER_VERSION_STRING "9.1.15.0" +#define VMCI_DRIVER_VERSION 9.1.16.0 +#define VMCI_DRIVER_VERSION_COMMAS 9,1,16,0 +#define VMCI_DRIVER_VERSION_STRING "9.1.16.0" #endif /* _VMCI_VERSION_H_ */