#include "hgfsDirNotify.h"
#include "hgfsTransport.h"
#include "userlock.h"
+#include "poll.h"
#if defined(_WIN32)
#include <io.h>
/* Session related callbacks. */
static void HgfsServerSessionReceive(HgfsPacket *packet,
- void *clientData,
- HgfsReceiveFlags flags);
+ void *clientData);
static Bool HgfsServerSessionConnect(void *transportData,
HgfsServerChannelCallbacks *channelCbTable,
void **clientData);
uint32 payloadSize,
HgfsHeader const *packetIn,
HgfsHeader *header);
+static void HgfsServer_ProcessRequest(void *data);
+void HgfsServer_ReplyWithError(HgfsPacket *packet,
+ const char *metaPacket,
+ HgfsStatus status,
+ Bool v4header,
+ HgfsSessionInfo *session);
+
/*
*----------------------------------------------------------------------------
/* Minimal size of the request packet */
unsigned int minReqSize;
+
+ /* How do you process the request {sync, async} ? */
+ RequestHint reqType;
+
} const handlers[] = {
- { HgfsServerOpen, sizeof (HgfsRequestOpen) },
- { HgfsServerRead, sizeof (HgfsRequestRead) },
- { HgfsServerWrite, sizeof (HgfsRequestWrite) },
- { HgfsServerClose, sizeof (HgfsRequestClose) },
- { HgfsServerSearchOpen, sizeof (HgfsRequestSearchOpen) },
- { HgfsServerSearchRead, sizeof (HgfsRequestSearchRead) },
- { HgfsServerSearchClose, sizeof (HgfsRequestSearchClose) },
- { HgfsServerGetattr, sizeof (HgfsRequestGetattr) },
- { HgfsServerSetattr, sizeof (HgfsRequestSetattr) },
- { HgfsServerCreateDir, sizeof (HgfsRequestCreateDir) },
- { HgfsServerDeleteFile, sizeof (HgfsRequestDelete) },
- { HgfsServerDeleteDir, sizeof (HgfsRequestDelete) },
- { HgfsServerRename, sizeof (HgfsRequestRename) },
- { HgfsServerQueryVolume, sizeof (HgfsRequestQueryVolume) },
-
- { HgfsServerOpen, sizeof (HgfsRequestOpenV2) },
- { HgfsServerGetattr, sizeof (HgfsRequestGetattrV2) },
- { HgfsServerSetattr, sizeof (HgfsRequestSetattrV2) },
- { HgfsServerSearchRead, sizeof (HgfsRequestSearchReadV2) },
- { HgfsServerSymlinkCreate, sizeof (HgfsRequestSymlinkCreate) },
- { HgfsServerServerLockChange, sizeof (HgfsRequestServerLockChange) },
- { HgfsServerCreateDir, sizeof (HgfsRequestCreateDirV2) },
- { HgfsServerDeleteFile, sizeof (HgfsRequestDeleteV2) },
- { HgfsServerDeleteDir, sizeof (HgfsRequestDeleteV2) },
- { HgfsServerRename, sizeof (HgfsRequestRenameV2) },
-
- { HgfsServerOpen, HGFS_SIZEOF_OP(HgfsRequestOpenV3) },
- { HgfsServerRead, HGFS_SIZEOF_OP(HgfsRequestReadV3) },
- { HgfsServerWrite, HGFS_SIZEOF_OP(HgfsRequestWriteV3) },
- { HgfsServerClose, HGFS_SIZEOF_OP(HgfsRequestCloseV3) },
- { HgfsServerSearchOpen, HGFS_SIZEOF_OP(HgfsRequestSearchOpenV3) },
- { HgfsServerSearchRead, HGFS_SIZEOF_OP(HgfsRequestSearchReadV3) },
- { HgfsServerSearchClose, HGFS_SIZEOF_OP(HgfsRequestSearchCloseV3) },
- { HgfsServerGetattr, HGFS_SIZEOF_OP(HgfsRequestGetattrV3) },
- { HgfsServerSetattr, HGFS_SIZEOF_OP(HgfsRequestSetattrV3) },
- { HgfsServerCreateDir, HGFS_SIZEOF_OP(HgfsRequestCreateDirV3) },
- { HgfsServerDeleteFile, HGFS_SIZEOF_OP(HgfsRequestDeleteV3) },
- { HgfsServerDeleteDir, HGFS_SIZEOF_OP(HgfsRequestDeleteV3) },
- { HgfsServerRename, HGFS_SIZEOF_OP(HgfsRequestRenameV3) },
- { HgfsServerQueryVolume, HGFS_SIZEOF_OP(HgfsRequestQueryVolumeV3) },
- { HgfsServerSymlinkCreate, HGFS_SIZEOF_OP(HgfsRequestSymlinkCreateV3) },
- { HgfsServerServerLockChange, sizeof (HgfsRequestServerLockChange) },
- { HgfsServerWriteWin32Stream, HGFS_SIZEOF_OP(HgfsRequestWriteWin32StreamV3) },
+ { HgfsServerOpen, sizeof (HgfsRequestOpen), REQ_SYNC },
+ { HgfsServerRead, sizeof (HgfsRequestRead), REQ_SYNC },
+ { HgfsServerWrite, sizeof (HgfsRequestWrite), REQ_SYNC },
+ { HgfsServerClose, sizeof (HgfsRequestClose), REQ_SYNC },
+ { HgfsServerSearchOpen, sizeof (HgfsRequestSearchOpen), REQ_SYNC },
+ { HgfsServerSearchRead, sizeof (HgfsRequestSearchRead), REQ_SYNC },
+ { HgfsServerSearchClose, sizeof (HgfsRequestSearchClose), REQ_SYNC },
+ { HgfsServerGetattr, sizeof (HgfsRequestGetattr), REQ_SYNC },
+ { HgfsServerSetattr, sizeof (HgfsRequestSetattr), REQ_SYNC },
+ { HgfsServerCreateDir, sizeof (HgfsRequestCreateDir), REQ_SYNC },
+ { HgfsServerDeleteFile, sizeof (HgfsRequestDelete), REQ_SYNC },
+ { HgfsServerDeleteDir, sizeof (HgfsRequestDelete), REQ_SYNC },
+ { HgfsServerRename, sizeof (HgfsRequestRename), REQ_SYNC },
+ { HgfsServerQueryVolume, sizeof (HgfsRequestQueryVolume), REQ_SYNC },
+
+ { HgfsServerOpen, sizeof (HgfsRequestOpenV2), REQ_SYNC },
+ { HgfsServerGetattr, sizeof (HgfsRequestGetattrV2), REQ_SYNC },
+ { HgfsServerSetattr, sizeof (HgfsRequestSetattrV2), REQ_SYNC },
+ { HgfsServerSearchRead, sizeof (HgfsRequestSearchReadV2), REQ_SYNC },
+ { HgfsServerSymlinkCreate, sizeof (HgfsRequestSymlinkCreate), REQ_SYNC },
+ { HgfsServerServerLockChange, sizeof (HgfsRequestServerLockChange), REQ_SYNC },
+ { HgfsServerCreateDir, sizeof (HgfsRequestCreateDirV2), REQ_SYNC },
+ { HgfsServerDeleteFile, sizeof (HgfsRequestDeleteV2), REQ_SYNC },
+ { HgfsServerDeleteDir, sizeof (HgfsRequestDeleteV2), REQ_SYNC },
+ { HgfsServerRename, sizeof (HgfsRequestRenameV2), REQ_SYNC },
+
+ { HgfsServerOpen, HGFS_SIZEOF_OP(HgfsRequestOpenV3), REQ_SYNC },
+ { HgfsServerRead, HGFS_SIZEOF_OP(HgfsRequestReadV3), REQ_SYNC },
+ { HgfsServerWrite, HGFS_SIZEOF_OP(HgfsRequestWriteV3), REQ_SYNC },
+ { HgfsServerClose, HGFS_SIZEOF_OP(HgfsRequestCloseV3), REQ_SYNC },
+ { HgfsServerSearchOpen, HGFS_SIZEOF_OP(HgfsRequestSearchOpenV3), REQ_SYNC },
+ { HgfsServerSearchRead, HGFS_SIZEOF_OP(HgfsRequestSearchReadV3), REQ_SYNC },
+ { HgfsServerSearchClose, HGFS_SIZEOF_OP(HgfsRequestSearchCloseV3), REQ_SYNC },
+ { HgfsServerGetattr, HGFS_SIZEOF_OP(HgfsRequestGetattrV3), REQ_SYNC },
+ { HgfsServerSetattr, HGFS_SIZEOF_OP(HgfsRequestSetattrV3), REQ_SYNC },
+ { HgfsServerCreateDir, HGFS_SIZEOF_OP(HgfsRequestCreateDirV3), REQ_SYNC },
+ { HgfsServerDeleteFile, HGFS_SIZEOF_OP(HgfsRequestDeleteV3), REQ_SYNC },
+ { HgfsServerDeleteDir, HGFS_SIZEOF_OP(HgfsRequestDeleteV3), REQ_SYNC },
+ { HgfsServerRename, HGFS_SIZEOF_OP(HgfsRequestRenameV3), REQ_SYNC },
+ { HgfsServerQueryVolume, HGFS_SIZEOF_OP(HgfsRequestQueryVolumeV3), REQ_SYNC },
+ { HgfsServerSymlinkCreate, HGFS_SIZEOF_OP(HgfsRequestSymlinkCreateV3), REQ_SYNC },
+ { HgfsServerServerLockChange, sizeof (HgfsRequestServerLockChange), REQ_SYNC },
+ { HgfsServerWriteWin32Stream, HGFS_SIZEOF_OP(HgfsRequestWriteWin32StreamV3), REQ_SYNC },
/*
* XXX
 * Will be replaced with the real thing during the merge with another outstanding
* For now just set min size big enough so request gets rejected when
* such request comes from the client.
*/
- { NULL, 0xffffff }, // Implemented in another change
- { NULL, 0xffffff }, // Implemented in another change
- { HgfsServerRead, HGFS_SIZEOF_OP(HgfsRequestReadV3) },
- { HgfsServerWrite, HGFS_SIZEOF_OP(HgfsRequestWriteV3) },
+ { NULL, 0xffffff, REQ_ASYNC }, // Implemented in another change
+ { NULL, 0xffffff, REQ_ASYNC }, // Implemented in another change
+ { HgfsServerRead, HGFS_SIZEOF_OP(HgfsRequestReadV3), REQ_SYNC },
+ { HgfsServerWrite, HGFS_SIZEOF_OP(HgfsRequestWriteV3), REQ_SYNC },
};
static void
HgfsServerSessionReceive(HgfsPacket *packet, // IN: Hgfs Packet
- void *clientData, // IN: session info
- HgfsReceiveFlags flags) // IN: flags to indicate processing
+ void *clientData) // IN: session info
{
HgfsSessionInfo *session = (HgfsSessionInfo *)clientData;
HgfsRequest *request;
HgfsOp op;
HgfsStatus status;
Bool v4header = FALSE;
- HgfsInputParam input;
+ HgfsInputParam *input;
size_t metaPacketSize;
char *metaPacket;
if (session->state == HGFS_SESSION_STATE_CLOSED) {
LOG(4, ("%s: %d: Received packet after disconnected.\n", __FUNCTION__,
__LINE__));
-
return;
}
/* Increment the session's reference count until we send the reply. */
HgfsServerSessionGet(session);
- id = request->id;
+ packet->id = id = request->id;
op = request->op;
/* If it is a V4 packet then handle it appropriately. */
- if (HGFS_V4_LEGACY_OPCODE == op) {
+ if (HGFS_V4_LEGACY_OPCODE == op) {
HgfsHeader *header = (HgfsHeader *)metaPacket;
if (metaPacketSize < sizeof *header) {
status = HGFS_STATUS_PROTOCOL_ERROR;
HGFS_ASSERT_MINIMUM_OP(op);
if (op < sizeof handlers / sizeof handlers[0]) {
if (metaPacketSize >= handlers[op].minReqSize) {
- HgfsInternalStatus internalStatus;
- input.metaPacket = metaPacket;
- input.metaPacketSize = metaPacketSize;
- input.session = session;
- input.packet = packet;
- internalStatus = (*handlers[op].handler)(&input);
- status = HgfsConvertFromInternalStatus(internalStatus);
+ input = Util_SafeMalloc(sizeof *input);
+ input->metaPacket = NULL;
+ input->metaPacketSize = 0;
+ input->session = session;
+ input->packet = packet;
+ input->v4header = v4header;
+ input->op = op;
+
+ /*
+ * Do the decision making here, whether we want to process request
+ * synchronously or asynchronously. Various factors to consider:
+ *
+ * - Use hints from the client, for instance, windows OS explicitly
+ * tells the file system whether request is async or not.
+ * - Determine statically - Simple to reason out, Simple to code
+ */
+ if (packet->supportsAsync &&
+ ((handlers[op].reqType == REQ_ASYNC) || HGFS_DEBUG_ASYNC)) {
+ /*
+ * Asynchronous processing is supported by the transport.
+ * We can release mappings here and reacquire when needed.
+ */
+ HSPU_PutMetaPacket(packet, session);
+ packet->processedAsync = TRUE;
+ LOG(4, ("%s: %d: @@Async\n", __FUNCTION__, __LINE__));
+#ifndef VMX86_TOOLS
+ /* Remove pending requests during poweroff */
+ Poll_Callback(POLL_CS_MAIN,
+ POLL_FLAG_REMOVE_AT_POWEROFF,
+ HgfsServer_ProcessRequest,
+ input,
+ POLL_REALTIME,
+ 1000,
+ NULL);
+#else
+ /* Tools code should never process request async */
+ ASSERT(0);
+#endif
+ /* free(input) in HgfsServer_ProcessRequest */
+ } else {
+ LOG(4, ("%s: %d: ##Sync\n", __FUNCTION__, __LINE__));
+ packet->processedAsync = FALSE;
+ input->metaPacket = metaPacket;
+ input->metaPacketSize = metaPacketSize;
+
+ HgfsServer_ProcessRequest(input);
+ /* free(input) in HgfsServer_ProcessRequest */
+ }
+ return;
} else {
/*
* The input packet is smaller than the minimal size needed for the
err:
/* Send error if we fail to process the op. */
if (status != HGFS_STATUS_SUCCESS) {
- char *packetOut;
- uint32 replySize;
- size_t replyPacketSize;
- if (v4header) {
- HgfsHeader *header;
- replyPacketSize = sizeof *header;
- header = HSPU_GetReplyPacket(packet, &replyPacketSize, session);
- if (!header || sizeof *header > replyPacketSize) {
- /*
- * Transport should probably check for minimum hgfs packet size.
- * How should we send an error back if there is no meta packet ?
- */
- return;
- }
- HgfsPackReplyHeaderV4(status, 0, (HgfsHeader *)metaPacket, header);
- packetOut = (char *)header;
- replySize = sizeof *header;
- } else {
- HgfsReply *reply;
- replyPacketSize = sizeof *reply;
- reply = HSPU_GetReplyPacket(packet, &replyPacketSize, session);
- if (!reply || sizeof *reply > replyPacketSize) {
- /*
- * Transport should probably check for minimum hgfs packet size.
- * How should we send an error back if there is no meta packet ?
- */
- return;
- }
- reply->id = id;
- reply->status = status;
- packetOut = (char *)reply;
- replySize = sizeof *reply;
+ HgfsServer_ReplyWithError(packet, metaPacket, status, v4header, session);
+ }
+}
+
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * HgfsServer_ProcessRequest --
+ *
+ *    Process an incoming request: reacquire the meta packet mapping if it
+ *    was released for asynchronous handling, dispatch to the op handler,
+ *    and reply with an error packet if the handler fails.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * Guest memory mappings may be established.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static void
+HgfsServer_ProcessRequest(void *data)   // IN: heap-allocated HgfsInputParam
+{
+   HgfsStatus status;
+   HgfsInternalStatus internalStatus;
+   HgfsInputParam *input = (HgfsInputParam *)data;
+
+   /*
+    * On the asynchronous path HgfsServerSessionReceive released the meta
+    * packet mapping before scheduling us via Poll_Callback; reacquire it
+    * here. On the synchronous path metaPacket was filled in by the caller.
+    */
+   if (!input->metaPacket) {
+      input->metaPacket = HSPU_GetMetaPacket(input->packet,
+                                             &input->metaPacketSize,
+                                             input->session);
+   }
+   ASSERT(input->metaPacket);
+
+   /* Dispatch to the per-op handler selected by HgfsServerSessionReceive. */
+   internalStatus = (*handlers[input->op].handler)(input);
+   status = HgfsConvertFromInternalStatus(internalStatus);
+   if (status != HGFS_STATUS_SUCCESS) {
+      /* Handler failed: send an error reply back over the transport. */
+      HgfsServer_ReplyWithError(input->packet, input->metaPacket,
+                                status, input->v4header, input->session);
+   }
+   free(input);   /* Allocated with Util_SafeMalloc in HgfsServerSessionReceive. */
+}
+
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * HgfsServer_ReplyWithError --
+ *
+ * Reply with an error packet
+ *
+ * Results:
+ *    None.
+ *
+ * Side effects:
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+void
+HgfsServer_ReplyWithError(HgfsPacket *packet,
+ const char *metaPacket,
+ HgfsStatus status,
+ Bool v4header,
+ HgfsSessionInfo *session)
+{
+ char *packetOut;
+ uint32 replySize;
+ size_t replyPacketSize;
+
+ if (v4header) {
+ HgfsHeader *header;
+ replyPacketSize = sizeof *header;
+ header = HSPU_GetReplyPacket(packet, &replyPacketSize, session);
+ if (!header || sizeof *header > replyPacketSize) {
+ /*
+ * Transport should probably check for minimum hgfs packet size.
+ * How should we send an error back if there is no meta packet ?
+ */
+ return;
}
- LOG(4, ("Error occured for id = %u\n", (uint32)id));
- if (!HgfsPacketSend(packet, packetOut, replySize, session, 0)) {
- /* Send failed. Drop the reply. */
- HSPU_PutReplyPacket(packet, session);
+
+ HgfsPackReplyHeaderV4(status, 0, (HgfsHeader *)metaPacket, header);
+ packetOut = (char *)header;
+ replySize = sizeof *header;
+ } else {
+ HgfsReply *reply;
+ replyPacketSize = sizeof *reply;
+ reply = HSPU_GetReplyPacket(packet, &replyPacketSize, session);
+ if (!reply || sizeof *reply > replyPacketSize) {
+ /*
+ * Transport should probably check for minimum hgfs packet size.
+ * How should we send an error back if there is no meta packet ?
+ */
+ return;
}
+ reply->id = packet->id;
+ reply->status = status;
+ packetOut = (char *)reply;
+ replySize = sizeof *reply;
+ }
+   LOG(0, ("Error occurred for id = %u %d status\n", (uint32)packet->id, status));
+ if (!HgfsPacketSend(packet, packetOut, replySize, session, 0)) {
+ /* Send failed. Drop the reply. */
+ HSPU_PutReplyPacket(packet, session);
}
}
void
HgfsServer_ProcessPacket(char const *packetIn, // IN: incoming packet
char *packetOut, // OUT: outgoing packet
- size_t *packetLen, // IN/OUT: packet length
- HgfsReceiveFlags flags) // IN: flags
+ size_t *packetLen) // IN/OUT: packet length
{
HgfsPacket packet;
ASSERT(packetIn);
packet.metaPacketSize = *packetLen;
packet.replyPacket = packetOut;
packet.replyPacketSize = HGFS_LARGE_PACKET_MAX;
+ packet.supportsAsync = FALSE;
HgfsServerSessionReceive(&packet,
- hgfsStaticSession.session,
- 0);
+ hgfsStaticSession.session);
/*
* At this point, all the HGFS ops send reply synchronously. So
Bool result = FALSE;
ASSERT(packet);
- ASSERT(packetOut);
ASSERT(session);
if (session->state == HGFS_SESSION_STATE_OPEN) {
{
HgfsRequest *request = (HgfsRequest *)packetIn;
Bool result = TRUE;
+
if (packetSize < sizeof *request) {
return FALSE;
}
packetSize >= header->packetSize;
} else {
result = packetSize >= sizeof *request;
- }
+ }
return result;
}
ASSERT(header->packetSize >= header->headerSize);
result = header->packetSize - header->headerSize;
}
+
return result;
}
ASSERT(packetIn);
ASSERT(openInfo);
-
+
if (!HgfsParseRequest(packetIn, packetSize, &payload, &payloadSize, &op)) {
return FALSE;
}
} \
} while(0)
+#define HGFS_DEBUG_ASYNC (0)
/*
* Does this platform have oplock support? We define it here to avoid long
uint64 fileId;
} HgfsLocalId;
+typedef enum {
+ REQ_ASYNC, /* Hint that request should be processed Async */
+   REQ_SYNC,  /* Hint that request should be processed Sync */
+} RequestHint;
+
/* Three possible filenode states */
typedef enum {
size_t metaPacketSize;
HgfsSessionInfo *session;
HgfsPacket *packet;
+ Bool v4header;
+ HgfsOp op;
}
HgfsInputParam;
void **buf, // OUT: Contigous buffer
size_t bufSize, // IN: Size of buffer
Bool *isAllocated, // OUT: Was buffer allocated ?
- uint32 mappingType, // IN: Readable/ Writeable ?
+ MappingType mappingType, // IN: Readable/ Writeable ?
HgfsSessionInfo *session); // IN: Session Info
void *
void *
HSPU_GetDataPacketBuf(HgfsPacket *packet, // IN/OUT: Hgfs Packet
- uint32 mappingType, // IN: Readable/ Writeable ?
+ MappingType mappingType, // IN: Readable/ Writeable ?
HgfsSessionInfo *session); // IN: Session Info
void
void **buf, // IN/OUT: Buffer to be freed
size_t *bufSize, // IN: Size of the buffer
Bool *isAllocated, // IN: Was buffer allocated ?
- uint32 mappingType, // IN: Readable/ Writeable ?
+ MappingType mappingType, // IN: Readable/ Writeable ?
HgfsSessionInfo *session); // IN: Session info
void
replySize = HGFS_REP_PAYLOAD_SIZE_V3(reply) - 1;
/* Get a data packet buffer that is writeable */
- payload = HSPU_GetDataPacketBuf(input->packet, HGFS_BUF_WRITEABLE, session);
+ payload = HSPU_GetDataPacketBuf(input->packet, BUF_WRITEABLE, session);
if (!payload) {
ASSERT_DEVEL(payload);
status = EPROTO;
reply->reserved = 0;
actualSize = &reply->actualSize;
/* Get a data packet buffer that is readable */
- payload = HSPU_GetDataPacketBuf(input->packet, HGFS_BUF_READABLE, session);
+ payload = HSPU_GetDataPacketBuf(input->packet, BUF_READABLE, session);
if (!payload) {
ASSERT_DEVEL(payload);
status = EPROTO;
packet->replyPacket = HSPU_GetBuf(packet, 0, &packet->metaPacket,
packet->metaPacketSize,
&packet->metaPacketIsAllocated,
- HGFS_BUF_WRITEABLE,
+ BUF_WRITEABLE,
session);
/*
* Really this can never happen, we would have caught bad physical address
return HSPU_GetBuf(packet, 0, &packet->metaPacket,
packet->metaPacketSize,
&packet->metaPacketIsAllocated,
- HGFS_BUF_WRITEABLE, session);
+ BUF_WRITEABLE, session);
}
void *
HSPU_GetDataPacketBuf(HgfsPacket *packet, // IN/OUT: Hgfs Packet
- uint32 mappingType, // IN: Writeable/Readable
+ MappingType mappingType, // IN: Writeable/Readable
HgfsSessionInfo *session) // IN: Session Info
{
packet->dataMappingType = mappingType;
void **buf, // OUT: Contigous buffer
size_t bufSize, // IN: Size of buffer
Bool *isAllocated, // OUT: Was buffer allocated ?
- uint32 mappingType, // IN: Readable/Writeable ?
+ MappingType mappingType, // IN: Readable/Writeable ?
HgfsSessionInfo *session) // IN: Session Info
{
uint32 iovCount;
return NULL;
}
- ASSERT_DEVEL(session->channelCbTable);
if (!session->channelCbTable) {
return NULL;
}
- if (mappingType == HGFS_BUF_WRITEABLE) {
+ if (mappingType == BUF_WRITEABLE) {
func = session->channelCbTable->getWriteVa;
} else {
- ASSERT(mappingType == HGFS_BUF_READABLE);
+ ASSERT(mappingType == BUF_READABLE);
func = session->channelCbTable->getReadVa;
}
- ASSERT_DEVEL(func);
+ /* Looks like we are in the middle of poweroff. */
if (func == NULL) {
return NULL;
}
/* Debugging check: Iov in VMCI should never cross page boundary */
ASSERT_DEVEL(packet->iov[iovCount].len <=
- (4096 - (packet->iov[iovCount].pa & 0xfff)));
+ (PAGE_SIZE - PAGE_OFFSET(packet->iov[iovCount].pa)));
packet->iov[iovCount].va = func(packet->iov[iovCount].pa,
packet->iov[iovCount].len,
HSPU_PutBuf(packet, 0, &packet->metaPacket,
&packet->metaPacketSize,
&packet->metaPacketIsAllocated,
- HGFS_BUF_WRITEABLE, session);
+ BUF_WRITEABLE, session);
}
}
if (*isAllocated) {
- if (mappingType == HGFS_BUF_WRITEABLE) {
+ if (mappingType == BUF_WRITEABLE) {
HSPU_CopyBufToIovec(packet, startIndex, *buf, *bufSize, session);
}
LOG(10, ("%s: Hgfs Freeing buffer \n", __FUNCTION__));
ASSERT(size <= 0);
}
*buf = NULL;
- *bufSize = 0;
}
/* Debugging check: Iov in VMCI should never cross page boundary */
ASSERT_DEVEL(packet->iov[iovCount].len <=
- (4096 - (packet->iov[iovCount].pa & 0xfff)));
+ (PAGE_SIZE - PAGE_OFFSET(packet->iov[iovCount].pa)));
packet->iov[iovCount].va = session->channelCbTable->getWriteVa(packet->iov[iovCount].pa,
packet->iov[iovCount].len,
ASSERT(args[0] == ' ');
packetSize = argsSize - 1;
- HgfsServer_ProcessPacket((char const *)(args + 1), packet, &packetSize, 0);
+ HgfsServer_ProcessPacket((char const *)(args + 1), packet, &packetSize);
*result = packet;
*resultLen = packetSize;
void *loggerData; // logger callback private data
} HgfsServerStateLogger;
-#define HGFS_BUF_READABLE 0x0000cafe
-#define HGFS_BUF_WRITEABLE 0x0000babe
-
typedef
struct HgfsVmxIov {
void *va; /* Virtual addr */
struct HgfsVaIov {
void *va;
uint32 len;
-} HgfsVaIov;
+}HgfsVaIov;
+
+typedef enum {
+ BUF_READABLE, /* Establish readable mappings */
+ BUF_WRITEABLE, /* Establish writeable mappings */
+} MappingType;
typedef
struct HgfsPacket {
+ uint64 id;
+
+ /* Does the transport support Async operations ? */
+ Bool supportsAsync;
+
+ /* Does transport need to send Async reply ? */
+ Bool processedAsync;
+
+ /* Is the packet guest initiated ? */
+ Bool guestInitiated;
+
/* For metapacket we always establish writeable mappings */
void *metaPacket;
size_t metaPacketSize;
uint32 dataPacketIovIndex;
Bool dataPacketIsAllocated;
/* What type of mapping was established - readable/ writeable ? */
- uint32 dataMappingType;
+ MappingType dataMappingType;
void *replyPacket;
size_t replyPacketSize;
#define HGFS_SEND_CAN_DELAY (1 << 0)
#define HGFS_SEND_NO_COMPLETE (1 << 1)
-/*
- * Receive flags.
- *
- * Contains a bitwise OR of a combination of the following flags:
- * HGFS_RECEIVE_CAN_DELAY - directs the server to handle the message
- * asynchronously.
- */
-
-typedef uint32 HgfsReceiveFlags;
-
-#define HGFS_RECEIVE_CAN_DELAY (1 << 0)
typedef Bool
HgfsSessionSendFunc(void *opaqueSession, // IN
Bool (*connect)(void *, HgfsServerChannelCallbacks *, void **);
void (*disconnect)(void *);
void (*close)(void *);
- void (*receive)(HgfsPacket *packet, void *, HgfsReceiveFlags);
+ void (*receive)(HgfsPacket *packet, void *);
void (*invalidateObjects)(void *, DblLnkLst_Links *);
void (*sendComplete)(HgfsPacket *, void *);
} HgfsServerSessionCallbacks;
#ifdef VMX86_TOOLS
void HgfsServer_ProcessPacket(char const *packetIn,
char *packetOut,
- size_t *packetSize,
- HgfsReceiveFlags flags);
+ size_t *packetSize);
#endif
/*
* VMCI specific data structures, macros *
************************************************/
-#define HGFS_VMCI_VERSION_1 0xabcdabcd
+#define HGFS_VMCI_VERSION_1 0x1
-/* Helpful for debugging purposes */
-#define HGFS_VMCI_IO_PENDING 0xdeadbeef
-#define HGFS_VMCI_IO_COMPLETE 0xfaceb00c
-#define HGFS_VMCI_MORE_SPACE_NEEDED 0xc00becaf
-#define HGFS_VMCI_IO_FAILED 0xbeef0000
+typedef enum {
+ HGFS_TS_IO_PENDING,
+ HGFS_TS_IO_COMPLETE,
+ HGFS_TS_IO_FAILED,
+} HgfsTransportRequestState;
+
+typedef enum {
+ HGFS_ASYNC_IOREQ_SHMEM,
+ HGFS_ASYNC_IOREQ_GET_PAGES,
+ HGFS_ASYNC_IOREP,
+} HgfsAsyncReplyFlags;
+
+typedef enum {
+ HGFS_TH_REP_GET_PAGES,
+ HGFS_TH_REQUEST,
+ HGFS_TH_TERMINATE_SESSION,
+} HgfsTransportPacketType;
#define HGFS_VMCI_TRANSPORT_ERROR (VMCI_ERROR_CLIENT_MIN - 1)
+#define HGFS_VMCI_VERSION_MISMATCH (VMCI_ERROR_CLIENT_MIN - 2)
/*
* Used By : Guest and Host
#include "vmware_pack_end.h"
HgfsIov;
+/*
+ * Used By : Guest and Host
+ * Lives in : Inside HgfsVmciTransportHeader
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct HgfsAsyncIov {
+ uint64 pa; /* Physical addr */
+ uint64 va; /* Virtual addr */
+ uint32 len; /* length of data; should be <= PAGE_SIZE */
+ uint64 index; /* Guest opaque data; should not be changed by
+ host */
+ Bool chain; /* Are pages chained ? */
+}
+#include "vmware_pack_end.h"
+HgfsAsyncIov;
+
/*
* Every VMCI request will have this transport Header sent over
* in the datagram by the Guest OS.
#include "vmware_pack_begin.h"
struct HgfsVmciTransportHeader {
uint32 version; /* Version number */
+ HgfsTransportPacketType pktType; /* Type of packet */
uint32 iovCount; /* Number of iovs */
- HgfsIov iov[1]; /* (PA, len) */
+ union {
+ HgfsIov iov[1]; /* (PA, len) */
+ HgfsAsyncIov asyncIov[1];
+ };
}
#include "vmware_pack_end.h"
HgfsVmciTransportHeader;
typedef
#include "vmware_pack_begin.h"
struct HgfsVmciTransportStatus {
- uint32 status; /* IO_PENDING, COMPLETE, MORE SPACE NEEDED, FAILED etc */
- uint32 flags; /* ASYNC_PEND, VALID_ASYNC_PEND_REPLY */
- uint32 size; /* G->H: Size of the packet,H->G: How much more space is needed */
+ HgfsTransportRequestState status; /* IO_PENDING, IO_COMPLETE, IO_FAILED etc */
+ uint32 size; /* G->H: Size of the packet,H->G: How much more space is needed */
}
#include "vmware_pack_end.h"
HgfsVmciTransportStatus;
+typedef
+#include "vmware_pack_begin.h"
+struct HgfsVmciAsyncResponse {
+ uint64 id; /* Id corresponding to the guest request */
+}
+#include "vmware_pack_end.h"
+HgfsVmciAsyncResponse;
+
+typedef
+#include "vmware_pack_begin.h"
+struct HgfsVmciAsyncShmem {
+ uint32 count; /* Number of iovs */
+ HgfsAsyncIov iov[1];
+}
+#include "vmware_pack_end.h"
+HgfsVmciAsyncShmem;
+
+typedef
+#include "vmware_pack_begin.h"
+struct HgfsVmciAsyncReply {
+ uint32 version;
+ HgfsAsyncReplyFlags pktType;
+ union {
+ HgfsVmciAsyncResponse response;
+ HgfsVmciAsyncShmem shmem;
+ };
+}
+#include "vmware_pack_end.h"
+HgfsVmciAsyncReply;
+
#endif /* _HGFS_TRANSPORT_H_ */
*/
HgfsServer_ProcessPacket(hgfsPacket, // packet in buf
hgfsReplyPacket, // packet out buf
- &hgfsPacketSize, // in/out size
- 0); // in flags
+ &hgfsPacketSize); // in/out size
#endif
if (NULL != resultValueResult) {
#include <linux/timer.h>
/* Must be included after sched.h. */
#include <linux/smp_lock.h>
+#include <linux/interrupt.h> /* for spin_lock_bh */
+
#include "hgfsDevLinux.h"
#include "hgfsProto.h"
{
ASSERT(req);
- spin_lock(&hgfsRepQueueLock);
+ spin_lock_bh(&hgfsRepQueueLock);
list_add_tail(&req->list, &hgfsRepPending);
- spin_unlock(&hgfsRepQueueLock);
+ spin_unlock_bh(&hgfsRepQueueLock);
}
*----------------------------------------------------------------------
*/
-static void
+void
HgfsTransportRemovePendingRequest(HgfsReq *req) // IN: Request to dequeue
{
ASSERT(req);
- spin_lock(&hgfsRepQueueLock);
+ spin_lock_bh(&hgfsRepQueueLock);
list_del_init(&req->list);
- spin_unlock(&hgfsRepQueueLock);
+ spin_unlock_bh(&hgfsRepQueueLock);
}
{
struct HgfsReq *req;
- spin_lock(&hgfsRepQueueLock);
+ spin_lock_bh(&hgfsRepQueueLock);
list_for_each_entry(req, &hgfsRepPending, list) {
if (req->state == HGFS_REQ_STATE_SUBMITTED) {
}
}
- spin_unlock(&hgfsRepQueueLock);
+ spin_unlock_bh(&hgfsRepQueueLock);
}
/*
{
HgfsReq *cur, *req = NULL;
- spin_lock(&hgfsRepQueueLock);
+ spin_lock_bh(&hgfsRepQueueLock);
list_for_each_entry(cur, &hgfsRepPending, list) {
if (cur->id == id) {
}
}
- spin_unlock(&hgfsRepQueueLock);
+ spin_unlock_bh(&hgfsRepQueueLock);
return req;
}
compat_mutex_unlock(&hgfsChannelLock);
if (likely(ret == 0)) {
- /* Send succeeded, wait for the reply */
- if (wait_event_interruptible(req->queue,
- req->state == HGFS_REQ_STATE_COMPLETED)) {
- ret = -EINTR; /* Interrupted by some signal. */
- }
+ /*
+ * Send succeeded, wait for the reply.
+ * Right now, we cannot cancel request once they
+ * are dispatched to the host.
+ */
+ wait_event(req->queue,
+ req->state == HGFS_REQ_STATE_COMPLETED);
}
HgfsTransportRemovePendingRequest(req);
void HgfsTransportFreeRequest(HgfsReq *req);
int HgfsTransportSendRequest(HgfsReq *req);
HgfsReq *HgfsTransportGetPendingRequest(HgfsHandle id);
+void HgfsTransportRemovePendingRequest(HgfsReq *req);
void HgfsTransportFinishRequest(HgfsReq *req, Bool success, Bool do_put);
void HgfsTransportFlushRequests(void);
void HgfsTransportMarkDead(void);
#include <linux/errno.h>
#include <linux/moduleparam.h>
+#include <linux/interrupt.h> /* for spin_lock_bh */
#include <asm/io.h>
#include "compat_mm.h"
static HgfsReq * HgfsVmciChannelAllocate(size_t payloadSize);
void HgfsVmciChannelFree(HgfsReq *req);
static int HgfsVmciChannelSend(HgfsTransportChannel *channel, HgfsReq *req);
+static void HgfsRequestAsyncDispatch(char *payload, uint32 size);
int USE_VMCI = 0;
module_param(USE_VMCI, int, 0444);
.status = HGFS_CHANNEL_NOTCONNECTED
};
+static spinlock_t vmciRequestProcessLock;
+
+typedef struct HgfsShmemPage {
+ uint64 va;
+ uint64 pa;
+ Bool free;
+} HgfsShmemPage;
+
+typedef struct HgfsShmemPages {
+ HgfsShmemPage *list;
+ uint32 totalPageCount;
+ uint32 freePageCount;
+} HgfsShmemPages;
+
+HgfsShmemPages gHgfsShmemPages;
+#define HGFS_VMCI_SHMEM_PAGES (16)
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * HgfsRequestAsyncDispatch --
+ *
+ * XXX Main dispatcher function. Currently just a stub. Needs to run
+ * in atomic context.
+ *
+ * Results:
+ * None
+ *
+ * Side effects:
+ * None
+ *
+ *----------------------------------------------------------------------
+ */
+
+static void
+HgfsRequestAsyncDispatch(char *payload, // IN: request header
+ uint32 size) // IN: size of payload
+{
+ HgfsRequest *reqHeader = (HgfsRequest *)payload;
+
+ LOG(4, (KERN_WARNING "Size in Dispatch %u\n", size));
+
+ /*
+  * Only directory-notification (V4) packets are recognized so far;
+  * the V4 case merely logs — the actual dispatch is still a stub.
+  */
+ switch (reqHeader->op) {
+ case HGFS_OP_NOTIFY_V4: {
+ LOG(4, (KERN_WARNING "Calling HGFS_OP_NOTIFY_V4 dispatch function\n"));
+ break;
+ }
+ default:
+ /* Unknown async opcodes are logged and dropped, not treated as fatal. */
+ LOG(4, (KERN_WARNING "%s: Unknown opcode = %d", __func__, reqHeader->op));
+ }
+}
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * HgfsRequestAsyncShmemDispatch --
+ *
+ * Shared memory dispatcher. It extracts packets from the shared
+ * memory and dispatches to the main hgfs dispatcher function. When
+ * the buffer is larger than 4K, we may fail to deliver notifications.
+ * Main dispatcher function should run in atomic context.
+ *
+ * Results:
+ * None
+ *
+ * Side effects:
+ * None
+ *
+ *----------------------------------------------------------------------
+ */
+
+static void
+HgfsRequestAsyncShmemDispatch(HgfsAsyncIov *iov, // IN: request vectors
+ uint32 count) // IN: number of iovs
+{
+ uint32 i;
+ char *buf = NULL;
+ uint32 size = 0;
+ Bool chainStarted = FALSE;
+ uint32 offset = 0;
+ uint32 copySize;
+ /* -1 wraps to UINT64_MAX: a sentinel no real page index can match. */
+ uint64 prevIndex = -1;
+ uint64 currIndex;
+ size_t va;
+
+ LOG(10, (KERN_WARNING "%s count = %u\n",__FUNCTION__, count));
+
+ /*
+ * When requests cross 4K boundary we have to chain pages together
+ * since guest passes 4k pages to the host. Here is how chaining works
+ *
+ * - All the vectors except the last one in the chain sets iov[].chain
+ * to TRUE.
+ * - Every iov[].len field indicates remaining bytes. So the first
+ * vector will contain total size of the request while the last vector
+ * will contain only size of data present in last vector.
+ */
+
+ for (i = 0; i < count; i++) {
+ va = (size_t)iov[i].va;
+ currIndex = iov[i].index;
+
+ if (LIKELY(!iov[i].chain)) {
+ /* When the chain ends we dispatch the datagram.*/
+ if (!chainStarted) {
+ /* Single-page request: dispatch straight out of the shmem page. */
+ buf = (char *)va;
+ LOG(8, (KERN_WARNING " Chain wasn't started...\n"));
+ size = iov[i].len;
+ } else {
+ /* Last vector of a chain: iov[i].len is only the tail's size. */
+ memcpy(buf + offset, (char *)va, iov[i].len);
+ }
+ ASSERT(buf && size);
+ HgfsRequestAsyncDispatch(buf, size);
+ if (chainStarted) {
+ /* Well chain just ended, we shall free the buffer. */
+ chainStarted = FALSE;
+ kfree(buf);
+ }
+ } else {
+ if (!chainStarted) {
+ LOG(8, (KERN_WARNING "Started chain ...\n"));
+ /* First vector carries the total request size (see chaining note). */
+ size = iov[i].len;
+ /* GFP_ATOMIC: we run inside the VMCI datagram tasklet. */
+ buf = kmalloc(size, GFP_ATOMIC);
+ ASSERT_DEVEL(buf);
+ if (!buf) {
+ /* Skip this notification, move onto next. */
+ /*
+  * NOTE(review): the skipped iovs bypass the free-marking code
+  * below, so their shmem pages stay flagged in-use and are never
+  * offered back to the host — confirm this is intended.
+  */
+ i += (size - 1) / PAGE_SIZE;
+ continue;
+ }
+ chainStarted = TRUE;
+ offset = 0;
+ }
+ copySize = MIN(iov[i].len, PAGE_SIZE);
+ memcpy(buf + offset, (char *)va, copySize);
+ offset += copySize;
+ }
+
+ if (currIndex != prevIndex) {
+ /* This is new page. Mark is as free. */
+ gHgfsShmemPages.list[currIndex].free = TRUE;
+ gHgfsShmemPages.freePageCount++;
+ }
+ prevIndex = currIndex;
+ }
+
+ ASSERT(gHgfsShmemPages.freePageCount <= gHgfsShmemPages.totalPageCount);
+ LOG(8, (KERN_WARNING "Page count %u %u ...\n", gHgfsShmemPages.freePageCount,
+ gHgfsShmemPages.totalPageCount));
+}
+
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * HgfsVmciChannelPassGuestPages --
+ *
+ * Passes down free pages to the hgfs Server. HgfsServer will use these pages
+ * for sending change notification, oplock breaks etc.
+ *
+ * XXX It seems safe to call VMCIDatagram_Send in atomic context.
+ *
+ * Results:
+ * None
+ *
+ * Side effects:
+ * None
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static Bool
+HgfsVmciChannelPassGuestPages(HgfsTransportChannel *channel) // IN:
+{
+ Bool retVal = TRUE;
+ int ret;
+ int i;
+ int j = 0;
+ size_t transportHeaderSize;
+ HgfsVmciTransportHeader *transportHeader = NULL;
+ VMCIDatagram *dg;
+
+ if (!gHgfsShmemPages.freePageCount) {
+ /* Nothing to hand back to the host; that is not an error. */
+ return TRUE;
+ }
+
+ /* The header already embeds one HgfsAsyncIov, hence the "- 1". */
+ transportHeaderSize = sizeof (HgfsVmciTransportHeader) +
+ (gHgfsShmemPages.freePageCount - 1) * sizeof (HgfsAsyncIov);
+
+ /* GFP_ATOMIC: may be invoked from the VMCI datagram tasklet. */
+ dg = kmalloc(sizeof *dg + transportHeaderSize, GFP_ATOMIC);
+ if (!dg) {
+ LOG(4, (KERN_WARNING "%s failed to allocate\n", __func__));
+ retVal = FALSE;
+ goto exit;
+ }
+
+ transportHeader = VMCI_DG_PAYLOAD(dg);
+
+ /* Collect every free page into the datagram's iov array. */
+ for (i = 0; i < gHgfsShmemPages.totalPageCount; i++) {
+ if (gHgfsShmemPages.list[i].free) {
+ transportHeader->asyncIov[j].index = i;
+ transportHeader->asyncIov[j].va = gHgfsShmemPages.list[i].va;
+ transportHeader->asyncIov[j].pa = gHgfsShmemPages.list[i].pa;
+ transportHeader->asyncIov[j].len = PAGE_SIZE;
+ j++;
+ }
+ }
+
+ dg->src = *(VMCIHandle *)channel->priv;
+ dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_HGFS_TRANSPORT);
+ dg->payloadSize = transportHeaderSize;
+
+ transportHeader->version = HGFS_VMCI_VERSION_1;
+ ASSERT(gHgfsShmemPages.freePageCount == j);
+ transportHeader->iovCount = j;
+ transportHeader->pktType = HGFS_TH_REP_GET_PAGES;
+
+ /* Log the number of pages actually sent (j), not the loop bound. */
+ LOG(10, (KERN_WARNING "Sending %d Guest pages \n", j));
+ if ((ret = VMCIDatagram_Send(dg)) < VMCI_SUCCESS) {
+ if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
+ LOG(0, (KERN_WARNING "HGFS Transport error occured. Don't blame VMCI\n"));
+ }
+ retVal = FALSE;
+ }
+
+exit:
+ if (retVal) {
+ /* We successfully sent pages to the host. Mark all pages as allocated. */
+ for (i = 0; i < gHgfsShmemPages.totalPageCount; i++) {
+ gHgfsShmemPages.list[i].free = FALSE;
+ }
+ gHgfsShmemPages.freePageCount = 0;
+ }
+ kfree(dg);
+ return retVal;
+}
+
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * HgfsVmciChannelCompleteRequest --
+ *
+ * Completes the request that was serviced asynchronously by the server.
+ *
+ * Results:
+ * None
+ *
+ * Side effects:
+ * Request may be removed from the queue and sleeping thread is woken up.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+void
+HgfsVmciChannelCompleteRequest(uint64 id) // IN: Request ID
+{
+ HgfsVmciTransportStatus *transportStatus;
+ HgfsReq *req;
+
+ /* _bh lock: this runs from the VMCI datagram tasklet as well. */
+ spin_lock_bh(&vmciRequestProcessLock);
+
+ /* Reference is taken here */
+ req = HgfsTransportGetPendingRequest(id);
+ if (!req) {
+ /* Unknown/stale id: nothing to complete. */
+ goto exit;
+ }
+
+ transportStatus = (HgfsVmciTransportStatus *)req->buffer;
+ if (transportStatus->status != HGFS_TS_IO_COMPLETE) {
+ /* Host has not finished the I/O yet; leave the request pending. */
+ goto exit;
+ }
+
+ /* Request is completed (yay!), let's remove it from the list */
+ HgfsTransportRemovePendingRequest(req);
+
+ /* The host wrote the reply size into the transport status area. */
+ req->payloadSize = transportStatus->size;
+ HgfsCompleteReq(req);
+
+exit:
+ if (req) {
+ /* Drop the reference taken in *GetPendingRequest */
+ HgfsRequestPutRef(req);
+ }
+ spin_unlock_bh(&vmciRequestProcessLock);
+}
+
/*
*-----------------------------------------------------------------------------
*
* HgfsVmciChannelCallback --
*
- * Called when VMCI datagram is received.
+ * Called when VMCI datagram is received. Note: This function runs inside
+ * tasklet. It means that this function cannot run concurrently with
+ * itself, thus it is safe to manipulate gHgfsShmemPages without locks. If this
+ * ever changes, please consider using appropriate locks.
*
* Results:
- * Always 0.
+ * 0 on Success, < 0 on Failure.
*
* Side effects:
* None
*-----------------------------------------------------------------------------
*/
-static int HgfsVmciChannelCallback(void *data, VMCIDatagram *dg)
+static int HgfsVmciChannelCallback(void *data, // IN: unused
+ VMCIDatagram *dg) // IN: datagram
{
+ HgfsVmciAsyncReply *reply = (HgfsVmciAsyncReply *)VMCI_DG_PAYLOAD(dg);
+ HgfsTransportChannel *channel;
+
+ LOG(10, (KERN_WARNING "Received VMCI channel Callback \n"));
+
+ /* Reject datagrams from a host speaking a different transport version. */
+ if (reply->version != HGFS_VMCI_VERSION_1) {
+ return HGFS_VMCI_VERSION_MISMATCH;
+ }
+
+ switch (reply->pktType) {
+
+ case HGFS_ASYNC_IOREP:
+ /* Host finished a request we submitted: wake the waiter. */
+ LOG(10, (KERN_WARNING "Received ID%"FMT64"x \n", reply->response.id));
+ HgfsVmciChannelCompleteRequest(reply->response.id);
+ break;
+
+ case HGFS_ASYNC_IOREQ_SHMEM:
+ /* Host pushed async requests (e.g. notifications) via shared pages. */
+ HgfsRequestAsyncShmemDispatch(reply->shmem.iov, reply->shmem.count);
+ break;
+
+ case HGFS_ASYNC_IOREQ_GET_PAGES:
+ /* Host ran out of guest pages and asks for more. */
+ channel = HgfsGetVmciChannel();
+ LOG(10, (KERN_WARNING "Should send pages to the host\n"));
+ HgfsVmciChannelPassGuestPages(channel);
+ break;
+
+ default:
+ ASSERT(0);
+ return HGFS_VMCI_TRANSPORT_ERROR;
+ }
+
return 0;
}
*
* HgfsVmciChannelOpen --
*
- * Open VMCI channel.
+ * Opens VMCI channel and passes guest pages to the host.
*
* Results:
* TRUE on success, FALSE on failure.
static Bool
HgfsVmciChannelOpen(HgfsTransportChannel *channel) // IN: Channel
{
- HgfsVmciTransportHeader transportHeader;
- VMCIDatagram *dg;
int ret;
+ int i;
ASSERT(channel->status == HGFS_CHANNEL_NOTCONNECTED);
ASSERT(channel->priv == NULL);
if (USE_VMCI == 0) {
- return FALSE;
+ goto error;
}
+ spin_lock_init(&vmciRequestProcessLock);
+
channel->priv = kmalloc(sizeof(VMCIHandle), GFP_KERNEL);
- if (NULL == channel->priv) {
- return FALSE;
+ if (!channel->priv) {
+ goto error;
}
ret = VMCIDatagram_CreateHnd(VMCI_INVALID_ID, /* Resource ID */
VMCI_FLAG_DG_NONE, /* Flags */
- HgfsVmciChannelCallback,/* Datagram Recv Callback*/
+ HgfsVmciChannelCallback,/* Datagram Recv Callback */
NULL, /* Callback data */
channel->priv); /* VMCI outhandle */
if (ret != VMCI_SUCCESS) {
LOG(1, (KERN_WARNING "Failed to create VMCI handle %d\n", ret));
- kfree(channel->priv);
- return FALSE;
+ goto error;
}
- transportHeader.version = HGFS_VMCI_VERSION_1;
- transportHeader.iovCount = 0;
+ /* sizeof *list: allocate room for the elements, not the pointer. */
+ gHgfsShmemPages.list = kmalloc(sizeof *gHgfsShmemPages.list * HGFS_VMCI_SHMEM_PAGES,
+ GFP_KERNEL);
+ if (!gHgfsShmemPages.list) {
+ goto error_destroy;
+ }
- /*
- * Send a datagram to the VMX with the HgfsTransportHeader as the datagram
- * payload
- */
- dg = kmalloc(sizeof *dg + sizeof transportHeader, GFP_KERNEL);
+ memset(gHgfsShmemPages.list, 0, sizeof *gHgfsShmemPages.list * HGFS_VMCI_SHMEM_PAGES);
+
+ for (i = 0; i < HGFS_VMCI_SHMEM_PAGES; i++) {
+ gHgfsShmemPages.list[i].va = __get_free_page(GFP_KERNEL);
+ if (!gHgfsShmemPages.list[i].va) {
+ LOG(1, (KERN_WARNING "__get_free_page returned error \n"));
+ if (i == 0) {
+ /* Ouch. We failed on first call to __get_free_page */
+ goto error_destroy;
+ }
+ /* It's ok. We can still send a few pages to the host */
+ break;
+ }
+ gHgfsShmemPages.list[i].pa = virt_to_phys((void *)(size_t)gHgfsShmemPages.list[i].va);
+ gHgfsShmemPages.list[i].free = TRUE;
+ }
+
+ gHgfsShmemPages.totalPageCount = i;
+ gHgfsShmemPages.freePageCount = i;
+
+ ret = HgfsVmciChannelPassGuestPages(channel);
+ if (!ret) {
+ LOG(1, (KERN_WARNING "Failed to pass pages to the guest %d\n", ret));
+ goto error_free_pages;
+ }
+
+ return TRUE;
+
+ /* Unwind in reverse order of acquisition; clear pointers so that a
+  * later reopen attempt does not double-free stale allocations. */
+error_free_pages:
+ for (i = 0; i < gHgfsShmemPages.totalPageCount; i++) {
+ free_page((unsigned long)gHgfsShmemPages.list[i].va);
+ }
+ gHgfsShmemPages.totalPageCount = 0;
+ gHgfsShmemPages.freePageCount = 0;
+error_destroy:
+ VMCIDatagram_DestroyHnd(*(VMCIHandle *)channel->priv);
+error:
+ kfree(gHgfsShmemPages.list);
+ gHgfsShmemPages.list = NULL;
+ kfree(channel->priv);
+ channel->priv = NULL;
+ return FALSE;
+}
+
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * HgfsVmciChannelTerminateSession --
+ *
+ * Terminate session with the server.
+ *
+ * Results:
+ * 0 on success and < 0 on error.
+ *
+ * Side effects:
+ * None
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static int
+HgfsVmciChannelTerminateSession(HgfsTransportChannel *channel) {
+
+ int ret = 0;
+ VMCIDatagram *dg;
+ HgfsVmciTransportHeader *transportHeader;
+
+ /* One header, zero iovs: the terminate packet carries no payload data. */
+ dg = kmalloc(sizeof *dg + sizeof *transportHeader, GFP_KERNEL);
if (NULL == dg) {
LOG(4, (KERN_WARNING "%s failed to allocate\n", __func__));
- VMCIDatagram_DestroyHnd(*(VMCIHandle *)channel->priv);
- kfree(channel->priv);
- return FALSE;
+ return -ENOMEM;
}
- memcpy(VMCI_DG_PAYLOAD(dg), &transportHeader, sizeof transportHeader);
-
+ /* Initialize datagram */
dg->src = *(VMCIHandle *)channel->priv;
dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_HGFS_TRANSPORT);
- dg->payloadSize = sizeof transportHeader;
+ dg->payloadSize = sizeof *transportHeader;
+ transportHeader = VMCI_DG_PAYLOAD(dg);
+ transportHeader->version = HGFS_VMCI_VERSION_1;
+ transportHeader->iovCount = 0;
+ transportHeader->pktType = HGFS_TH_TERMINATE_SESSION;
+
+ LOG(1, (KERN_WARNING "Terminating session with host \n"));
if ((ret = VMCIDatagram_Send(dg)) < VMCI_SUCCESS) {
- LOG(4, (KERN_WARNING "Failure with %d\n", ret));
- VMCIDatagram_DestroyHnd(*(VMCIHandle *)channel->priv);
- kfree(dg);
- kfree(channel->priv);
- return FALSE;
+ if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
+ LOG(0, (KERN_WARNING "HGFS Transport error occured. Don't blame VMCI\n"));
+ }
}
+ /*
+  * NOTE(review): VMCIDatagram_Send returns >= VMCI_SUCCESS on success,
+  * so ret may be positive here even though the banner says "0 on
+  * success" — callers should test ret < 0, not ret != 0; confirm.
+  */
kfree(dg);
- return TRUE;
+ return ret;
}
HgfsVmciChannelClose(HgfsTransportChannel *channel) // IN: Channel
{
ASSERT(channel->priv != NULL);
-
+ HgfsVmciChannelTerminateSession(channel);
VMCIDatagram_DestroyHnd(*(VMCIHandle *)channel->priv);
kfree(channel->priv);
+ kfree(gHgfsShmemPages.list);
channel->priv = NULL;
LOG(8, ("VMware hgfs: %s: vmci closed.\n", __func__));
req->bufferSize = size - sizeof (HgfsVmciTransportStatus) - sizeof *req;
}
- /* We asked for PAGE_SIZE, it should be page aligned */
- ASSERT(((long)req & 0x00000fff) == 0);
LOG(10, (KERN_WARNING "%s: Allocated Request\n", __func__));
return req;
}
{
int ret;
int iovCount = 0;
- HgfsReply *reply;
VMCIDatagram *dg;
HgfsVmciTransportHeader *transportHeader;
HgfsVmciTransportStatus *transportStatus;
size_t total;
uint64 pa;
uint64 len;
- size_t va;
+ uint64 id;
int j;
ASSERT(req);
ASSERT(req->state == HGFS_REQ_STATE_UNSENT || req->state == HGFS_REQ_STATE_ALLOCATED);
ASSERT(req->payloadSize <= req->bufferSize);
- LOG(4, ("VMware hgfs: %s: VMCI sending.\n", __func__));
-
- /*
- +------------+
- + page 1 + <----- We can have request starting from here
- +------------+
- + page 2 +
- +------------+
- + page 3 + <----- ..and ending here
- +------------+
- */
-
/* Note that req->bufferSize does not include chunk used by the transport. */
total = req->bufferSize + sizeof (HgfsVmciTransportStatus);
- bufferSize = 0;
/* Calculate number of entries for metaPacket */
- iovCount = 1;
- va = (size_t)req->buffer;
- len = total < (PAGE_SIZE - va % PAGE_SIZE) ? total : (PAGE_SIZE - va % PAGE_SIZE);
- total -= len;
- iovCount += (total + PAGE_SIZE - 1)/ PAGE_SIZE;
+ iovCount = (total + (size_t)req->buffer % PAGE_SIZE - 1)/ PAGE_SIZE + 1;
+ ASSERT(total + (size_t)req->buffer % PAGE_SIZE <= PAGE_SIZE);
- ASSERT(iovCount >= 1);
transportHeaderSize = sizeof *transportHeader +
(iovCount + req->numEntries - 1) * sizeof (HgfsIov);
dg = kmalloc(sizeof *dg + transportHeaderSize, GFP_KERNEL);
dg->payloadSize = transportHeaderSize;
transportHeader = VMCI_DG_PAYLOAD(dg);
-
- /* Initialize transport header */
transportHeader->version = HGFS_VMCI_VERSION_1;
total = req->bufferSize + sizeof (HgfsVmciTransportStatus);
ASSERT(total == 0);
ASSERT(bufferSize == req->bufferSize + sizeof (HgfsVmciTransportStatus));
- LOG(8, ("Size of request is %Zu %Zu\n", req->payloadSize, sizeof (HgfsRequest)));
+ LOG(8, ("Size of request is %Zu\n", req->payloadSize));
for (j = 0; j < req->numEntries; j++, iovCount++) {
/* I will have to probably do page table walk here, haven't figured it out yet */
}
transportHeader->iovCount = iovCount;
+ transportHeader->pktType = HGFS_TH_REQUEST;
/* Initialize transport Status */
transportStatus = (HgfsVmciTransportStatus *)req->buffer;
- transportStatus->status = HGFS_VMCI_IO_PENDING;
- transportStatus->flags = 0;
+ transportStatus->status = HGFS_TS_IO_PENDING;
transportStatus->size = req->bufferSize + sizeof (HgfsVmciTransportStatus);
- LOG(8, (KERN_WARNING "Physical addr is %"FMT64"x len=%u iovCount=%u numEntries=%u\n",
- transportHeader->iov[0].pa,
- transportHeader->iov[0].len,
- transportHeader->iovCount,
- req->numEntries));
- LOG(8, (KERN_WARNING "Id = %u op = %u\n",
- ((HgfsRequest *)req->payload)->id,
- ((HgfsRequest *)req->payload)->op));
+ /*
+ * Don't try to set req->state after VMCIDatagram_Send().
+ * It may be too late then. We could have received a datagram by then and
+ * datagram handler expects request's state to be submitted.
+ */
+ req->state = HGFS_REQ_STATE_SUBMITTED;
+ id = req->id;
- if((ret = VMCIDatagram_Send(dg)) < VMCI_SUCCESS) {
+ if ((ret = VMCIDatagram_Send(dg)) < VMCI_SUCCESS) {
if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
LOG(0, (KERN_WARNING "HGFS Transport error occured. Don't blame VMCI\n"));
+ } else if (ret == HGFS_VMCI_VERSION_MISMATCH) {
+ LOG(0, (KERN_WARNING "Version mismatch\n"));
}
req->state = HGFS_REQ_STATE_UNSENT;
kfree(dg);
return -EIO;
}
- LOG(8, (KERN_WARNING "VMware hgfs: %s: VMCI reply received.\n", __func__));
-
- /* For HgfsVmciStage2 everything should complete sync. */
- ASSERT(transportStatus->status == HGFS_VMCI_IO_COMPLETE);
-
- if (transportStatus->status == HGFS_VMCI_IO_COMPLETE) {
- reply = (HgfsReply *)req->payload;
- req->payloadSize = transportStatus->size;
- ASSERT(transportStatus->size <= (req->bufferSize + sizeof (HgfsVmciTransportStatus)));
- HgfsCompleteReq(req);
- LOG(8, (KERN_WARNING "IO_COMPLETE: id = %u status = %u\n",
- (uint32)reply->id, (uint32)reply->status));
- }
+ LOG(0, (KERN_WARNING "Hgfs Received response\n"));
+ HgfsVmciChannelCompleteRequest(id);
kfree(dg);
return 0;
}
packetSize = data->argsSize - 1;
- HgfsServer_ProcessPacket(data->args + 1, packet, &packetSize, 0);
+ HgfsServer_ProcessPacket(data->args + 1, packet, &packetSize);
data->result = packet;
data->resultLen = packetSize;
*/
HgfsServer_ProcessPacket(data->args, // packet in buf
hgfsReplyPacket, // packet out buf
- &hgfsPacketSize, // in/out size
- 0); // receive process flags
+ &hgfsPacketSize); // in/out size
abort:
if (impersonatingVMWareUser) {