]> git.ipfire.org Git - thirdparty/open-vm-tools.git/commitdiff
HGFS: Shared memory support for linux guest
authorVMware, Inc <>
Thu, 17 Jun 2010 21:55:40 +0000 (14:55 -0700)
committerMarcelo Vanzin <mvanzin@vmware.com>
Thu, 17 Jun 2010 21:55:40 +0000 (14:55 -0700)
Support for Async operations is now present. We use shared memory mechanism
to notify guest.

Shared memory support is very minimalistic. There is no flow control
here. Here is how it works --

1) On startup, the guest allocates 16 pages and sends them to the HGFS server.
The number of pages is fixed in the system; the guest always recycles among
those pages.
2) The host uses up those pages to send notifications. When the host wants to send
data and cannot find free memory, it sends a datagram to the guest asking for more.
3) The guest happily obliges and replies with the number of free pages.
4) This process repeats.

Note that one page can contain many requests, while one request can
span many pages. When a request spans many pages, the pages must be
chained together. Page chaining works as follows:

The first iov, from which the chain starts, sets its chain flag (Bool) to TRUE.
The last iov sets its chain flag to FALSE. The first iov's len field contains
the total length of the chain.

Minor -
While I was here - I did some minor cleanup to remove HgfsReceiveFlags that
nobody was using.

Note that currently all requests are still processed synchronously. I have added
a terminate-session request so that the host does not touch any guest pages after
the module is unloaded, which would otherwise corrupt the guest.

Signed-off-by: Marcelo Vanzin <mvanzin@vmware.com>
13 files changed:
open-vm-tools/lib/hgfsServer/hgfsServer.c
open-vm-tools/lib/hgfsServer/hgfsServerInt.h
open-vm-tools/lib/hgfsServer/hgfsServerLinux.c
open-vm-tools/lib/hgfsServer/hgfsServerPacketUtil.c
open-vm-tools/lib/hgfsServerManagerGuest/hgfsServerManagerGuest.c
open-vm-tools/lib/include/hgfsServer.h
open-vm-tools/lib/include/hgfsTransport.h
open-vm-tools/lib/vixTools/vixTools.c
open-vm-tools/modules/linux/vmhgfs/transport.c
open-vm-tools/modules/linux/vmhgfs/transport.h
open-vm-tools/modules/linux/vmhgfs/vmci.c
open-vm-tools/services/plugins/hgfsServer/hgfsPlugin.c
open-vm-tools/services/plugins/vix/foundryToolsDaemon.c

index 78e6485ec160332672b0465ad6663b53adca6f55..d5d8e28a984b99279603355963445ac257eeb03e 100644 (file)
@@ -33,6 +33,7 @@
 #include "hgfsDirNotify.h"
 #include "hgfsTransport.h"
 #include "userlock.h"
+#include "poll.h"
 
 #if defined(_WIN32)
 #include <io.h>
@@ -156,8 +157,7 @@ struct HgfsStaticSession {
 
 /* Session related callbacks. */
 static void HgfsServerSessionReceive(HgfsPacket *packet,
-                                     void *clientData,
-                                     HgfsReceiveFlags flags);
+                                     void *clientData);
 static Bool HgfsServerSessionConnect(void *transportData,
                                      HgfsServerChannelCallbacks *channelCbTable,
                                      void **clientData);
@@ -229,6 +229,13 @@ static void HgfsPackReplyHeaderV4(HgfsInternalStatus status,
                                   uint32 payloadSize,
                                   HgfsHeader const *packetIn,
                                   HgfsHeader *header);
+static void HgfsServer_ProcessRequest(void *data);
+void HgfsServer_ReplyWithError(HgfsPacket *packet,
+                               const char *metaPacket,
+                               HgfsStatus status,
+                               Bool v4header,
+                               HgfsSessionInfo *session);
+
 
 /*
  *----------------------------------------------------------------------------
@@ -2606,50 +2613,54 @@ static struct {
 
    /* Minimal size of the request packet */
    unsigned int minReqSize;
+
+   /* How do you process the request {sync, async} ? */
+   RequestHint reqType;
+
 } const handlers[] = {
-   { HgfsServerOpen,             sizeof (HgfsRequestOpen)              },
-   { HgfsServerRead,             sizeof (HgfsRequestRead)              },
-   { HgfsServerWrite,            sizeof (HgfsRequestWrite)             },
-   { HgfsServerClose,            sizeof (HgfsRequestClose)             },
-   { HgfsServerSearchOpen,       sizeof (HgfsRequestSearchOpen)        },
-   { HgfsServerSearchRead,       sizeof (HgfsRequestSearchRead)        },
-   { HgfsServerSearchClose,      sizeof (HgfsRequestSearchClose)       },
-   { HgfsServerGetattr,          sizeof (HgfsRequestGetattr)           },
-   { HgfsServerSetattr,          sizeof (HgfsRequestSetattr)           },
-   { HgfsServerCreateDir,        sizeof (HgfsRequestCreateDir)         },
-   { HgfsServerDeleteFile,       sizeof (HgfsRequestDelete)            },
-   { HgfsServerDeleteDir,        sizeof (HgfsRequestDelete)            },
-   { HgfsServerRename,           sizeof (HgfsRequestRename)            },
-   { HgfsServerQueryVolume,      sizeof (HgfsRequestQueryVolume)       },
-
-   { HgfsServerOpen,             sizeof (HgfsRequestOpenV2)            },
-   { HgfsServerGetattr,          sizeof (HgfsRequestGetattrV2)         },
-   { HgfsServerSetattr,          sizeof (HgfsRequestSetattrV2)         },
-   { HgfsServerSearchRead,       sizeof (HgfsRequestSearchReadV2)      },
-   { HgfsServerSymlinkCreate,    sizeof (HgfsRequestSymlinkCreate)     },
-   { HgfsServerServerLockChange, sizeof (HgfsRequestServerLockChange)  },
-   { HgfsServerCreateDir,        sizeof (HgfsRequestCreateDirV2)       },
-   { HgfsServerDeleteFile,       sizeof (HgfsRequestDeleteV2)          },
-   { HgfsServerDeleteDir,        sizeof (HgfsRequestDeleteV2)          },
-   { HgfsServerRename,           sizeof (HgfsRequestRenameV2)          },
-
-   { HgfsServerOpen,             HGFS_SIZEOF_OP(HgfsRequestOpenV3)             },
-   { HgfsServerRead,             HGFS_SIZEOF_OP(HgfsRequestReadV3)             },
-   { HgfsServerWrite,            HGFS_SIZEOF_OP(HgfsRequestWriteV3)            },
-   { HgfsServerClose,            HGFS_SIZEOF_OP(HgfsRequestCloseV3)            },
-   { HgfsServerSearchOpen,       HGFS_SIZEOF_OP(HgfsRequestSearchOpenV3)       },
-   { HgfsServerSearchRead,       HGFS_SIZEOF_OP(HgfsRequestSearchReadV3)       },
-   { HgfsServerSearchClose,      HGFS_SIZEOF_OP(HgfsRequestSearchCloseV3)      },
-   { HgfsServerGetattr,          HGFS_SIZEOF_OP(HgfsRequestGetattrV3)          },
-   { HgfsServerSetattr,          HGFS_SIZEOF_OP(HgfsRequestSetattrV3)          },
-   { HgfsServerCreateDir,        HGFS_SIZEOF_OP(HgfsRequestCreateDirV3)        },
-   { HgfsServerDeleteFile,       HGFS_SIZEOF_OP(HgfsRequestDeleteV3)           },
-   { HgfsServerDeleteDir,        HGFS_SIZEOF_OP(HgfsRequestDeleteV3)           },
-   { HgfsServerRename,           HGFS_SIZEOF_OP(HgfsRequestRenameV3)           },
-   { HgfsServerQueryVolume,      HGFS_SIZEOF_OP(HgfsRequestQueryVolumeV3)      },
-   { HgfsServerSymlinkCreate,    HGFS_SIZEOF_OP(HgfsRequestSymlinkCreateV3)    },
-   { HgfsServerServerLockChange, sizeof (HgfsRequestServerLockChange)          },
-   { HgfsServerWriteWin32Stream, HGFS_SIZEOF_OP(HgfsRequestWriteWin32StreamV3) },
+   { HgfsServerOpen,             sizeof (HgfsRequestOpen),              REQ_SYNC },
+   { HgfsServerRead,             sizeof (HgfsRequestRead),              REQ_SYNC },
+   { HgfsServerWrite,            sizeof (HgfsRequestWrite),             REQ_SYNC },
+   { HgfsServerClose,            sizeof (HgfsRequestClose),             REQ_SYNC },
+   { HgfsServerSearchOpen,       sizeof (HgfsRequestSearchOpen),        REQ_SYNC },
+   { HgfsServerSearchRead,       sizeof (HgfsRequestSearchRead),        REQ_SYNC },
+   { HgfsServerSearchClose,      sizeof (HgfsRequestSearchClose),       REQ_SYNC },
+   { HgfsServerGetattr,          sizeof (HgfsRequestGetattr),           REQ_SYNC },
+   { HgfsServerSetattr,          sizeof (HgfsRequestSetattr),           REQ_SYNC },
+   { HgfsServerCreateDir,        sizeof (HgfsRequestCreateDir),         REQ_SYNC },
+   { HgfsServerDeleteFile,       sizeof (HgfsRequestDelete),            REQ_SYNC },
+   { HgfsServerDeleteDir,        sizeof (HgfsRequestDelete),            REQ_SYNC },
+   { HgfsServerRename,           sizeof (HgfsRequestRename),            REQ_SYNC },
+   { HgfsServerQueryVolume,      sizeof (HgfsRequestQueryVolume),       REQ_SYNC },
+
+   { HgfsServerOpen,             sizeof (HgfsRequestOpenV2),            REQ_SYNC },
+   { HgfsServerGetattr,          sizeof (HgfsRequestGetattrV2),         REQ_SYNC },
+   { HgfsServerSetattr,          sizeof (HgfsRequestSetattrV2),         REQ_SYNC },
+   { HgfsServerSearchRead,       sizeof (HgfsRequestSearchReadV2),      REQ_SYNC },
+   { HgfsServerSymlinkCreate,    sizeof (HgfsRequestSymlinkCreate),     REQ_SYNC },
+   { HgfsServerServerLockChange, sizeof (HgfsRequestServerLockChange),  REQ_SYNC },
+   { HgfsServerCreateDir,        sizeof (HgfsRequestCreateDirV2),       REQ_SYNC },
+   { HgfsServerDeleteFile,       sizeof (HgfsRequestDeleteV2),          REQ_SYNC },
+   { HgfsServerDeleteDir,        sizeof (HgfsRequestDeleteV2),          REQ_SYNC },
+   { HgfsServerRename,           sizeof (HgfsRequestRenameV2),          REQ_SYNC },
+
+   { HgfsServerOpen,             HGFS_SIZEOF_OP(HgfsRequestOpenV3),             REQ_SYNC },
+   { HgfsServerRead,             HGFS_SIZEOF_OP(HgfsRequestReadV3),             REQ_SYNC },
+   { HgfsServerWrite,            HGFS_SIZEOF_OP(HgfsRequestWriteV3),            REQ_SYNC },
+   { HgfsServerClose,            HGFS_SIZEOF_OP(HgfsRequestCloseV3),            REQ_SYNC },
+   { HgfsServerSearchOpen,       HGFS_SIZEOF_OP(HgfsRequestSearchOpenV3),       REQ_SYNC },
+   { HgfsServerSearchRead,       HGFS_SIZEOF_OP(HgfsRequestSearchReadV3),       REQ_SYNC },
+   { HgfsServerSearchClose,      HGFS_SIZEOF_OP(HgfsRequestSearchCloseV3),      REQ_SYNC },
+   { HgfsServerGetattr,          HGFS_SIZEOF_OP(HgfsRequestGetattrV3),          REQ_SYNC },
+   { HgfsServerSetattr,          HGFS_SIZEOF_OP(HgfsRequestSetattrV3),          REQ_SYNC },
+   { HgfsServerCreateDir,        HGFS_SIZEOF_OP(HgfsRequestCreateDirV3),        REQ_SYNC },
+   { HgfsServerDeleteFile,       HGFS_SIZEOF_OP(HgfsRequestDeleteV3),           REQ_SYNC },
+   { HgfsServerDeleteDir,        HGFS_SIZEOF_OP(HgfsRequestDeleteV3),           REQ_SYNC },
+   { HgfsServerRename,           HGFS_SIZEOF_OP(HgfsRequestRenameV3),           REQ_SYNC },
+   { HgfsServerQueryVolume,      HGFS_SIZEOF_OP(HgfsRequestQueryVolumeV3),      REQ_SYNC },
+   { HgfsServerSymlinkCreate,    HGFS_SIZEOF_OP(HgfsRequestSymlinkCreateV3),    REQ_SYNC },
+   { HgfsServerServerLockChange, sizeof (HgfsRequestServerLockChange),          REQ_SYNC },
+   { HgfsServerWriteWin32Stream, HGFS_SIZEOF_OP(HgfsRequestWriteWin32StreamV3), REQ_SYNC },
    /*
     * XXX
     *    Will be replaced with the real thing when during merge with another outstanding
@@ -2657,10 +2668,10 @@ static struct {
     *    For now just set min size big enough so request gets rejected when
     *    such request comes from the client.
     */
-   { NULL, 0xffffff      },   // Implemented in another change
-   { NULL, 0xffffff      },   // Implemented in another change
-   { HgfsServerRead,             HGFS_SIZEOF_OP(HgfsRequestReadV3)             },
-   { HgfsServerWrite,            HGFS_SIZEOF_OP(HgfsRequestWriteV3)            },
+   { NULL, 0xffffff, REQ_ASYNC      },   // Implemented in another change
+   { NULL, 0xffffff, REQ_ASYNC      },   // Implemented in another change
+   { HgfsServerRead,             HGFS_SIZEOF_OP(HgfsRequestReadV3),             REQ_SYNC },
+   { HgfsServerWrite,            HGFS_SIZEOF_OP(HgfsRequestWriteV3),            REQ_SYNC },
 };
 
 
@@ -2696,8 +2707,7 @@ static struct {
 
 static void
 HgfsServerSessionReceive(HgfsPacket *packet,      // IN: Hgfs Packet
-                         void *clientData,        // IN: session info
-                         HgfsReceiveFlags flags)  // IN: flags to indicate processing
+                         void *clientData)        // IN: session info
 {
    HgfsSessionInfo *session = (HgfsSessionInfo *)clientData;
    HgfsRequest *request;
@@ -2705,7 +2715,7 @@ HgfsServerSessionReceive(HgfsPacket *packet,      // IN: Hgfs Packet
    HgfsOp op;
    HgfsStatus status;
    Bool v4header = FALSE;
-   HgfsInputParam input;
+   HgfsInputParam *input;
    size_t metaPacketSize;
    char *metaPacket;
 
@@ -2714,7 +2724,6 @@ HgfsServerSessionReceive(HgfsPacket *packet,      // IN: Hgfs Packet
    if (session->state == HGFS_SESSION_STATE_CLOSED) {
       LOG(4, ("%s: %d: Received packet after disconnected.\n", __FUNCTION__,
               __LINE__));
-
       return;
    }
 
@@ -2746,11 +2755,11 @@ HgfsServerSessionReceive(HgfsPacket *packet,      // IN: Hgfs Packet
    /* Increment the session's reference count until we send the reply. */
    HgfsServerSessionGet(session);
 
-   id = request->id;
+   packet->id = id = request->id;
    op = request->op;
 
    /* If it is a V4 packet then handle it appropriately. */
-  if (HGFS_V4_LEGACY_OPCODE == op) {
+   if (HGFS_V4_LEGACY_OPCODE == op) {
       HgfsHeader *header = (HgfsHeader *)metaPacket;
       if (metaPacketSize < sizeof *header) {
          status = HGFS_STATUS_PROTOCOL_ERROR;
@@ -2771,13 +2780,55 @@ HgfsServerSessionReceive(HgfsPacket *packet,      // IN: Hgfs Packet
    HGFS_ASSERT_MINIMUM_OP(op);
    if (op < sizeof handlers / sizeof handlers[0]) {
       if (metaPacketSize >= handlers[op].minReqSize) {
-         HgfsInternalStatus internalStatus;
-         input.metaPacket = metaPacket;
-         input.metaPacketSize = metaPacketSize;
-         input.session = session;
-         input.packet = packet;
-         internalStatus = (*handlers[op].handler)(&input);
-         status = HgfsConvertFromInternalStatus(internalStatus);
+         input = Util_SafeMalloc(sizeof *input);
+         input->metaPacket = NULL;
+         input->metaPacketSize = 0;
+         input->session = session;
+         input->packet = packet;
+         input->v4header = v4header;
+         input->op = op;
+
+         /*
+          * Do the decision making here, whether we want to process request
+          * synchronously or asynchronously. Various factors to consider:
+          *
+          * - Use hints from the client, for instance, windows OS explicitly
+          * tells the file system whether request is async or not.
+          * - Determine statically - Simple to reason out, Simple to code
+          */
+         if (packet->supportsAsync &&
+             ((handlers[op].reqType == REQ_ASYNC) || HGFS_DEBUG_ASYNC)) {
+            /*
+             * Asynchronous processing is supported by the transport.
+             * We can release mappings here and reacquire when needed.
+             */
+            HSPU_PutMetaPacket(packet, session);
+            packet->processedAsync = TRUE;
+            LOG(4, ("%s: %d: @@Async\n", __FUNCTION__, __LINE__));
+#ifndef VMX86_TOOLS
+            /* Remove pending requests during poweroff */
+            Poll_Callback(POLL_CS_MAIN,
+                          POLL_FLAG_REMOVE_AT_POWEROFF,
+                          HgfsServer_ProcessRequest,
+                          input,
+                          POLL_REALTIME,
+                          1000,
+                          NULL);
+#else
+            /* Tools code should never process request async */
+            ASSERT(0);
+#endif
+            /* free(input) in HgfsServer_ProcessRequest */
+         } else {
+            LOG(4, ("%s: %d: ##Sync\n", __FUNCTION__, __LINE__));
+            packet->processedAsync = FALSE;
+            input->metaPacket = metaPacket;
+            input->metaPacketSize = metaPacketSize;
+
+            HgfsServer_ProcessRequest(input);
+            /* free(input) in HgfsServer_ProcessRequest */
+         }
+         return;
       } else {
          /*
           * The input packet is smaller than the minimal size needed for the
@@ -2799,44 +2850,112 @@ HgfsServerSessionReceive(HgfsPacket *packet,      // IN: Hgfs Packet
 err:
    /* Send error if we fail to process the op. */
    if (status != HGFS_STATUS_SUCCESS) {
-      char *packetOut;
-      uint32 replySize;
-      size_t replyPacketSize;
-      if (v4header) {
-         HgfsHeader *header;
-         replyPacketSize = sizeof *header;
-         header = HSPU_GetReplyPacket(packet, &replyPacketSize, session);
-         if (!header || sizeof *header > replyPacketSize) {
-            /*
-             * Transport should probably check for minimum hgfs packet size.
-             * How should we send an error back if there is no meta packet ?
-             */
-            return;
-         }
-         HgfsPackReplyHeaderV4(status, 0, (HgfsHeader *)metaPacket, header);
-         packetOut = (char *)header;
-         replySize = sizeof *header;
-      } else {
-         HgfsReply *reply;
-         replyPacketSize = sizeof *reply;
-         reply = HSPU_GetReplyPacket(packet, &replyPacketSize, session);
-         if (!reply || sizeof *reply > replyPacketSize) {
-            /*
-             * Transport should probably check for minimum hgfs packet size.
-             * How should we send an error back if there is no meta packet ?
-             */
-            return;
-         }
-         reply->id = id;
-         reply->status = status;
-         packetOut = (char *)reply;
-         replySize = sizeof *reply;
+      HgfsServer_ReplyWithError(packet, metaPacket, status, v4header, session);
+   }
+}
+
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * HgfsServer_ProcessRequest --
+ *
+ *    Process a single HGFS request, reacquiring guest memory mappings if
+ *    needed, and reply with an error packet on failure.
+ *
+ * Results:
+ *    None.
+ *
+ * Side effects:
+ *    Guest memory mappings may be established.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static void
+HgfsServer_ProcessRequest(void *data)
+{
+   HgfsStatus status;
+   HgfsInternalStatus internalStatus;
+   HgfsInputParam *input = (HgfsInputParam *)data;
+
+   if (!input->metaPacket) {
+      input->metaPacket = HSPU_GetMetaPacket(input->packet,
+                                             &input->metaPacketSize,
+                                             input->session);
+   }
+   ASSERT(input->metaPacket);
+
+   internalStatus = (*handlers[input->op].handler)(input);
+   status = HgfsConvertFromInternalStatus(internalStatus);
+   if (status != HGFS_STATUS_SUCCESS) {
+      HgfsServer_ReplyWithError(input->packet, input->metaPacket,
+                                status, input->v4header, input->session);
+   }
+   free(input);
+}
+
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * HgfsServer_ReplyWithError --
+ *
+ *    Reply with an error packet
+ *
+ * Results:
+ *    None.
+ *
+ * Side effects:
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+void
+HgfsServer_ReplyWithError(HgfsPacket *packet,
+                          const char *metaPacket,
+                          HgfsStatus status,
+                          Bool v4header,
+                          HgfsSessionInfo *session)
+{
+   char *packetOut;
+   uint32 replySize;
+   size_t replyPacketSize;
+
+   if (v4header) {
+      HgfsHeader *header;
+      replyPacketSize = sizeof *header;
+      header = HSPU_GetReplyPacket(packet, &replyPacketSize, session);
+      if (!header || sizeof *header > replyPacketSize) {
+         /*
+          * Transport should probably check for minimum hgfs packet size.
+          * How should we send an error back if there is no meta packet ?
+          */
+         return;
       }
-      LOG(4, ("Error occured for id = %u\n", (uint32)id));
-      if (!HgfsPacketSend(packet, packetOut, replySize, session, 0)) {
-         /* Send failed. Drop the reply. */
-         HSPU_PutReplyPacket(packet, session);
+
+      HgfsPackReplyHeaderV4(status, 0, (HgfsHeader *)metaPacket, header);
+      packetOut = (char *)header;
+      replySize = sizeof *header;
+   } else {
+      HgfsReply *reply;
+      replyPacketSize = sizeof *reply;
+      reply = HSPU_GetReplyPacket(packet, &replyPacketSize, session);
+      if (!reply || sizeof *reply > replyPacketSize) {
+         /*
+          * Transport should probably check for minimum hgfs packet size.
+          * How should we send an error back if there is no meta packet ?
+          */
+         return;
       }
+      reply->id = packet->id;
+      reply->status = status;
+      packetOut = (char *)reply;
+      replySize = sizeof *reply;
+   }
+   LOG(0, ("Error occured for id = %u %d status\n", (uint32)packet->id, status));
+   if (!HgfsPacketSend(packet, packetOut, replySize, session, 0)) {
+      /* Send failed. Drop the reply. */
+      HSPU_PutReplyPacket(packet, session);
    }
 }
 
@@ -3268,8 +3387,7 @@ HgfsServer_SetHandleCounter(uint32 newHandleCounter)
 void
 HgfsServer_ProcessPacket(char const *packetIn,   // IN: incoming packet
                          char *packetOut,        // OUT: outgoing packet
-                         size_t *packetLen,      // IN/OUT: packet length
-                         HgfsReceiveFlags flags) // IN: flags
+                         size_t *packetLen)     // IN/OUT: packet length
 {
    HgfsPacket packet;
    ASSERT(packetIn);
@@ -3305,10 +3423,10 @@ HgfsServer_ProcessPacket(char const *packetIn,   // IN: incoming packet
    packet.metaPacketSize = *packetLen;
    packet.replyPacket = packetOut;
    packet.replyPacketSize = HGFS_LARGE_PACKET_MAX;
+   packet.supportsAsync = FALSE;
 
    HgfsServerSessionReceive(&packet,
-                            hgfsStaticSession.session,
-                            0);
+                            hgfsStaticSession.session);
 
    /*
     * At this point, all the HGFS ops send reply synchronously. So
@@ -3381,7 +3499,6 @@ HgfsPacketSend(HgfsPacket *packet,            // IN/OUT: Hgfs Packet
    Bool result = FALSE;
 
    ASSERT(packet);
-   ASSERT(packetOut);
    ASSERT(session);
 
    if (session->state == HGFS_SESSION_STATE_OPEN) {
@@ -4632,6 +4749,7 @@ HgfsValidatePacket(char const *packetIn,        // IN: request packet
 {
    HgfsRequest *request = (HgfsRequest *)packetIn;
    Bool result = TRUE;
+
    if (packetSize < sizeof *request) {
       return FALSE;
    }
@@ -4645,7 +4763,7 @@ HgfsValidatePacket(char const *packetIn,        // IN: request packet
                packetSize >= header->packetSize;
    } else {
        result = packetSize >= sizeof *request;
-  }
+   }
    return result;
 }
 
@@ -4682,6 +4800,7 @@ HgfsGetPayloadSize(char const *packetIn,        // IN: request packet
       ASSERT(header->packetSize >= header->headerSize);
       result = header->packetSize - header->headerSize;
    }
+
    return result;
 }
 
@@ -4973,7 +5092,7 @@ HgfsUnpackOpenRequest(char const *packetIn,        // IN: request packet
 
    ASSERT(packetIn);
    ASSERT(openInfo);
-   
+
    if (!HgfsParseRequest(packetIn, packetSize, &payload, &payloadSize, &op)) {
       return FALSE;
    }
index 2d9e0f3b0f90e2dfa4f38b7488f846b785b01db6..1d982fa76642576d2ad04423d8033d72c7d2ea32 100644 (file)
@@ -92,6 +92,7 @@
       }                                                         \
    } while(0)
 
+#define HGFS_DEBUG_ASYNC   (0)
 
 /*
  * Does this platform have oplock support? We define it here to avoid long
@@ -112,6 +113,11 @@ typedef struct HgfsLocalId {
    uint64 fileId;
 } HgfsLocalId;
 
+typedef enum {
+   REQ_ASYNC,    /* Hint that request should be processed Async */
+   REQ_SYNC,     /*               "                       Sync  */
+} RequestHint;
+
 
 /* Three possible filenode states */
 typedef enum {
@@ -438,6 +444,8 @@ struct HgfsInputParam {
    size_t metaPacketSize;
    HgfsSessionInfo *session;
    HgfsPacket *packet;
+   Bool v4header;
+   HgfsOp op;
 }
 HgfsInputParam;
 
@@ -928,7 +936,7 @@ HSPU_GetBuf(HgfsPacket *packet,           // IN/OUT: Hgfs Packet
             void **buf,                   // OUT: Contigous buffer
             size_t bufSize,               // IN: Size of buffer
             Bool *isAllocated,            // OUT: Was buffer allocated ?
-            uint32 mappingType,           // IN: Readable/ Writeable ?
+            MappingType mappingType,      // IN: Readable/ Writeable ?
             HgfsSessionInfo *session);    // IN: Session Info
 
 void *
@@ -938,7 +946,7 @@ HSPU_GetMetaPacket(HgfsPacket *packet,          // IN/OUT: Hgfs Packet
 
 void *
 HSPU_GetDataPacketBuf(HgfsPacket *packet,        // IN/OUT: Hgfs Packet
-                      uint32 mappingType,        // IN: Readable/ Writeable ?
+                      MappingType mappingType,   // IN: Readable/ Writeable ?
                       HgfsSessionInfo *session); // IN: Session Info
 
 void
@@ -951,7 +959,7 @@ HSPU_PutBuf(HgfsPacket *packet,        // IN/OUT: Hgfs Packet
             void **buf,                // IN/OUT: Buffer to be freed
             size_t *bufSize,           // IN: Size of the buffer
            Bool *isAllocated,         // IN: Was buffer allocated ?
-            uint32 mappingType,        // IN: Readable/ Writeable ?
+            MappingType mappingType,        // IN: Readable/ Writeable ?
            HgfsSessionInfo *session); // IN: Session info
 
 void
index 764080e5458cabfec24b77deadebcb890f059257..1e6cad5601457da7be057513572517f59372dfa1 100644 (file)
@@ -3598,7 +3598,7 @@ HgfsServerRead(HgfsInputParam *input)  // IN: Input params
 
       replySize = HGFS_REP_PAYLOAD_SIZE_V3(reply) - 1;
       /* Get a data packet buffer that is writeable */
-      payload = HSPU_GetDataPacketBuf(input->packet, HGFS_BUF_WRITEABLE, session);
+      payload = HSPU_GetDataPacketBuf(input->packet, BUF_WRITEABLE, session);
       if (!payload) {
          ASSERT_DEVEL(payload);
          status = EPROTO;
@@ -3826,7 +3826,7 @@ HgfsServerWrite(HgfsInputParam *input)  // IN: Input params
       reply->reserved = 0;
       actualSize = &reply->actualSize;
       /* Get a data packet buffer that is readable */
-      payload = HSPU_GetDataPacketBuf(input->packet, HGFS_BUF_READABLE, session);
+      payload = HSPU_GetDataPacketBuf(input->packet, BUF_READABLE, session);
       if (!payload) {
          ASSERT_DEVEL(payload);
          status = EPROTO;
index 371e40350efe47bad2f1c29b8a67e690b7812a1d..1e2889bafd3631c6fb94f7b2a37e8ef5249ea0fa 100644 (file)
@@ -83,7 +83,7 @@ HSPU_GetReplyPacket(HgfsPacket *packet,        // IN/OUT: Hgfs Packet
          packet->replyPacket = HSPU_GetBuf(packet, 0, &packet->metaPacket,
                                            packet->metaPacketSize,
                                            &packet->metaPacketIsAllocated,
-                                           HGFS_BUF_WRITEABLE,
+                                           BUF_WRITEABLE,
                                            session);
          /*
           * Really this can never happen, we would have caught bad physical address
@@ -159,7 +159,7 @@ HSPU_GetMetaPacket(HgfsPacket *packet,        // IN/OUT: Hgfs Packet
    return HSPU_GetBuf(packet, 0, &packet->metaPacket,
                       packet->metaPacketSize,
                       &packet->metaPacketIsAllocated,
-                      HGFS_BUF_WRITEABLE, session);
+                      BUF_WRITEABLE, session);
 }
 
 
@@ -208,7 +208,7 @@ HSPU_GetDataPacketIov(HgfsPacket *packet,       // IN/OUT: Hgfs Packet
 
 void *
 HSPU_GetDataPacketBuf(HgfsPacket *packet,       // IN/OUT: Hgfs Packet
-                      uint32 mappingType,       // IN: Writeable/Readable
+                      MappingType mappingType,  // IN: Writeable/Readable
                       HgfsSessionInfo *session) // IN: Session Info
 {
    packet->dataMappingType = mappingType;
@@ -240,7 +240,7 @@ HSPU_GetBuf(HgfsPacket *packet,           // IN/OUT: Hgfs Packet
             void **buf,                   // OUT: Contigous buffer
             size_t  bufSize,              // IN: Size of buffer
             Bool *isAllocated,            // OUT: Was buffer allocated ?
-            uint32 mappingType,           // IN: Readable/Writeable ?
+            MappingType mappingType,      // IN: Readable/Writeable ?
             HgfsSessionInfo *session)     // IN: Session Info
 {
    uint32 iovCount;
@@ -256,19 +256,18 @@ HSPU_GetBuf(HgfsPacket *packet,           // IN/OUT: Hgfs Packet
       return NULL;
    }
 
-   ASSERT_DEVEL(session->channelCbTable);
    if (!session->channelCbTable) {
       return NULL;
    }
 
-   if (mappingType == HGFS_BUF_WRITEABLE) {
+   if (mappingType == BUF_WRITEABLE) {
       func = session->channelCbTable->getWriteVa;
    } else {
-      ASSERT(mappingType == HGFS_BUF_READABLE);
+      ASSERT(mappingType == BUF_READABLE);
       func = session->channelCbTable->getReadVa;
    }
 
-   ASSERT_DEVEL(func);
+   /* Looks like we are in the middle of poweroff. */
    if (func == NULL) {
       return NULL;
    }
@@ -281,7 +280,7 @@ HSPU_GetBuf(HgfsPacket *packet,           // IN/OUT: Hgfs Packet
 
       /* Debugging check: Iov in VMCI should never cross page boundary */
       ASSERT_DEVEL(packet->iov[iovCount].len <=
-      (4096 - (packet->iov[iovCount].pa & 0xfff)));
+      (PAGE_SIZE - PAGE_OFFSET(packet->iov[iovCount].pa)));
 
       packet->iov[iovCount].va = func(packet->iov[iovCount].pa,
                                       packet->iov[iovCount].len,
@@ -362,7 +361,7 @@ HSPU_PutMetaPacket(HgfsPacket *packet,       // IN/OUT: Hgfs Packet
    HSPU_PutBuf(packet, 0, &packet->metaPacket,
                &packet->metaPacketSize,
                &packet->metaPacketIsAllocated,
-               HGFS_BUF_WRITEABLE, session);
+               BUF_WRITEABLE, session);
 }
 
 
@@ -454,7 +453,7 @@ HSPU_PutBuf(HgfsPacket *packet,        // IN/OUT: Hgfs Packet
    }
 
    if (*isAllocated) {
-      if (mappingType == HGFS_BUF_WRITEABLE) {
+      if (mappingType == BUF_WRITEABLE) {
          HSPU_CopyBufToIovec(packet, startIndex, *buf, *bufSize, session);
       }
       LOG(10, ("%s: Hgfs Freeing buffer \n", __FUNCTION__));
@@ -472,7 +471,6 @@ HSPU_PutBuf(HgfsPacket *packet,        // IN/OUT: Hgfs Packet
       ASSERT(size <= 0);
    }
    *buf = NULL;
-   *bufSize = 0;
 }
 
 
@@ -575,7 +573,7 @@ HSPU_CopyBufToIovec(HgfsPacket *packet,       // IN/OUT: Hgfs Packet
 
       /* Debugging check: Iov in VMCI should never cross page boundary */
       ASSERT_DEVEL(packet->iov[iovCount].len <=
-                  (4096 - (packet->iov[iovCount].pa & 0xfff)));
+                  (PAGE_SIZE - PAGE_OFFSET(packet->iov[iovCount].pa)));
 
       packet->iov[iovCount].va = session->channelCbTable->getWriteVa(packet->iov[iovCount].pa,
                                                      packet->iov[iovCount].len,
index 333354360eb97721e08ab75c0c53ba071ce101ea..ed41901d5984b7c433a214f7be1d429ddd171ca5 100644 (file)
@@ -134,7 +134,7 @@ HgfsServerManagerRpcInDispatch(char const **result,        // OUT
 
    ASSERT(args[0] == ' ');
    packetSize = argsSize - 1;
-   HgfsServer_ProcessPacket((char const *)(args + 1), packet, &packetSize, 0);
+   HgfsServer_ProcessPacket((char const *)(args + 1), packet, &packetSize);
 
    *result = packet;
    *resultLen = packetSize;
index 29c8faef09a0071ff54640d5280c50735203bf82..412722e779aa4ae7e9cdac56e53b455d1f1ebee6 100644 (file)
@@ -35,9 +35,6 @@ typedef struct HgfsServerStateLogger {
    void                       *loggerData;   // logger callback private data
 } HgfsServerStateLogger;
 
-#define HGFS_BUF_READABLE  0x0000cafe
-#define HGFS_BUF_WRITEABLE 0x0000babe
-
 typedef
 struct HgfsVmxIov {
    void *va;           /* Virtual addr */
@@ -50,10 +47,26 @@ typedef
 struct HgfsVaIov {
    void *va;
    uint32 len;
-} HgfsVaIov;
+}HgfsVaIov;
+
+typedef enum {
+   BUF_READABLE,  /* Establish readable mappings */
+   BUF_WRITEABLE, /* Establish writeable mappings */
+} MappingType;
 
 typedef
 struct HgfsPacket {
+   uint64 id;
+
+   /* Does the transport support Async operations ? */
+   Bool supportsAsync;
+
+   /* Does transport need to send Async reply ? */
+   Bool processedAsync;
+
+   /* Is the packet guest initiated ? */
+   Bool guestInitiated;
+
    /* For metapacket we always establish writeable mappings */
    void *metaPacket;
    size_t metaPacketSize;
@@ -64,7 +77,7 @@ struct HgfsPacket {
    uint32 dataPacketIovIndex;
    Bool dataPacketIsAllocated;
    /* What type of mapping was established - readable/ writeable ? */
-   uint32 dataMappingType;
+   MappingType dataMappingType;
 
    void *replyPacket;
    size_t replyPacketSize;
@@ -98,17 +111,6 @@ typedef uint32 HgfsSendFlags;
 #define HGFS_SEND_CAN_DELAY         (1 << 0)
 #define HGFS_SEND_NO_COMPLETE       (1 << 1)
 
-/*
- * Receive flags.
- *
- * Contains a bitwise OR of a combination of the following flags:
- * HGFS_RECEIVE_CAN_DELAY - directs the server to handle the message
- * asynchronously.
- */
-
-typedef uint32 HgfsReceiveFlags;
-
-#define HGFS_RECEIVE_CAN_DELAY      (1 << 0)
 
 typedef Bool
 HgfsSessionSendFunc(void *opaqueSession,  // IN
@@ -128,7 +130,7 @@ typedef struct HgfsServerSessionCallbacks {
    Bool (*connect)(void *, HgfsServerChannelCallbacks *, void **);
    void (*disconnect)(void *);
    void (*close)(void *);
-   void (*receive)(HgfsPacket *packet, void *, HgfsReceiveFlags);
+   void (*receive)(HgfsPacket *packet, void *);
    void (*invalidateObjects)(void *, DblLnkLst_Links *);
    void (*sendComplete)(HgfsPacket *, void *);
 } HgfsServerSessionCallbacks;
@@ -142,8 +144,7 @@ void HgfsServer_SetHandleCounter(uint32 newHandleCounter);
 #ifdef VMX86_TOOLS
 void HgfsServer_ProcessPacket(char const *packetIn,
                               char *packetOut,
-                              size_t *packetSize,
-                              HgfsReceiveFlags flags);
+                              size_t *packetSize);
 #endif
 
 /*
index 109b6b37173f33b9bcb86c2fbbd27f26eb87bdf2..72ef91075bdd1e292256f108dc86d36e70f9f456 100644 (file)
@@ -98,15 +98,28 @@ HgfsSocketHeader;
  *    VMCI specific data structures, macros     *
  ************************************************/
 
-#define HGFS_VMCI_VERSION_1          0xabcdabcd
+#define HGFS_VMCI_VERSION_1          0x1
 
-/* Helpful for debugging purposes */
-#define HGFS_VMCI_IO_PENDING         0xdeadbeef
-#define HGFS_VMCI_IO_COMPLETE        0xfaceb00c
-#define HGFS_VMCI_MORE_SPACE_NEEDED  0xc00becaf
-#define HGFS_VMCI_IO_FAILED          0xbeef0000
+typedef enum {
+   HGFS_TS_IO_PENDING,
+   HGFS_TS_IO_COMPLETE,
+   HGFS_TS_IO_FAILED,
+} HgfsTransportRequestState;
+
+typedef enum {
+   HGFS_ASYNC_IOREQ_SHMEM,
+   HGFS_ASYNC_IOREQ_GET_PAGES,
+   HGFS_ASYNC_IOREP,
+} HgfsAsyncReplyFlags;
+
+typedef enum {
+   HGFS_TH_REP_GET_PAGES,
+   HGFS_TH_REQUEST,
+   HGFS_TH_TERMINATE_SESSION,
+} HgfsTransportPacketType;
 
 #define HGFS_VMCI_TRANSPORT_ERROR   (VMCI_ERROR_CLIENT_MIN - 1)
+#define HGFS_VMCI_VERSION_MISMATCH  (VMCI_ERROR_CLIENT_MIN - 2)
 
 /*
  * Used By : Guest and Host
@@ -122,6 +135,24 @@ struct HgfsIov {
 #include "vmware_pack_end.h"
 HgfsIov;
 
+/*
+ * Used By : Guest and Host
+ * Lives in : Inside HgfsVmciTransportHeader
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct HgfsAsyncIov {
+   uint64 pa;                 /* Physical addr */
+   uint64 va;                 /* Virtual addr */
+   uint32 len;                /* length of data; should be <= PAGE_SIZE */
+   uint64 index;              /* Guest opaque data; should not be changed by
+                                 host */
+   Bool chain;                /* Are pages chained ? */
+}
+#include "vmware_pack_end.h"
+HgfsAsyncIov;
+
 /*
  * Every VMCI request will have this transport Header sent over
  * in the datagram by the Guest OS.
@@ -133,8 +164,12 @@ typedef
 #include "vmware_pack_begin.h"
 struct HgfsVmciTransportHeader {
    uint32 version;                          /* Version number */
+   HgfsTransportPacketType pktType;         /* Type of packet */
    uint32 iovCount;                         /* Number of iovs */
-   HgfsIov iov[1];                          /* (PA, len) */
+   union {
+      HgfsIov iov[1];                       /* (PA, len) */
+      HgfsAsyncIov asyncIov[1];
+   };
 }
 #include "vmware_pack_end.h"
 HgfsVmciTransportHeader;
@@ -149,12 +184,41 @@ HgfsVmciTransportHeader;
 typedef
 #include "vmware_pack_begin.h"
 struct HgfsVmciTransportStatus {
-   uint32 status;              /* IO_PENDING, COMPLETE, MORE SPACE NEEDED, FAILED etc */
-   uint32 flags;               /* ASYNC_PEND, VALID_ASYNC_PEND_REPLY */
-   uint32 size;                /* G->H: Size of the packet,H->G: How much more space is needed */
+   HgfsTransportRequestState status; /* IO_PENDING, IO_COMPLETE, IO_FAILED etc */
+   uint32 size;                      /* G->H: Size of the packet,H->G: How much more space is needed */
 }
 #include "vmware_pack_end.h"
 HgfsVmciTransportStatus;
 
+typedef
+#include "vmware_pack_begin.h"
+struct HgfsVmciAsyncResponse {
+   uint64 id;            /* Id corresponding to the guest request */
+}
+#include "vmware_pack_end.h"
+HgfsVmciAsyncResponse;
+
+typedef
+#include "vmware_pack_begin.h"
+struct HgfsVmciAsyncShmem {
+   uint32 count;          /* Number of iovs */
+   HgfsAsyncIov iov[1];
+}
+#include "vmware_pack_end.h"
+HgfsVmciAsyncShmem;
+
+typedef
+#include "vmware_pack_begin.h"
+struct HgfsVmciAsyncReply {
+   uint32 version;
+   HgfsAsyncReplyFlags pktType;
+   union {
+     HgfsVmciAsyncResponse response;
+     HgfsVmciAsyncShmem shmem;
+   };
+}
+#include "vmware_pack_end.h"
+HgfsVmciAsyncReply;
+
 #endif /* _HGFS_TRANSPORT_H_ */
 
index 23d5ab28035dc9ef39aedcba48086b0f5504bd95..11079a61dfffe417078660ce7454e3001f9a549a 100644 (file)
@@ -3526,8 +3526,7 @@ VixToolsProcessHgfsPacket(VixCommandHgfsSendPacket *requestMsg,   // IN
     */
    HgfsServer_ProcessPacket(hgfsPacket,        // packet in buf
                             hgfsReplyPacket,   // packet out buf
-                            &hgfsPacketSize,   // in/out size
-                            0);                // in flags
+                            &hgfsPacketSize);  // in/out size
 #endif
 
    if (NULL != resultValueResult) {
index 735aa0bf27b0452a133bafac59d2c01e7427a90d..20112fec866f0cda9f9d4e54be8147e77cb44a2e 100644 (file)
@@ -44,6 +44,8 @@
 #include <linux/timer.h>
 /* Must be included after sched.h. */
 #include <linux/smp_lock.h>
+#include <linux/interrupt.h> /* for spin_lock_bh */
+
 
 #include "hgfsDevLinux.h"
 #include "hgfsProto.h"
@@ -215,9 +217,9 @@ HgfsTransportAddPendingRequest(HgfsReq *req)   // IN: Request to add
 {
    ASSERT(req);
 
-   spin_lock(&hgfsRepQueueLock);
+   spin_lock_bh(&hgfsRepQueueLock);
    list_add_tail(&req->list, &hgfsRepPending);
-   spin_unlock(&hgfsRepQueueLock);
+   spin_unlock_bh(&hgfsRepQueueLock);
 }
 
 
@@ -237,14 +239,14 @@ HgfsTransportAddPendingRequest(HgfsReq *req)   // IN: Request to add
  *----------------------------------------------------------------------
  */
 
-static void
+void
 HgfsTransportRemovePendingRequest(HgfsReq *req)   // IN: Request to dequeue
 {
    ASSERT(req);
 
-   spin_lock(&hgfsRepQueueLock);
+   spin_lock_bh(&hgfsRepQueueLock);
    list_del_init(&req->list);
-   spin_unlock(&hgfsRepQueueLock);
+   spin_unlock_bh(&hgfsRepQueueLock);
 }
 
 
@@ -270,7 +272,7 @@ HgfsTransportFlushPendingRequests(void)
 {
    struct HgfsReq *req;
 
-   spin_lock(&hgfsRepQueueLock);
+   spin_lock_bh(&hgfsRepQueueLock);
 
    list_for_each_entry(req, &hgfsRepPending, list) {
       if (req->state == HGFS_REQ_STATE_SUBMITTED) {
@@ -280,7 +282,7 @@ HgfsTransportFlushPendingRequests(void)
       }
    }
 
-   spin_unlock(&hgfsRepQueueLock);
+   spin_unlock_bh(&hgfsRepQueueLock);
 }
 
 /*
@@ -306,7 +308,7 @@ HgfsTransportGetPendingRequest(HgfsHandle id)   // IN: id of the request
 {
    HgfsReq *cur, *req = NULL;
 
-   spin_lock(&hgfsRepQueueLock);
+   spin_lock_bh(&hgfsRepQueueLock);
 
    list_for_each_entry(cur, &hgfsRepPending, list) {
       if (cur->id == id) {
@@ -316,7 +318,7 @@ HgfsTransportGetPendingRequest(HgfsHandle id)   // IN: id of the request
       }
    }
 
-   spin_unlock(&hgfsRepQueueLock);
+   spin_unlock_bh(&hgfsRepQueueLock);
 
    return req;
 }
@@ -478,11 +480,13 @@ out:
    compat_mutex_unlock(&hgfsChannelLock);
 
    if (likely(ret == 0)) {
-      /* Send succeeded, wait for the reply */
-      if (wait_event_interruptible(req->queue,
-                                   req->state == HGFS_REQ_STATE_COMPLETED)) {
-         ret = -EINTR; /* Interrupted by some signal. */
-      }
+      /*
+       * Send succeeded, wait for the reply.
+       * Right now, we cannot cancel requests once they
+       * are dispatched to the host.
+       */
+      wait_event(req->queue,
+                 req->state == HGFS_REQ_STATE_COMPLETED);
    }
 
    HgfsTransportRemovePendingRequest(req);
index e4ce8e3bc7ee13572ac739fca437ae3d1af5bbc6..f2b39bd6c22bc61952c7e1d1944297631c6a0273 100644 (file)
@@ -61,6 +61,7 @@ HgfsReq *HgfsTransportAllocateRequest(size_t payloadSize);
 void HgfsTransportFreeRequest(HgfsReq *req);
 int HgfsTransportSendRequest(HgfsReq *req);
 HgfsReq *HgfsTransportGetPendingRequest(HgfsHandle id);
+void HgfsTransportRemovePendingRequest(HgfsReq *req);
 void HgfsTransportFinishRequest(HgfsReq *req, Bool success, Bool do_put);
 void HgfsTransportFlushRequests(void);
 void HgfsTransportMarkDead(void);
index 617165293ee4eee98d8a1ce651ba4832f415df74..d44a1b04245daf55dbad8228ab47dd90f58c9c17 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <linux/errno.h>
 #include <linux/moduleparam.h>
+#include <linux/interrupt.h>  /* for spin_lock_bh */
 #include <asm/io.h>
 
 #include "compat_mm.h"
@@ -45,6 +46,7 @@ static void HgfsVmciChannelClose(HgfsTransportChannel *channel);
 static HgfsReq * HgfsVmciChannelAllocate(size_t payloadSize);
 void HgfsVmciChannelFree(HgfsReq *req);
 static int HgfsVmciChannelSend(HgfsTransportChannel *channel, HgfsReq *req);
+static void HgfsRequestAsyncDispatch(char *payload, uint32 size);
 
 int USE_VMCI = 0;
 module_param(USE_VMCI, int, 0444);
@@ -60,16 +62,307 @@ static HgfsTransportChannel channel = {
    .status = HGFS_CHANNEL_NOTCONNECTED
 };
 
+static spinlock_t vmciRequestProcessLock;
+
+typedef struct HgfsShmemPage {
+   uint64 va;
+   uint64 pa;
+   Bool free;
+} HgfsShmemPage;
+
+typedef struct HgfsShmemPages {
+   HgfsShmemPage *list;
+   uint32 totalPageCount;
+   uint32 freePageCount;
+} HgfsShmemPages;
+
+HgfsShmemPages gHgfsShmemPages;
+#define HGFS_VMCI_SHMEM_PAGES (16)
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * HgfsRequestAsyncDispatch --
+ *
+ *   XXX Main dispatcher function. Currently just a stub. Needs to run
+ *   in atomic context.
+ *
+ * Results:
+ *    None
+ *
+ * Side effects:
+ *    None
+ *
+ *----------------------------------------------------------------------
+ */
+
+static void
+HgfsRequestAsyncDispatch(char *payload, // IN: request header
+                         uint32 size)   // IN: size of payload
+{
+   HgfsRequest *reqHeader = (HgfsRequest *)payload;
+
+   LOG(4, (KERN_WARNING "Size in Dispatch %u\n", size));
+
+   switch (reqHeader->op) {
+   case HGFS_OP_NOTIFY_V4: {
+      LOG(4, (KERN_WARNING "Calling HGFS_OP_NOTIFY_V4 dispatch function\n"));
+      break;
+   }
+   default:
+      LOG(4, (KERN_WARNING "%s: Unknown opcode = %d", __func__, reqHeader->op));
+   }
+}
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * HgfsRequestAsyncShmemDispatch --
+ *
+ *    Shared memory dispatcher. It extracts packets from the shared
+ *    memory and dispatches to the main hgfs dispatcher function. When
+ *    the buffer is larger than 4K, we may fail to deliver notifications.
+ *    Main dispatcher function should run in atomic context.
+ *
+ * Results:
+ *    None
+ *
+ * Side effects:
+ *    None
+ *
+ *----------------------------------------------------------------------
+ */
+
+static void
+HgfsRequestAsyncShmemDispatch(HgfsAsyncIov *iov, // IN: request vectors
+                              uint32 count)      // IN: number of iovs
+{
+   uint32 i;
+   char *buf = NULL;
+   uint32 size = 0;
+   Bool chainStarted = FALSE;
+   uint32 offset = 0;
+   uint32 copySize;
+   uint64 prevIndex = -1;
+   uint64 currIndex;
+   size_t va;
+
+   LOG(10, (KERN_WARNING "%s count = %u\n",__FUNCTION__, count));
+
+   /*
+    * When requests cross 4K boundary we have to chain pages together
+    * since guest passes 4k pages to the host. Here is how chaining works
+    *
+    * - All the vectors except the last one in the chain set iov[].chain
+    * to TRUE.
+    * - Every iov[].len field indicates remaining bytes. So the first
+    * vector will contain total size of the request while the last vector
+    * will contain only size of data present in last vector.
+    */
+
+   for (i = 0; i < count; i++) {
+      va = (size_t)iov[i].va;
+      currIndex = iov[i].index;
+
+      if (LIKELY(!iov[i].chain)) {
+         /* When the chain ends we dispatch the datagram.*/
+         if (!chainStarted) {
+            buf = (char *)va;
+            LOG(8, (KERN_WARNING " Chain wasn't started...\n"));
+            size = iov[i].len;
+         } else {
+            memcpy(buf + offset, (char *)va, iov[i].len);
+         }
+         ASSERT(buf && size);
+         HgfsRequestAsyncDispatch(buf, size);
+         if (chainStarted) {
+            /* Well chain just ended, we shall free the buffer. */
+            chainStarted = FALSE;
+            kfree(buf);
+         }
+      } else {
+           if (!chainStarted) {
+              LOG(8, (KERN_WARNING "Started chain ...\n"));
+              size = iov[i].len;
+              buf = kmalloc(size, GFP_ATOMIC);
+              ASSERT_DEVEL(buf);
+              if (!buf) {
+                 /* Skip this notification, move onto next. */
+                 i += (size - 1) / PAGE_SIZE;
+                 continue;
+              }
+              chainStarted = TRUE;
+              offset = 0;
+           }
+           copySize = MIN(iov[i].len, PAGE_SIZE);
+           memcpy(buf + offset, (char *)va, copySize);
+           offset += copySize;
+      }
+
+      if (currIndex != prevIndex) {
+         /* This is new page. Mark is as free. */
+         gHgfsShmemPages.list[currIndex].free = TRUE;
+         gHgfsShmemPages.freePageCount++;
+      }
+      prevIndex = currIndex;
+   }
+
+   ASSERT(gHgfsShmemPages.freePageCount <= gHgfsShmemPages.totalPageCount);
+   LOG(8, (KERN_WARNING "Page count %u %u ...\n", gHgfsShmemPages.freePageCount,
+           gHgfsShmemPages.totalPageCount));
+}
+
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * HgfsVmciChannelPassGuestPages --
+ *
+ *      Passes down free pages to the hgfs Server. HgfsServer will use these pages
+ *      for sending change notification, oplock breaks etc.
+ *
+ *      XXX It seems safe to call VMCIDatagram_Send in atomic context.
+ *
+ * Results:
+ *      None
+ *
+ * Side effects:
+ *      None
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static Bool
+HgfsVmciChannelPassGuestPages(HgfsTransportChannel *channel) // IN:
+{
+   Bool retVal = TRUE;
+   int ret;
+   int i;
+   int j = 0;
+   size_t transportHeaderSize;
+   HgfsVmciTransportHeader *transportHeader = NULL;
+   VMCIDatagram *dg;
+
+   if (!gHgfsShmemPages.freePageCount) {
+      return TRUE;
+   }
+
+   transportHeaderSize = sizeof (HgfsVmciTransportHeader) +
+          (gHgfsShmemPages.freePageCount - 1) * sizeof (HgfsAsyncIov);
+
+   dg = kmalloc(sizeof *dg + transportHeaderSize, GFP_ATOMIC);
+   if (!dg) {
+      LOG(4, (KERN_WARNING "%s failed to allocate\n", __func__));
+      retVal = FALSE;
+      goto exit;
+   }
+
+   transportHeader = VMCI_DG_PAYLOAD(dg);
+
+   for (i = 0; i < gHgfsShmemPages.totalPageCount; i++) {
+      if (gHgfsShmemPages.list[i].free) {
+         transportHeader->asyncIov[j].index = i;
+         transportHeader->asyncIov[j].va = gHgfsShmemPages.list[i].va;
+         transportHeader->asyncIov[j].pa = gHgfsShmemPages.list[i].pa;
+         transportHeader->asyncIov[j].len = PAGE_SIZE;
+         j++;
+      }
+   }
+
+   dg->src = *(VMCIHandle *)channel->priv;
+   dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_HGFS_TRANSPORT);
+   dg->payloadSize = transportHeaderSize;
+
+   transportHeader->version = HGFS_VMCI_VERSION_1;
+   ASSERT(gHgfsShmemPages.freePageCount == j);
+   transportHeader->iovCount = j;
+   transportHeader->pktType = HGFS_TH_REP_GET_PAGES;
+
+   LOG(10, (KERN_WARNING "Sending %d Guest pages \n", i));
+   if ((ret = VMCIDatagram_Send(dg)) < VMCI_SUCCESS) {
+      if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
+         LOG(0, (KERN_WARNING "HGFS Transport error occured. Don't blame VMCI\n"));
+      }
+      retVal = FALSE;
+   }
+
+exit:
+   if (retVal) {
+      /* We successfully sent pages to the host. Mark all pages as allocated */
+      for (i = 0; i < gHgfsShmemPages.totalPageCount; i++) {
+         gHgfsShmemPages.list[i].free = FALSE;
+      }
+      gHgfsShmemPages.freePageCount = 0;
+   }
+   kfree(dg);
+   return retVal;
+}
+
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * HgfsVmciChannelCompleteRequest --
+ *
+ *      Completes the request that was serviced asynchronously by the server.
+ *
+ * Results:
+ *      None
+ *
+ * Side effects:
+ *      Request may be removed from the queue and sleeping thread is woken up.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+void
+HgfsVmciChannelCompleteRequest(uint64 id) // IN: Request ID
+{
+   HgfsVmciTransportStatus *transportStatus;
+   HgfsReq *req;
+
+   spin_lock_bh(&vmciRequestProcessLock);
+
+   /* Reference is taken here */
+   req = HgfsTransportGetPendingRequest(id);
+   if (!req) {
+      goto exit;
+   }
+
+   transportStatus = (HgfsVmciTransportStatus *)req->buffer;
+   if (transportStatus->status != HGFS_TS_IO_COMPLETE) {
+      goto exit;
+   }
+
+   /* Request is completed (yay!), let's remove it from the list */
+   HgfsTransportRemovePendingRequest(req);
+
+   req->payloadSize = transportStatus->size;
+   HgfsCompleteReq(req);
+
+exit:
+   if (req) {
+      /* Drop the reference taken in *GetPendingRequest */
+      HgfsRequestPutRef(req);
+   }
+   spin_unlock_bh(&vmciRequestProcessLock);
+}
+
 
 /*
  *-----------------------------------------------------------------------------
  *
  * HgfsVmciChannelCallback --
  *
- *      Called when VMCI datagram is received.
+ *      Called when VMCI datagram is received. Note: This function runs inside
+ *      tasklet. It means that this function cannot run concurrently with
+ *      itself, thus it is safe to manipulate gHgfsShmemPages without locks. If this
+ *      ever changes, please consider using appropriate locks.
  *
  * Results:
- *      Always 0.
+ *      0 on Success, < 0 on Failure.
  *
  * Side effects:
  *      None
@@ -77,8 +370,40 @@ static HgfsTransportChannel channel = {
  *-----------------------------------------------------------------------------
  */
 
-static int HgfsVmciChannelCallback(void *data, VMCIDatagram *dg)
+static int HgfsVmciChannelCallback(void *data,       // IN: unused
+                                   VMCIDatagram *dg) // IN: datagram
 {
+   HgfsVmciAsyncReply *reply  = (HgfsVmciAsyncReply *)VMCI_DG_PAYLOAD(dg);
+   HgfsTransportChannel *channel;
+
+   LOG(10, (KERN_WARNING "Received VMCI channel Callback \n"));
+
+   if (reply->version != HGFS_VMCI_VERSION_1) {
+      return HGFS_VMCI_VERSION_MISMATCH;
+   }
+
+   switch (reply->pktType) {
+
+   case HGFS_ASYNC_IOREP:
+      LOG(10, (KERN_WARNING "Received ID%"FMT64"x \n", reply->response.id));
+      HgfsVmciChannelCompleteRequest(reply->response.id);
+      break;
+
+   case HGFS_ASYNC_IOREQ_SHMEM:
+      HgfsRequestAsyncShmemDispatch(reply->shmem.iov, reply->shmem.count);
+      break;
+
+   case HGFS_ASYNC_IOREQ_GET_PAGES:
+      channel = HgfsGetVmciChannel();
+      LOG(10, (KERN_WARNING "Should send pages to the host\n"));
+      HgfsVmciChannelPassGuestPages(channel);
+      break;
+
+   default:
+      ASSERT(0);
+      return HGFS_VMCI_TRANSPORT_ERROR;
+   }
+
    return 0;
 }
 
@@ -88,7 +413,7 @@ static int HgfsVmciChannelCallback(void *data, VMCIDatagram *dg)
  *
  * HgfsVmciChannelOpen --
  *
- *      Open VMCI channel.
+ *      Opens VMCI channel and passes guest pages to the host.
  *
  * Results:
  *      TRUE on success, FALSE on failure.
@@ -102,64 +427,122 @@ static int HgfsVmciChannelCallback(void *data, VMCIDatagram *dg)
 static Bool
 HgfsVmciChannelOpen(HgfsTransportChannel *channel) // IN: Channel
 {
-   HgfsVmciTransportHeader transportHeader;
-   VMCIDatagram *dg;
    int ret;
+   int i;
 
    ASSERT(channel->status == HGFS_CHANNEL_NOTCONNECTED);
    ASSERT(channel->priv == NULL);
 
    if (USE_VMCI == 0) {
-      return FALSE;
+      goto error;
    }
 
+   spin_lock_init(&vmciRequestProcessLock);
+
    channel->priv = kmalloc(sizeof(VMCIHandle), GFP_KERNEL);
-   if (NULL == channel->priv) {
-      return FALSE;
+   if (!channel->priv) {
+      goto error;
    }
 
    ret = VMCIDatagram_CreateHnd(VMCI_INVALID_ID,        /* Resource ID */
                                 VMCI_FLAG_DG_NONE,      /* Flags */
-                                HgfsVmciChannelCallback,/* Datagram Recv Callback*/
+                                HgfsVmciChannelCallback,/* Datagram Recv Callback */
                                 NULL,                   /* Callback data */
                                 channel->priv);         /* VMCI outhandle */
    if (ret != VMCI_SUCCESS) {
       LOG(1, (KERN_WARNING "Failed to create VMCI handle %d\n", ret));
-      kfree(channel->priv);
-      return FALSE;
+      goto error;
    }
 
-   transportHeader.version = HGFS_VMCI_VERSION_1;
-   transportHeader.iovCount = 0;
+   gHgfsShmemPages.list = kmalloc(sizeof gHgfsShmemPages.list * HGFS_VMCI_SHMEM_PAGES,
+                                  GFP_KERNEL);
+   if (!gHgfsShmemPages.list) {
+      goto error;
+   }
 
-   /*
-    * Send a datagram to the VMX with the HgfsTransportHeader as the datagram
-    * payload
-    */
-   dg = kmalloc(sizeof *dg + sizeof transportHeader, GFP_KERNEL);
+   memset(gHgfsShmemPages.list, 0, sizeof gHgfsShmemPages.list * HGFS_VMCI_SHMEM_PAGES);
+
+   for (i = 0; i < HGFS_VMCI_SHMEM_PAGES; i++) {
+      gHgfsShmemPages.list[i].va = __get_free_page(GFP_KERNEL);
+      if (!gHgfsShmemPages.list[i].va) {
+         LOG(1, (KERN_WARNING "__get_free_page returned error \n"));
+         if (i == 0) {
+            /* Ouch. We failed on first call to __get_free_page */
+            goto error;
+         }
+         /* It's ok. We can still send a few pages to the host */
+         break;
+      }
+      gHgfsShmemPages.list[i].pa = virt_to_phys((void *)(size_t)gHgfsShmemPages.list[i].va);
+      gHgfsShmemPages.list[i].free = TRUE;
+   }
+
+   gHgfsShmemPages.totalPageCount = i;
+   gHgfsShmemPages.freePageCount = i;
+
+   ret = HgfsVmciChannelPassGuestPages(channel);
+   if (!ret) {
+      LOG(1, (KERN_WARNING "Failed to pass pages to the guest %d\n", ret));
+      goto error;
+   }
+
+   return TRUE;
+
+error:
+   kfree(gHgfsShmemPages.list);
+   kfree(channel->priv);
+   return FALSE;
+}
+
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * HgfsVmciChannelTerminateSession --
+ *
+ *      Terminate session with the server.
+ *
+ * Results:
+ *      0 on success and < 0 on error.
+ *
+ * Side effects:
+ *      None
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static int
+HgfsVmciChannelTerminateSession(HgfsTransportChannel *channel) {
+
+   int ret = 0;
+   VMCIDatagram *dg;
+   HgfsVmciTransportHeader *transportHeader;
+
+   dg = kmalloc(sizeof *dg + sizeof *transportHeader, GFP_KERNEL);
    if (NULL == dg) {
       LOG(4, (KERN_WARNING "%s failed to allocate\n", __func__));
-      VMCIDatagram_DestroyHnd(*(VMCIHandle *)channel->priv);
-      kfree(channel->priv);
-      return FALSE;
+      return -ENOMEM;
    }
 
-   memcpy(VMCI_DG_PAYLOAD(dg), &transportHeader, sizeof transportHeader);
-
+   /* Initialize datagram */
    dg->src = *(VMCIHandle *)channel->priv;
    dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_HGFS_TRANSPORT);
-   dg->payloadSize = sizeof transportHeader;
+   dg->payloadSize = sizeof *transportHeader;
 
+   transportHeader = VMCI_DG_PAYLOAD(dg);
+   transportHeader->version = HGFS_VMCI_VERSION_1;
+   transportHeader->iovCount = 0;
+   transportHeader->pktType = HGFS_TH_TERMINATE_SESSION;
+
+   LOG(1, (KERN_WARNING "Terminating session with host \n"));
    if ((ret = VMCIDatagram_Send(dg)) < VMCI_SUCCESS) {
-      LOG(4, (KERN_WARNING "Failure with %d\n", ret));
-      VMCIDatagram_DestroyHnd(*(VMCIHandle *)channel->priv);
-      kfree(dg);
-      kfree(channel->priv);
-      return FALSE;
+      if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
+         LOG(0, (KERN_WARNING "HGFS Transport error occured. Don't blame VMCI\n"));
+      }
    }
 
    kfree(dg);
-   return TRUE;
+   return ret;
 }
 
 
@@ -183,9 +566,10 @@ static void
 HgfsVmciChannelClose(HgfsTransportChannel *channel) // IN: Channel
 {
    ASSERT(channel->priv != NULL);
-
+   HgfsVmciChannelTerminateSession(channel);
    VMCIDatagram_DestroyHnd(*(VMCIHandle *)channel->priv);
    kfree(channel->priv);
+   kfree(gHgfsShmemPages.list);
    channel->priv = NULL;
 
    LOG(8, ("VMware hgfs: %s: vmci closed.\n", __func__));
@@ -222,8 +606,6 @@ HgfsVmciChannelAllocate(size_t payloadSize) // IN: Ignored
       req->bufferSize = size - sizeof (HgfsVmciTransportStatus) - sizeof *req;
    }
 
-   /* We asked for PAGE_SIZE, it should be page aligned */
-   ASSERT(((long)req & 0x00000fff) == 0);
    LOG(10, (KERN_WARNING "%s: Allocated Request\n", __func__));
    return req;
 }
@@ -275,7 +657,6 @@ HgfsVmciChannelSend(HgfsTransportChannel *channel, // IN: Channel
 {
    int ret;
    int iovCount = 0;
-   HgfsReply *reply;
    VMCIDatagram *dg;
    HgfsVmciTransportHeader *transportHeader;
    HgfsVmciTransportStatus *transportStatus;
@@ -284,37 +665,20 @@ HgfsVmciChannelSend(HgfsTransportChannel *channel, // IN: Channel
    size_t total;
    uint64 pa;
    uint64 len;
-   size_t va;
+   uint64 id;
    int j;
 
    ASSERT(req);
    ASSERT(req->state == HGFS_REQ_STATE_UNSENT || req->state == HGFS_REQ_STATE_ALLOCATED);
    ASSERT(req->payloadSize <= req->bufferSize);
 
-   LOG(4, ("VMware hgfs: %s: VMCI sending.\n", __func__));
-
-   /*
-    +------------+
-    +   page 1   + <----- We can have request starting from here
-    +------------+
-    +   page 2   +
-    +------------+
-    +   page 3   + <----- ..and ending here
-    +------------+
-    */
-
    /* Note that req->bufferSize does not include chunk used by the transport. */
    total = req->bufferSize + sizeof (HgfsVmciTransportStatus);
-   bufferSize = 0;
 
    /* Calculate number of entries for metaPacket */
-   iovCount = 1;
-   va = (size_t)req->buffer;
-   len = total < (PAGE_SIZE - va % PAGE_SIZE) ? total : (PAGE_SIZE - va % PAGE_SIZE);
-   total -= len;
-   iovCount += (total + PAGE_SIZE - 1)/ PAGE_SIZE;
+   iovCount = (total + (size_t)req->buffer % PAGE_SIZE - 1)/ PAGE_SIZE + 1;
+   ASSERT(total + (size_t)req->buffer % PAGE_SIZE <= PAGE_SIZE);
 
-   ASSERT(iovCount >= 1);
    transportHeaderSize = sizeof *transportHeader +
                          (iovCount + req->numEntries - 1) * sizeof (HgfsIov);
    dg = kmalloc(sizeof *dg + transportHeaderSize, GFP_KERNEL);
@@ -329,8 +693,6 @@ HgfsVmciChannelSend(HgfsTransportChannel *channel, // IN: Channel
    dg->payloadSize = transportHeaderSize;
 
    transportHeader = VMCI_DG_PAYLOAD(dg);
-
-   /* Initialize transport header */
    transportHeader->version = HGFS_VMCI_VERSION_1;
 
    total = req->bufferSize + sizeof (HgfsVmciTransportStatus);
@@ -356,7 +718,7 @@ HgfsVmciChannelSend(HgfsTransportChannel *channel, // IN: Channel
    ASSERT(total == 0);
    ASSERT(bufferSize == req->bufferSize + sizeof (HgfsVmciTransportStatus));
 
-   LOG(8, ("Size of request is %Zu %Zu\n", req->payloadSize, sizeof (HgfsRequest)));
+   LOG(8, ("Size of request is %Zu\n", req->payloadSize));
 
    for (j = 0; j < req->numEntries; j++, iovCount++) {
       /* I will have to probably do page table walk here, haven't figured it out yet */
@@ -369,44 +731,34 @@ HgfsVmciChannelSend(HgfsTransportChannel *channel, // IN: Channel
    }
 
    transportHeader->iovCount = iovCount;
+   transportHeader->pktType = HGFS_TH_REQUEST;
 
    /* Initialize transport Status */
    transportStatus = (HgfsVmciTransportStatus *)req->buffer;
-   transportStatus->status = HGFS_VMCI_IO_PENDING;
-   transportStatus->flags = 0;
+   transportStatus->status = HGFS_TS_IO_PENDING;
    transportStatus->size = req->bufferSize + sizeof (HgfsVmciTransportStatus);
 
-   LOG(8, (KERN_WARNING "Physical addr is %"FMT64"x len=%u iovCount=%u numEntries=%u\n",
-           transportHeader->iov[0].pa,
-           transportHeader->iov[0].len,
-           transportHeader->iovCount,
-           req->numEntries));
-   LOG(8, (KERN_WARNING "Id = %u op = %u\n",
-           ((HgfsRequest *)req->payload)->id,
-           ((HgfsRequest *)req->payload)->op));
+   /*
+    * Don't try to set req->state after VMCIDatagram_Send().
+    * It may be too late then. We could have received a datagram by then and
+    * datagram handler expects request's state to be submitted.
+    */
+   req->state = HGFS_REQ_STATE_SUBMITTED;
+   id = req->id;
 
-   if((ret = VMCIDatagram_Send(dg)) < VMCI_SUCCESS) {
+   if ((ret = VMCIDatagram_Send(dg)) < VMCI_SUCCESS) {
       if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
          LOG(0, (KERN_WARNING "HGFS Transport error occured. Don't blame VMCI\n"));
+      } else if (ret == HGFS_VMCI_VERSION_MISMATCH) {
+         LOG(0, (KERN_WARNING "Version mismatch\n"));
       }
       req->state = HGFS_REQ_STATE_UNSENT;
       kfree(dg);
       return -EIO;
    }
 
-   LOG(8, (KERN_WARNING "VMware hgfs: %s: VMCI reply received.\n", __func__));
-
-   /* For HgfsVmciStage2 everything should complete sync. */
-   ASSERT(transportStatus->status == HGFS_VMCI_IO_COMPLETE);
-
-   if (transportStatus->status == HGFS_VMCI_IO_COMPLETE) {
-      reply = (HgfsReply *)req->payload;
-      req->payloadSize = transportStatus->size;
-      ASSERT(transportStatus->size <= (req->bufferSize + sizeof (HgfsVmciTransportStatus)));
-      HgfsCompleteReq(req);
-      LOG(8, (KERN_WARNING "IO_COMPLETE: id = %u status = %u\n",
-              (uint32)reply->id, (uint32)reply->status));
-   }
+   LOG(0, (KERN_WARNING "Hgfs Received response\n"));
+   HgfsVmciChannelCompleteRequest(id);
 
    kfree(dg);
    return 0;
index d0c61a56a48effff1e2809fc8308fac4b5aa6cd6..d7dff151a12bbef3a8a516dd4b4fedd2f1a853c1 100644 (file)
@@ -102,7 +102,7 @@ HgfsServerRpcInDispatch(RpcInData *data)
    }
 
    packetSize = data->argsSize - 1;
-   HgfsServer_ProcessPacket(data->args + 1, packet, &packetSize, 0);
+   HgfsServer_ProcessPacket(data->args + 1, packet, &packetSize);
 
    data->result = packet;
    data->resultLen = packetSize;
index 3b1f9b5f8a9e316910e0978e3a8aa5820de80118..6860191579134ed0647a16cf467baaec164da341 100644 (file)
@@ -1328,8 +1328,7 @@ ToolsDaemonHgfsImpersonated(RpcInData *data) // IN
     */
    HgfsServer_ProcessPacket(data->args,        // packet in buf
                             hgfsReplyPacket,   // packet out buf
-                            &hgfsPacketSize,   // in/out size
-                            0);                // receive process flags
+                            &hgfsPacketSize);   // in/out size
 
 abort:
    if (impersonatingVMWareUser) {