git.ipfire.org Git - thirdparty/sqlite.git/commitdiff
Correct opfs-sahpool VFS after the pebkac involving the previous speedtest1 runs. Make that VFS explicitly opt-in to avoid certain unfortunate locking situations.
author stephan <stephan@noemail.net>
Sat, 15 Jul 2023 19:08:58 +0000 (19:08 +0000)
committer stephan <stephan@noemail.net>
Sat, 15 Jul 2023 19:08:58 +0000 (19:08 +0000)
FossilOrigin-Name: 41bf1fe31f2f3d0daa2bac25dc57262a4b90f22fed6fa97e4e92467c32ae02dc

ext/wasm/api/sqlite3-vfs-opfs-sahpool.js
ext/wasm/api/sqlite3-vfs-opfs.c-pp.js
ext/wasm/speedtest1-worker.js
manifest
manifest.uuid
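
For context, here is a minimal usage sketch (not part of the diff) of the opt-in API this commit introduces: the VFS is only registered once client code explicitly awaits sqlite3.installOpfsSAHPoolVfs(), after which databases may be opened via the new sqlite3.oo1.OpfsSAHPoolDb class or by naming the "opfs-sahpool" VFS. The filename and error handling are illustrative only.

  sqlite3InitModule().then(async (sqlite3)=>{
    try{
      /* Explicit opt-in: resolves to the sqlite3 object, rejects if the
         required OPFS APIs are missing or the pool cannot be locked. */
      await sqlite3.installOpfsSAHPoolVfs();
      const db = new sqlite3.oo1.OpfsSAHPoolDb('/my-app.db') /*illustrative name*/;
      db.exec("create table if not exists t(a)");
      db.close();
    }catch(e){
      console.error("opfs-sahpool is unavailable:", e.message);
    }
  });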

ext/wasm/api/sqlite3-vfs-opfs-sahpool.js
index acff9ea2076cc6bb33ca00ddba47ecbddb9d5a90..57d674e9fc6d5d421efdde401163e63f72e0c927 100644 (file)
 */
 'use strict';
 globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
-const installOpfsVfs = async function(sqlite3){
+const toss = sqlite3.util.toss;
+let vfsRegisterResult = undefined;
+/**
+   installOpfsSAHPoolVfs() asynchronously initializes the
+   OPFS SyncAccessHandle Pool VFS. It returns a Promise
+   which either resolves to the sqlite3 object or rejects
+   with an Error value.
+
+   Initialization of this VFS is not automatic because its
+   registration requires that it lock all resources it
+   will potentially use, even if client code does not want
+   to use them. That, in turn, can lead to locking errors
+   when, for example, one page in a given origin has loaded
+   this VFS but does not use it, then another page in that
+   origin tries to use the VFS. If the VFS were automatically
+   registered, the second page would fail to load the VFS
+   due to OPFS locking errors.
+
+   On calls after the first, this function immediately returns a
+   resolved or rejected Promise. If called while the first call is
+   still pending resolution, a rejected promise with a descriptive
+   error is returned.
+*/
+sqlite3.installOpfsSAHPoolVfs = async function(){
+  if(sqlite3===vfsRegisterResult) return Promise.resolve(sqlite3);
+  else if(undefined!==vfsRegisterResult){
+    return Promise.reject(vfsRegisterResult);
+  }
   if(!globalThis.FileSystemHandle ||
      !globalThis.FileSystemDirectoryHandle ||
      !globalThis.FileSystemFileHandle ||
      !globalThis.FileSystemFileHandle.prototype.createSyncAccessHandle ||
      !navigator?.storage?.getDirectory){
-    return Promise.reject(new Error("Missing required OPFS APIs."));
+    return Promise.reject(vfsRegisterResult = new Error("Missing required OPFS APIs."));
   }
-  return new Promise(async function(promiseResolve, promiseReject_){
-    const verbosity = 2;
-    const loggers = [
-      sqlite3.config.error,
-      sqlite3.config.warn,
-      sqlite3.config.log
-    ];
-    const logImpl = (level,...args)=>{
-      if(verbosity>level) loggers[level]("opfs-sahpool:",...args);
-    };
-    const log =    (...args)=>logImpl(2, ...args);
-    const warn =   (...args)=>logImpl(1, ...args);
-    const error =  (...args)=>logImpl(0, ...args);
-    const toss = sqlite3.util.toss;
-    const capi = sqlite3.capi;
-    const wasm = sqlite3.wasm;
-    const opfsIoMethods = new capi.sqlite3_io_methods();
-    const opfsVfs = new capi.sqlite3_vfs()
-          .addOnDispose(()=>opfsIoMethods.dispose());
-    const promiseReject = (err)=>{
-      opfsVfs.dispose();
-      return promiseReject_(err);
-    };
-
-    // Config opts for the VFS...
-    const SECTOR_SIZE = 4096;
-    const HEADER_MAX_PATH_SIZE = 512;
-    const HEADER_FLAGS_SIZE = 4;
-    const HEADER_DIGEST_SIZE = 8;
-    const HEADER_CORPUS_SIZE = HEADER_MAX_PATH_SIZE + HEADER_FLAGS_SIZE;
-    const HEADER_OFFSET_FLAGS = HEADER_MAX_PATH_SIZE;
-    const HEADER_OFFSET_DIGEST = HEADER_CORPUS_SIZE;
-    const HEADER_OFFSET_DATA = SECTOR_SIZE;
-    const DEFAULT_CAPACITY =
-          sqlite3.config['opfs-sahpool.defaultCapacity'] || 6;
-    /* Bitmask of file types which may persist across sessions.
-       SQLITE_OPEN_xyz types not listed here may be inadvertently
-       left in OPFS but are treated as transient by this VFS and
-       they will be cleaned up during VFS init. */
-    const PERSISTENT_FILE_TYPES =
-          capi.SQLITE_OPEN_MAIN_DB |
-          capi.SQLITE_OPEN_MAIN_JOURNAL |
-          capi.SQLITE_OPEN_SUPER_JOURNAL |
-          capi.SQLITE_OPEN_WAL /* noting that WAL support is
-                                  unavailable in the WASM build.*/;
-    /* We fetch the default VFS so that we can inherit some
-       methods from it. */
-    const pDVfs = capi.sqlite3_vfs_find(null);
-    const dVfs = pDVfs
-          ? new capi.sqlite3_vfs(pDVfs)
-          : null /* dVfs will be null when sqlite3 is built with
-                    SQLITE_OS_OTHER. */;
-    opfsVfs.$iVersion = 2/*yes, two*/;
-    opfsVfs.$szOsFile = capi.sqlite3_file.structInfo.sizeof;
-    opfsVfs.$mxPathname = HEADER_MAX_PATH_SIZE;
-    opfsVfs.addOnDispose(
-      opfsVfs.$zName = wasm.allocCString("opfs-sahpool"),
-      ()=>(dVfs ? dVfs.dispose() : null)
-    );
-
+  vfsRegisterResult = new Error("VFS initialization still underway.");
+  const verbosity = 2 /*3+ == everything*/;
+  const loggers = [
+    sqlite3.config.error,
+    sqlite3.config.warn,
+    sqlite3.config.log
+  ];
+  const logImpl = (level,...args)=>{
+    if(verbosity>level) loggers[level]("opfs-sahpool:",...args);
+  };
+  const log =    (...args)=>logImpl(2, ...args);
+  const warn =   (...args)=>logImpl(1, ...args);
+  const error =  (...args)=>logImpl(0, ...args);
+  const capi = sqlite3.capi;
+  const wasm = sqlite3.wasm;
+  const opfsIoMethods = new capi.sqlite3_io_methods();
+  const opfsVfs = new capi.sqlite3_vfs()
+        .addOnDispose(()=>opfsIoMethods.dispose());
+  const promiseReject = (err)=>{
+    error("rejecting promise:",err);
+    //opfsVfs.dispose();
+    vfsRegisterResult = err;
+    return Promise.reject(err);
+  };
+  const promiseResolve =
+        ()=>Promise.resolve(vfsRegisterResult = sqlite3);
+  // Config opts for the VFS...
+  const SECTOR_SIZE = 4096;
+  const HEADER_MAX_PATH_SIZE = 512;
+  const HEADER_FLAGS_SIZE = 4;
+  const HEADER_DIGEST_SIZE = 8;
+  const HEADER_CORPUS_SIZE = HEADER_MAX_PATH_SIZE + HEADER_FLAGS_SIZE;
+  const HEADER_OFFSET_FLAGS = HEADER_MAX_PATH_SIZE;
+  const HEADER_OFFSET_DIGEST = HEADER_CORPUS_SIZE;
+  const HEADER_OFFSET_DATA = SECTOR_SIZE;
+  const DEFAULT_CAPACITY =
+        sqlite3.config['opfs-sahpool.defaultCapacity'] || 6;
+  /* Bitmask of file types which may persist across sessions.
+     SQLITE_OPEN_xyz types not listed here may be inadvertently
+     left in OPFS but are treated as transient by this VFS and
+     they will be cleaned up during VFS init. */
+  const PERSISTENT_FILE_TYPES =
+        capi.SQLITE_OPEN_MAIN_DB |
+        capi.SQLITE_OPEN_MAIN_JOURNAL |
+        capi.SQLITE_OPEN_SUPER_JOURNAL |
+        capi.SQLITE_OPEN_WAL /* noting that WAL support is
+                                unavailable in the WASM build.*/;
+  /* We fetch the default VFS so that we can inherit some
+     methods from it. */
+  const pDVfs = capi.sqlite3_vfs_find(null);
+  const dVfs = pDVfs
+        ? new capi.sqlite3_vfs(pDVfs)
+        : null /* dVfs will be null when sqlite3 is built with
+                  SQLITE_OS_OTHER. */;
+  opfsIoMethods.$iVersion = 1;
+  opfsVfs.$iVersion = 2/*yes, two*/;
+  opfsVfs.$szOsFile = capi.sqlite3_file.structInfo.sizeof;
+  opfsVfs.$mxPathname = HEADER_MAX_PATH_SIZE;
+  opfsVfs.addOnDispose(
+    opfsVfs.$zName = wasm.allocCString("opfs-sahpool"),
+    ()=>(dVfs ? dVfs.dispose() : null)
+  );
+
+  /**
+     Returns a short string of random alphanumeric characters
+     suitable for use as a random filename.
+  */
+  const getRandomName = ()=>Math.random().toString(36).slice(2);
+
+  /**
+     All state for the VFS.
+  */
+  const SAHPool = Object.assign(Object.create(null),{
+    /* OPFS dir in which VFS metadata is stored. */
+    vfsDir: sqlite3.config['vfs:opfs-sahpool:dir']
+      || ".sqlite3-opfs-sahpool",
+    /* Directory handle to this.vfsDir. */
+    dirHandle: undefined,
+    /* Maps SAHs to their opaque file names. */
+    mapSAHToName: new Map(),
+    /* Maps client-side file names to SAHs. */
+    mapPathToSAH: new Map(),
+    /* Set of currently-unused SAHs. */
+    availableSAH: new Set(),
+    /* Maps (sqlite3_file*) to xOpen's file objects. */
+    mapIdToFile: new Map(),
+    /* Current pool capacity. */
+    getCapacity: function(){return this.mapSAHToName.size},
+    /* Current number of in-use files from pool. */
+    getFileCount: function(){return this.mapPathToSAH.size},
     /**
-       Returns short a string of random alphanumeric characters
-       suitable for use as a random filename.
+       Adds n files to the pool's capacity. This change is
+       persistent across sessions. Returns a Promise which resolves
+       to the new capacity.
     */
-    const getRandomName = ()=>Math.random().toString(36).slice(2);
-
+    addCapacity: async function(n){
+      const cap = this.getCapacity();
+      for(let i = cap; i < cap+n; ++i){
+        const name = getRandomName();
+        const h = await this.dirHandle.getFileHandle(name, {create:true});
+        const ah = await h.createSyncAccessHandle();
+        this.mapSAHToName.set(ah,name);
+        this.setAssociatedPath(ah, '', 0);
+      }
+      return this.getCapacity()/*the new capacity*/;
+    },
     /**
-       All state for the VFS.
+       Removes n entries from the pool's current capacity
+       if possible. It can only remove currently-unallocated
+       files. Returns a Promise resolving to the number of
+       removed files.
     */
-    const SAHPool = Object.assign(Object.create(null),{
-      /* OPFS dir in which VFS metadata is stored. */
-      vfsDir: sqlite3.config['vfs:opfs-sahpool:dir']
-        || ".sqlite3-opfs-sahpool",
-      /* Directory handle to this.vfsDir. */
-      dirHandle: undefined,
-      /* Maps SAHs to their opaque file names. */
-      mapSAHToName: new Map(),
-      /* Maps client-side file names to SAHs. */
-      mapPathToSAH: new Map(),
-      /* Set of currently-unused SAHs. */
-      availableSAH: new Set(),
-      /* Maps (sqlite3_file*) to xOpen's file objects. */
-      mapIdToFile: new Map(),
-      /* Current pool capacity. */
-      getCapacity: function(){return this.mapSAHToName.size},
-      /* Current number of in-use files from pool. */
-      getFileCount: function(){return this.mapPathToSAH.size},
-      /**
-         Adds n files to the pool's capacity. This change is
-         persistent across settings. Returns a Promise which resolves
-         to the new capacity.
-      */
-      addCapacity: async function(n){
-        const cap = this.getCapacity();
-        for(let i = cap; i < cap+n; ++i){
-          const name = getRandomName();
-          const h = await this.dirHandle.getFileHandle(name, {create:true});
-          const ah = await h.createSyncAccessHandle();
-          this.mapSAHToName.set(ah,name);
-          this.setAssociatedPath(ah, '', 0);
+    reduceCapacity: async function(n){
+      let nRm = 0;
+      for(const ah of Array.from(this.availableSAH)){
+        if(nRm === n || this.getFileCount() === this.getCapacity()){
+          break;
         }
-        return i;
-      },
-      /**
-         Removes n entries from the pool's current capacity
-         if possible. It can only remove currently-unallocated
-         files. Returns a Promise resolving to the number of
-         removed files.
-      */
-      reduceCapacity: async function(n){
-        let nRm = 0;
-        for(const ah of Array.from(this.availableSAH)){
-          if(nRm === n || this.getFileCount() === this.getCapacity()){
-            break;
-          }
-          const name = this.mapSAHToName.get(ah);
-          ah.close();
-          await this.dirHandle.removeEntry(name);
-          this.mapSAHToName.delete(ah);
-          this.availableSAH.delete(ah);
-          ++nRm;
-        }
-        return nRm;
-      },
-      /**
-         Releases all currently-opened SAHs.
-      */
-      releaseAccessHandles: function(){
-        for(const ah of this.mapSAHToName.keys()) ah.close();
-        this.mapSAHToName.clear();
-        this.mapPathToSAH.clear();
-        this.availableSAH.clear();
-      },
-      /**
-         Opens all files under this.vfsDir/this.dirHandle and acquires
-         a SAH for each. returns a Promise which resolves to no value
-         but completes once all SAHs are acquired. If acquiring an SAH
-         throws, SAHPool.$error will contain the corresponding
-         exception.
-      */
-      acquireAccessHandles: async function(){
-        const files = [];
-        for await (const [name,h] of this.dirHandle){
-          if('file'===h.kind){
-            files.push([name,h]);
-          }
-        }
-        await Promise.all(files.map(async ([name,h])=>{
-          try{
-            const ah = await h.createSyncAccessHandle()
-            /*TODO: clean up and fail vfs init on error*/;
-            this.mapSAHToName.set(ah, name);
-            const path = this.getAssociatedPath(ah);
-            if(path){
-              this.mapPathToSAH.set(path, ah);
-            }else{
-              this.availableSAH.add(ah);
-            }
-          }catch(e){
-            SAHPool.storeErr(e);
-            throw e;
-          }
-        }));
-      },
-      /** Buffer used by [sg]etAssociatedPath(). */
-      apBody: new Uint8Array(HEADER_CORPUS_SIZE),
-      textDecoder: new TextDecoder(),
-      textEncoder: new TextEncoder(),
-      /**
-         Given an SAH, returns the client-specified name of
-         that file by extracting it from the SAH's header.
-
-         On error, it disassociates SAH from the pool and
-         returns an empty string.
-      */
-      getAssociatedPath: function(sah){
-        const body = this.apBody;
-        sah.read(body, {at: 0});
-        // Delete any unexpected files left over by previous
-        // untimely errors...
-        const dv = new DataView(body.buffer, body.byteOffset);
-        const flags = dv.getUint32(HEADER_OFFSET_FLAGS);
-        if(body[0] &&
-           ((flags & capi.SQLITE_OPEN_DELETEONCLOSE) ||
-            (flags & PERSISTENT_FILE_TYPES)===0)){
-          warn(`Removing file with unexpected flags ${flags.toString(16)}`);
-          this.setAssociatedPath(sah, '', 0);
-          return '';
+        const name = this.mapSAHToName.get(ah);
+        ah.close();
+        await this.dirHandle.removeEntry(name);
+        this.mapSAHToName.delete(ah);
+        this.availableSAH.delete(ah);
+        ++nRm;
+      }
+      return nRm;
+    },
+    /**
+       Releases all currently-opened SAHs.
+    */
+    releaseAccessHandles: function(){
+      for(const ah of this.mapSAHToName.keys()) ah.close();
+      this.mapSAHToName.clear();
+      this.mapPathToSAH.clear();
+      this.availableSAH.clear();
+    },
+    /**
+       Opens all files under this.vfsDir/this.dirHandle and acquires
+       a SAH for each. Returns a Promise which resolves to no value
+       but completes once all SAHs are acquired. If acquiring an SAH
+       throws, SAHPool.$error will contain the corresponding
+       exception.
+    */
+    acquireAccessHandles: async function(){
+      const files = [];
+      for await (const [name,h] of this.dirHandle){
+        if('file'===h.kind){
+          files.push([name,h]);
         }
-
-        const fileDigest = new Uint32Array(HEADER_DIGEST_SIZE / 4);
-        sah.read(fileDigest, {at: HEADER_OFFSET_DIGEST});
-        const compDigest = this.computeDigest(body);
-        if(fileDigest.every((v,i) => v===compDigest[i])){
-          // Valid digest
-          const pathBytes = body.findIndex((v)=>0===v);
-          if(0===pathBytes){
-            // This file is unassociated, so truncate it to avoid
-            // leaving stale db data laying around.
-            sah.truncate(HEADER_OFFSET_DATA);
+      }
+      await Promise.all(files.map(async ([name,h])=>{
+        try{
+          const ah = await h.createSyncAccessHandle()
+          /*TODO: clean up and fail vfs init on error*/;
+          this.mapSAHToName.set(ah, name);
+          const path = this.getAssociatedPath(ah);
+          if(path){
+            this.mapPathToSAH.set(path, ah);
+          }else{
+            this.availableSAH.add(ah);
           }
-          return this.textDecoder.decode(body.subarray(0,pathBytes));
-        }else{
-          // Invalid digest
-          warn('Disassociating file with bad digest.');
-          this.setAssociatedPath(sah, '', 0);
-          return '';
-        }
-      },
-      /**
-         Stores the given client-defined path and SQLITE_OPEN_xyz
-         flags into the given SAH.
-      */
-      setAssociatedPath: function(sah, path, flags){
-        const body = this.apBody;
-        const enc = this.textEncoder.encodeInto(path, body);
-        if(HEADER_MAX_PATH_SIZE <= enc.written){
-          toss("Path too long:",path);
+        }catch(e){
+          SAHPool.storeErr(e);
+          throw e;
         }
+      }));
+    },
+    /** Buffer used by [sg]etAssociatedPath(). */
+    apBody: new Uint8Array(HEADER_CORPUS_SIZE),
+    textDecoder: new TextDecoder(),
+    textEncoder: new TextEncoder(),
+    /**
+       Given an SAH, returns the client-specified name of
+       that file by extracting it from the SAH's header.
 
-        const dv = new DataView(body.buffer, body.byteOffset);
-        dv.setUint32(HEADER_OFFSET_FLAGS, flags);
-
-        const digest = this.computeDigest(body);
-        sah.write(body, {at: 0});
-        sah.write(digest, {at: HEADER_OFFSET_DIGEST});
-        sah.flush();
+       On error, it disassociates SAH from the pool and
+       returns an empty string.
+    */
+    getAssociatedPath: function(sah){
+      const body = this.apBody;
+      sah.read(body, {at: 0});
+      // Delete any unexpected files left over by previous
+      // untimely errors...
+      const dv = new DataView(body.buffer, body.byteOffset);
+      const flags = dv.getUint32(HEADER_OFFSET_FLAGS);
+      if(body[0] &&
+         ((flags & capi.SQLITE_OPEN_DELETEONCLOSE) ||
+          (flags & PERSISTENT_FILE_TYPES)===0)){
+        warn(`Removing file with unexpected flags ${flags.toString(16)}`);
+        this.setAssociatedPath(sah, '', 0);
+        return '';
+      }
 
-        if(path){
-          this.mapPathToSAH.set(path, sah);
-          this.availableSAH.delete(sah);
-        }else{
-          // This is not a persistent file, so eliminate the contents.
+      const fileDigest = new Uint32Array(HEADER_DIGEST_SIZE / 4);
+      sah.read(fileDigest, {at: HEADER_OFFSET_DIGEST});
+      const compDigest = this.computeDigest(body);
+      if(fileDigest.every((v,i) => v===compDigest[i])){
+        // Valid digest
+        const pathBytes = body.findIndex((v)=>0===v);
+        if(0===pathBytes){
+          // This file is unassociated, so truncate it to avoid
+          // leaving stale db data laying around.
           sah.truncate(HEADER_OFFSET_DATA);
-          this.mapPathToSAH.delete(path);
-          this.availableSAH.add(sah);
-        }
-      },
-      /**
-         Computes a digest for the given byte array and
-         returns it as a two-element Uint32Array.
-      */
-      computeDigest: function(byteArray){
-        if(!byteArray[0]){
-          // Deleted file
-          return new Uint32Array([0xfecc5f80, 0xaccec037]);
-        }
-        let h1 = 0xdeadbeef;
-        let h2 = 0x41c6ce57;
-        for(const v of byteArray){
-          h1 = 31 * h1 + (v * 307);
-          h2 = 31 * h2 + (v * 307);
-        }
-        return new Uint32Array([h1>>>0, h2>>>0]);
-      },
-      reset: async function(){
-        await this.isReady;
-        let h = await navigator.storage.getDirectory();
-        for(const d of this.vfsDir.split('/')){
-          if(d){
-            h = await h.getDirectoryHandle(d,{create:true});
-          }
-        }
-        this.dirHandle = h;
-        this.releaseAccessHandles();
-        await this.acquireAccessHandles();
-      },
-      /**
-         Returns the pathname part of the given argument,
-         which may be any of:
-
-         - a URL object
-         - A JS string representing a file name
-         - Wasm C-string representing a file name
-      */
-      getPath: function(arg) {
-        if(wasm.isPtr(arg)) arg = wasm.cstrToJs(arg);
-        return ((arg instanceof URL)
-                ? arg
-                : new URL(arg, 'file://localhost/')).pathname;
-      },
-      /**
-         Removes the association of the given client-specified file
-         name (JS string) from the pool.
-      */
-      deletePath: function(path) {
-        const sah = this.mapPathToSAH.get(path);
-        if(sah) {
-          // Un-associate the SQLite path from the OPFS file.
-          this.setAssociatedPath(sah, '', 0);
         }
-      },
-      /**
-         Sets e as this object's current error. Pass a falsy
-         (or no) value to clear it.
-      */
-      storeErr: function(e){
-        return this.$error = e;
-      },
-      /**
-         Pops this object's Error object and returns
-         it (a falsy value if no error is set).
-      */
-      popErr: function(e){
-        const rc = this.$error;
-        this.$error = undefined;
-        return rc;
+        return this.textDecoder.decode(body.subarray(0,pathBytes));
+      }else{
+        // Invalid digest
+        warn('Disassociating file with bad digest.');
+        this.setAssociatedPath(sah, '', 0);
+        return '';
       }
-    })/*SAHPool*/;
-    sqlite3.SAHPool = SAHPool/*only for testing*/;
+    },
     /**
-       Impls for the sqlite3_io_methods methods. Maintenance reminder:
-       members are in alphabetical order to simplify finding them.
+       Stores the given client-defined path and SQLITE_OPEN_xyz
+       flags into the given SAH.
     */
-    const ioSyncWrappers = {
-      xCheckReservedLock: function(pFile,pOut){
-        SAHPool.storeErr();
-        return 0;
-      },
-      xClose: function(pFile){
-        SAHPool.storeErr();
-        const file = SAHPool.mapIdToFile.get(pFile);
-        if(file) {
-          try{
-            log(`xClose ${file.path}`);
-            file.sah.flush();
-            SAHPool.mapIdToFile.delete(pFIle);
-            if(file.flags & capi.SQLITE_OPEN_DELETEONCLOSE){
-              SAHPool.deletePath(file.path);
-            }
-          }catch(e){
-            SAHPool.storeErr(e);
-            error("xClose() failed:",e.message);
-            return capi.SQLITE_IOERR;
-          }
-        }
-        return 0;
-      },
-      xDeviceCharacteristics: function(pFile){
-        return capi.SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN;
-      },
-      xFileControl: function(pFile, opId, pArg){
-        return capi.SQLITE_NOTFOUND;
-      },
-      xFileSize: function(pFile,pSz64){
-        const file = SAHPool.mapIdToFile(pFile);
-        const size = file.sah.getSize() - HEADER_OFFSET_DATA;
-        //log(`xFileSize ${file.path} ${size}`);
-        wasm.poke64(pSz64, BigInt(size));
-        return 0;
-      },
-      xLock: function(pFile,lockType){
-        SAHPool.storeErr();
-        let rc = capi.SQLITE_IOERR;
-        return rc;
-      },
-      xRead: function(pFile,pDest,n,offset64){
-        SAHPool.storeErr();
-        const file = SAHPool.mapIdToFile.get(pFile);
-        log(`xRead ${file.path} ${n} ${offset64}`);
-        try {
-          const nRead = file.sah.read(
-            pDest, {at: HEADER_OFFSET_DATA + offset64}
-          );
-          if(nRead < n){
-            wasm.heap8u().fill(0, pDest + nRead, pDest + n);
-            return capi.SQLITE_IOERR_SHORT_READ;
-          }
-          return 0;
-        }catch(e){
-          SAHPool.storeErr(e);
-          error("xRead() failed:",e.message);
-          return capi.SQLITE_IOERR;
+    setAssociatedPath: function(sah, path, flags){
+      const body = this.apBody;
+      const enc = this.textEncoder.encodeInto(path, body);
+      if(HEADER_MAX_PATH_SIZE <= enc.written){
+        toss("Path too long:",path);
+      }
+
+      const dv = new DataView(body.buffer, body.byteOffset);
+      dv.setUint32(HEADER_OFFSET_FLAGS, flags);
+
+      const digest = this.computeDigest(body);
+      sah.write(body, {at: 0});
+      sah.write(digest, {at: HEADER_OFFSET_DIGEST});
+      sah.flush();
+
+      if(path){
+        this.mapPathToSAH.set(path, sah);
+        this.availableSAH.delete(sah);
+      }else{
+        // This is not a persistent file, so eliminate the contents.
+        sah.truncate(HEADER_OFFSET_DATA);
+        this.mapPathToSAH.delete(path);
+        this.availableSAH.add(sah);
+      }
+    },
+    /**
+       Computes a digest for the given byte array and
+       returns it as a two-element Uint32Array.
+    */
+    computeDigest: function(byteArray){
+      if(!byteArray[0]){
+        // Deleted file
+        return new Uint32Array([0xfecc5f80, 0xaccec037]);
+      }
+      let h1 = 0xdeadbeef;
+      let h2 = 0x41c6ce57;
+      for(const v of byteArray){
+        h1 = 31 * h1 + (v * 307);
+        h2 = 31 * h2 + (v * 307);
+      }
+      return new Uint32Array([h1>>>0, h2>>>0]);
+    },
+    reset: async function(){
+      await this.isReady;
+      let h = await navigator.storage.getDirectory();
+      for(const d of this.vfsDir.split('/')){
+        if(d){
+          h = await h.getDirectoryHandle(d,{create:true});
         }
-      },
-      xSectorSize: function(pFile){
-        return SECTOR_SIZE;
-      },
-      xSync: function(pFile,flags){
-        SAHPool.storeErr();
-        const file = SAHPool.mapIdToFile.get(pFile);
-        //log(`xSync ${file.path} ${flags}`);
+      }
+      this.dirHandle = h;
+      this.releaseAccessHandles();
+      await this.acquireAccessHandles();
+    },
+    /**
+       Returns the pathname part of the given argument,
+       which may be any of:
+
+       - a URL object
+       - A JS string representing a file name
+       - Wasm C-string representing a file name
+    */
+    getPath: function(arg) {
+      if(wasm.isPtr(arg)) arg = wasm.cstrToJs(arg);
+      return ((arg instanceof URL)
+              ? arg
+              : new URL(arg, 'file://localhost/')).pathname;
+    },
+    /**
+       Removes the association of the given client-specified file
+       name (JS string) from the pool.
+    */
+    deletePath: function(path) {
+      const sah = this.mapPathToSAH.get(path);
+      if(sah) {
+        // Un-associate the SQLite path from the OPFS file.
+        this.setAssociatedPath(sah, '', 0);
+      }
+    },
+    /**
+       Sets e as this object's current error. Pass a falsy
+       (or no) value to clear it.
+    */
+    storeErr: function(e){
+      if(e) error(e);
+      return this.$error = e;
+    },
+    /**
+       Pops this object's Error object and returns
+       it (a falsy value if no error is set).
+    */
+    popErr: function(){
+      const rc = this.$error;
+      this.$error = undefined;
+      return rc;
+    }
+  })/*SAHPool*/;
+  sqlite3.SAHPool = SAHPool/*only for testing*/;
+  /**
+     Impls for the sqlite3_io_methods methods. Maintenance reminder:
+     members are in alphabetical order to simplify finding them.
+  */
+  const ioSyncWrappers = {
+    xCheckReservedLock: function(pFile,pOut){
+      log('xCheckReservedLock');
+      SAHPool.storeErr();
+      wasm.poke32(pOut, 1);
+      return 0;
+    },
+    xClose: function(pFile){
+      SAHPool.storeErr();
+      const file = SAHPool.mapIdToFile.get(pFile);
+      if(file) {
         try{
+          log(`xClose ${file}`);
+          if(file.sq3File) file.sq3File.dispose();
           file.sah.flush();
-          return 0;
+          SAHPool.mapIdToFile.delete(pFile);
+          if(file.flags & capi.SQLITE_OPEN_DELETEONCLOSE){
+            SAHPool.deletePath(file.path);
+          }
         }catch(e){
           SAHPool.storeErr(e);
-          error("xSync() failed:",e.message);
           return capi.SQLITE_IOERR;
         }
-      },
-      xTruncate: function(pFile,sz64){
-        SAHPool.storeErr();
-        const file = SAHPool.mapIdToFile.get(pFile);
-        //log(`xTruncate ${file.path} ${iSize}`);
-        try{
-          file.sah.truncate(HEADER_OFFSET_DATA + Number(sz64));
-          return 0;
-        }catch(e){
-          SAHPool.storeErr(e);
-          error("xTruncate() failed:",e.message);
-          return capi.SQLITE_IOERR;
+      }
+      return 0;
+    },
+    xDeviceCharacteristics: function(pFile){
+      return capi.SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN;
+    },
+    xFileControl: function(pFile, opId, pArg){
+      return capi.SQLITE_NOTFOUND;
+    },
+    xFileSize: function(pFile,pSz64){
+      log(`xFileSize`);
+      const file = SAHPool.mapIdToFile.get(pFile);
+      const size = file.sah.getSize() - HEADER_OFFSET_DATA;
+      //log(`xFileSize ${file.path} ${size}`);
+      wasm.poke64(pSz64, BigInt(size));
+      return 0;
+    },
+    xLock: function(pFile,lockType){
+      log(`xLock ${lockType}`);
+      SAHPool.storeErr();
+      const file = SAHPool.mapIdToFile.get(pFile);
+      file.lockType = lockType;
+      return 0;
+    },
+    xRead: function(pFile,pDest,n,offset64){
+      log(`xRead ${n}@${offset64}`);
+      SAHPool.storeErr();
+      const file = SAHPool.mapIdToFile.get(pFile);
+      log(`xRead ${file.path} ${n} ${offset64}`);
+      try {
+        const nRead = file.sah.read(
+          wasm.heap8u().subarray(pDest, pDest+n),
+          {at: HEADER_OFFSET_DATA + Number(offset64)}
+        );
+        if(nRead < n){
+          wasm.heap8u().fill(0, pDest + nRead, pDest + n);
+          return capi.SQLITE_IOERR_SHORT_READ;
         }
-      },
-      /**xUnlock: function(pFile,lockType){
+        return 0;
+      }catch(e){
+        SAHPool.storeErr(e);
+        error("xRead() failed:",e.message);
         return capi.SQLITE_IOERR;
-      },*/
-      xWrite: function(pFile,pSrc,n,offset64){
-        SAHPool.storeErr();
-        const file = SAHPool.mapIdToFile(pFile);
-        //log(`xWrite ${file.path} ${n} ${offset64}`);
-        try{
-          const nBytes = file.sah.write(
-            pSrc, { at: HEADER_OFFSET_DATA + Number(offset64) }
-          );
-          return nBytes === n ? 0 : capi.SQLITE_IOERR;
-        }catch(e){
-          SAHPool.storeErr(e);
-          error("xWrite() failed:",e.message);
-          return capi.SQLITE_IOERR;
-        }
       }
-    }/*ioSyncWrappers*/;
-
-    /**
-       Impls for the sqlite3_vfs methods. Maintenance reminder: members
-       are in alphabetical order to simplify finding them.
-    */
-    const vfsSyncWrappers = {
-      xAccess: function(pVfs,zName,flags,pOut){
-        SAHPool.storeErr();
-        try{
-          const name = this.getPath(zName);
-          wasm.poke32(pOut, SAHPool.mapPathToSAH.has(name) ? 1 : 0);
-        }catch(e){
-          /*ignored*/;
-        }
+    },
+    /*xSectorSize: function(pFile){
+      return SECTOR_SIZE;
+    },*/
+    xSync: function(pFile,flags){
+      log(`xSync ${flags}`);
+      SAHPool.storeErr();
+      const file = SAHPool.mapIdToFile.get(pFile);
+      //log(`xSync ${file.path} ${flags}`);
+      try{
+        file.sah.flush();
         return 0;
-      },
-      xCurrentTime: function(pVfs,pOut){
-        wasm.poke(pOut, 2440587.5 + (new Date().getTime()/86400000),
-                  'double');
+      }catch(e){
+        SAHPool.storeErr(e);
+        error("xSync() failed:",e.message);
+        return capi.SQLITE_IOERR;
+      }
+    },
+    xTruncate: function(pFile,sz64){
+      log(`xTruncate ${sz64}`);
+      SAHPool.storeErr();
+      const file = SAHPool.mapIdToFile.get(pFile);
+      //log(`xTruncate ${file.path} ${iSize}`);
+      try{
+        file.sah.truncate(HEADER_OFFSET_DATA + Number(sz64));
         return 0;
-      },
-      xCurrentTimeInt64: function(pVfs,pOut){
-        wasm.poke(pOut, (2440587.5 * 86400000) + new Date().getTime(),
-                  'i64');
+      }catch(e){
+        SAHPool.storeErr(e);
+        error("xTruncate() failed:",e.message);
+        return capi.SQLITE_IOERR;
+      }
+    },
+    xUnlock: function(pFile,lockType){
+      log('xUnlock');
+      const file = SAHPool.mapIdToFile.get(pFile);
+      file.lockType = lockType;
+      return 0;
+    },
+    xWrite: function(pFile,pSrc,n,offset64){
+      SAHPool.storeErr();
+      const file = SAHPool.mapIdToFile.get(pFile);
+      log(`xWrite ${file.path} ${n} ${offset64}`);
+      try{
+        const nBytes = file.sah.write(
+          wasm.heap8u().subarray(pSrc, pSrc+n),
+          { at: HEADER_OFFSET_DATA + Number(offset64) }
+        );
+        return nBytes === n ? 0 : capi.SQLITE_IOERR;
+      }catch(e){
+        SAHPool.storeErr(e);
+        error("xWrite() failed:",e.message);
+        return capi.SQLITE_IOERR;
+      }
+    }
+  }/*ioSyncWrappers*/;
+
+  /**
+     Impls for the sqlite3_vfs methods. Maintenance reminder: members
+     are in alphabetical order to simplify finding them.
+  */
+  const vfsSyncWrappers = {
+    xAccess: function(pVfs,zName,flags,pOut){
+      log(`xAccess ${wasm.cstrToJs(zName)}`);
+      SAHPool.storeErr();
+      try{
+        const name = this.getPath(zName);
+        wasm.poke32(pOut, SAHPool.mapPathToSAH.has(name) ? 1 : 0);
+      }catch(e){
+        /*ignored*/;
+      }
+      return 0;
+    },
+    xCurrentTime: function(pVfs,pOut){
+      wasm.poke(pOut, 2440587.5 + (new Date().getTime()/86400000),
+                'double');
+      return 0;
+    },
+    xCurrentTimeInt64: function(pVfs,pOut){
+      wasm.poke(pOut, (2440587.5 * 86400000) + new Date().getTime(),
+                'i64');
+      return 0;
+    },
+    xDelete: function(pVfs, zName, doSyncDir){
+      log(`xDelete ${wasm.cstrToJs(zName)}`);
+      SAHPool.storeErr();
+      try{
+        SAHPool.deletePath(SAHPool.getPath(zName));
         return 0;
-      },
-      xDelete: function(pVfs, zName, doSyncDir){
-        SAHPool.storeErr();
+      }catch(e){
+        SAHPool.storeErr(e);
+        error("Error xDelete()ing file:",e.message);
+        return capi.SQLITE_IOERR_DELETE;
+      }
+    },
+    xFullPathname: function(pVfs,zName,nOut,pOut){
+      log(`xFullPathname ${wasm.cstrToJs(zName)}`);
+      const i = wasm.cstrncpy(pOut, zName, nOut);
+      return i<nOut ? 0 : capi.SQLITE_CANTOPEN;
+    },
+    xGetLastError: function(pVfs,nOut,pOut){
+      log(`xGetLastError ${nOut}`);
+      const e = SAHPool.popErr();
+      if(e){
+        const scope = wasm.scopedAllocPush();
         try{
-          SAHPool.deletePath(SAHPool.getPath(zName));
-          return 0;
+          const [cMsg, n] = wasm.scopedAllocCString(e.message, true);
+          wasm.cstrncpy(pOut, cMsg, nOut);
+          if(n > nOut) wasm.poke8(pOut + nOut - 1, 0);
         }catch(e){
-          SAHPool.storeErr(e);
-          error("Error xDelete()ing file:",e.message);
-          return capi.SQLITE_IOERR_DELETE;
+          return capi.SQLITE_NOMEM;
+        }finally{
+          wasm.scopedAllocPop(scope);
         }
-      },
-      xFullPathname: function(pVfs,zName,nOut,pOut){
-        const i = wasm.cstrncpy(pOut, zName, nOut);
-        return i<nOut ? 0 : capi.SQLITE_CANTOPEN;
-      },
-      xGetLastError: function(pVfs,nOut,pOut){
-        const e = SAHPool.popErr();
-        if(e){
-          const scope = wasm.scopedAllocPush();
-          try{
-            const [cMsg, n] = wasm.scopedAllocCString(e.message, true);
-            wasm.cstrncpy(pOut, cMsg, nOut);
-            if(n > nOut) wasm.poke8(pOut + nOut - 1, 0);
-          }catch(e){
-            return capi.SQLITE_NOMEM;
-          }finally{
-            wasm.scopedAllocPop(scope);
+      }
+      return 0;
+    },
+    //xSleep is optionally defined below
+    xOpen: function f(pVfs, zName, pFile, flags, pOutFlags){
+      log(`xOpen ${wasm.cstrToJs(zName)} ${flags}`);
+      try{
+        // First try to open a path that already exists in the file system.
+        const path = (zName && wasm.peek8(zName))
+              ? SAHPool.getPath(zName)
+              : getRandomName();
+        let sah = SAHPool.mapPathToSAH.get(path);
+        if(!sah && (flags & capi.SQLITE_OPEN_CREATE)) {
+          // File not found so try to create it.
+          if(SAHPool.getFileCount() < SAHPool.getCapacity()) {
+            // Choose an unassociated OPFS file from the pool.
+            [sah] = SAHPool.availableSAH.keys();
+            SAHPool.setAssociatedPath(sah, path, flags);
+          }else{
+            // File pool is full.
+            toss('SAH pool is full. Cannot create file',path);
           }
         }
-        return 0;
-      },
-      //xSleep is optionally defined below
-      xOpen: function f(pVfs, zName, pFile, flags, pOutFlags){
-        try{
-          // First try to open a path that already exists in the file system.
-          const path = (zName && wasm.peek8(zName))
-                ? SAHPool.getPath(name)
-                : getRandomName();
-          let ah = SAHPool.mapPathToSAH.get(path);
-          if(!ah && (flags & capi.SQLITE_OPEN_CREATE)) {
-            // File not found so try to create it.
-            if(SAHPool.getFileCount() < SAHPool.getCapacity()) {
-              // Choose an unassociated OPFS file from the pool.
-              ah = SAHPool.availableSAH.keys()[0];
-              SAHPool.setAssociatedPath(ah, path, flags);
-            }else{
-              // File pool is full.
-              toss('SAH pool is full. Cannot create file',path);
-            }
-          }
-          if(!ah){
-            toss('file not found:',path);
-          }
-          // Subsequent methods are only passed the file pointer, so
-          // map the relevant info we need to that pointer.
-          const file = { path, flags, ah };
-          SAHPool.mapIdToFile.set(pFile, file);
-          wasm.poke32(pOutFlags, flags);
-          return 0;
-        }catch(e){
-          SAHPool.storeErr(e);
-          return capi.SQLITE_CANTOPEN;
+        if(!sah){
+          toss('file not found:',path);
         }
-      }/*xOpen()*/
-    }/*vfsSyncWrappers*/;
-
-    if(dVfs){
-      /* Inherit certain VFS members from the default VFS,
-         if available. */
-      opfsVfs.$xRandomness = dVfs.$xRandomness;
-      opfsVfs.$xSleep = dVfs.$xSleep;
-    }
-    if(!opfsVfs.$xRandomness){
-      /* If the default VFS has no xRandomness(), add a basic JS impl... */
-      vfsSyncWrappers.xRandomness = function(pVfs, nOut, pOut){
-        const heap = wasm.heap8u();
-        let i = 0;
-        for(; i < nOut; ++i) heap[pOut + i] = (Math.random()*255000) & 0xFF;
-        return i;
-      };
-    }
-    if(!opfsVfs.$xSleep){
-      vfsSyncWrappers.xSleep = function(pVfs,ms){
+        // Subsequent methods are only passed the file pointer, so
+        // map the relevant info we need to that pointer.
+        const file = {path, flags, sah};
+        SAHPool.mapIdToFile.set(pFile, file);
+        wasm.poke32(pOutFlags, flags);
+        file.sq3File = new capi.sqlite3_file(pFile);
+        file.sq3File.$pMethods = opfsIoMethods.pointer;
+        file.lockType = capi.SQLITE_LOCK_NONE;
         return 0;
-      };
-    }
-
-    /**
-       Ensure that the client has a "fully-sync" SAH impl,
-       else reject the promise. Returns true on success,
-       else false.
-    */
-    if(!(async ()=>{
-      try {
-        const dh = await navigator.storage.getDirectory();
-        const fn = '.opfs-sahpool-sync-check-'+getRandomName();
-        const fh = await dh.getFileHandle(fn, { create: true });
-        const ah = await fh.createSyncAccessHandle();
-        const close = ah.close();
-        await close;
-        await dh.removeEntry(fn);
-        if(close?.then){
-          toss("The local OPFS API is too old for opfs-sahpool:",
-               "it has an async FileSystemSyncAccessHandle.close() method.");
-        }
-        return true;
       }catch(e){
-        promiseReject(e);
-        return false;
+        SAHPool.storeErr(e);
+        return capi.SQLITE_CANTOPEN;
       }
-    })()){
-      return;
-    }
+    }/*xOpen()*/
+  }/*vfsSyncWrappers*/;
+
+  if(dVfs){
+    /* Inherit certain VFS members from the default VFS,
+       if available. */
+    opfsVfs.$xRandomness = dVfs.$xRandomness;
+    opfsVfs.$xSleep = dVfs.$xSleep;
+  }
+  if(!opfsVfs.$xRandomness){
+    /* If the default VFS has no xRandomness(), add a basic JS impl... */
+    vfsSyncWrappers.xRandomness = function(pVfs, nOut, pOut){
+      const heap = wasm.heap8u();
+      let i = 0;
+      for(; i < nOut; ++i) heap[pOut + i] = (Math.random()*255000) & 0xFF;
+      return i;
+    };
+  }
+  if(!opfsVfs.$xSleep){
+    vfsSyncWrappers.xSleep = (pVfs,ms)=>0;
+  }
 
-    SAHPool.isReady = SAHPool.reset().then(async ()=>{
-      if(SAHPool.$error){
-        throw SAHPool.$error;
-      }
-      if(0===SAHPool.getCapacity()){
-        await SAHPool.addCapacity(DEFAULT_CAPACITY);
+  /**
+     Ensure that the client has a "fully-sync" SAH impl,
+     else reject the promise. Returns true on success,
+     else a value intended to be returned via the containing
+     function's Promise result.
+  */
+  const apiVersionCheck = await (async ()=>{
+    try {
+      const dh = await navigator.storage.getDirectory();
+      const fn = '.opfs-sahpool-sync-check-'+getRandomName();
+      const fh = await dh.getFileHandle(fn, { create: true });
+      const ah = await fh.createSyncAccessHandle();
+      const close = ah.close();
+      await close;
+      await dh.removeEntry(fn);
+      if(close?.then){
+        toss("The local OPFS API is too old for opfs-sahpool:",
+             "it has an async FileSystemSyncAccessHandle.close() method.");
       }
-      //log("vfs list:",capi.sqlite3_js_vfs_list());
-      sqlite3.vfs.installVfs({
-        io: {struct: opfsIoMethods, methods: ioSyncWrappers},
-        vfs: {struct: opfsVfs, methods: vfsSyncWrappers}
-      });
-      //log("vfs list:",capi.sqlite3_js_vfs_list());
-      if(sqlite3.oo1){
-        const OpfsSAHPoolDb = function(...args){
-          const opt = sqlite3.oo1.DB.dbCtorHelper.normalizeArgs(...args);
-          opt.vfs = opfsVfs.$zName;
-          sqlite3.oo1.DB.dbCtorHelper.call(this, opt);
-        };
-        OpfsSAHPoolDb.prototype = Object.create(sqlite3.oo1.DB.prototype);
-        OpfsSAHPoolDb.addPoolCapacity = async (n)=>SAHPool.addCapacity(n);
-        OpfsSAHPoolDb.reducePoolCapacity = async (n)=>SAHPool.reduceCapacity(n);
-        OpfsSAHPoolDb.getPoolCapacity = ()=>SAHPool.getCapacity();
-        OpfsSAHPoolDb.getPoolUsage = ()=>SAHPool.getFileCount();
-        sqlite3.oo1.OpfsSAHPoolDb = OpfsSAHPoolDb;
-        sqlite3.oo1.DB.dbCtorHelper.setVfsPostOpenSql(
-          opfsVfs.pointer,
-          function(oo1Db, sqlite3){
-            sqlite3.capi.sqlite3_exec(oo1Db, [
-              /* As of July 2023, the PERSIST journal mode on OPFS is
-                 somewhat slower than DELETE or TRUNCATE (it was faster
-                 before Chrome version 108 or 109). TRUNCATE and DELETE
-                 have very similar performance on OPFS.
-
-                 Roy Hashimoto notes that TRUNCATE and PERSIST modes may
-                 decrease OPFS concurrency because multiple connections
-                 can open the journal file in those modes:
-
-                 https://github.com/rhashimoto/wa-sqlite/issues/68
-
-                 Given that, and the fact that testing has not revealed
-                 any appreciable difference between performance of
-                 TRUNCATE and DELETE modes on OPFS, we currently (as of
-                 2023-07-13) default to DELETE mode.
-              */
-              "pragma journal_mode=DELETE;",
-              /*
-                OPFS benefits hugely from cache on moderate/large
-                speedtest1 --size 50 and --size 100 workloads. We
-                currently rely on setting a non-default cache size when
-                building sqlite3.wasm. If that policy changes, the cache
-                can be set here.
-              */
-              "pragma cache_size=-16384;"
-            ], 0, 0, 0);
-          }
-        );
-      }/*extend sqlite3.oo1*/
-      log("VFS initialized.");
-      promiseResolve(sqlite3);
-    }).catch(promiseReject);
-  })/*return Promise*/;
-}/*installOpfsVfs()*/;
-
-globalThis.sqlite3ApiBootstrap.initializersAsync.push(async (sqlite3)=>{
-  return installOpfsVfs(sqlite3).catch((e)=>{
-    sqlite3.config.warn("Ignoring inability to install opfs-sahpool sqlite3_vfs:",
-                        e.message, e);
-  });
-}/*sqlite3ApiBootstrap.initializersAsync*/);
+      return true;
+    }catch(e){
+      return e;
+    }
+  })();
+  if(true!==apiVersionCheck){
+    return promiseReject(apiVersionCheck);
+  }
+  return SAHPool.isReady = SAHPool.reset().then(async ()=>{
+    if(SAHPool.$error){
+      throw SAHPool.$error;
+    }
+    if(0===SAHPool.getCapacity()){
+      await SAHPool.addCapacity(DEFAULT_CAPACITY);
+    }
+    //log("vfs list:",capi.sqlite3_js_vfs_list());
+    sqlite3.vfs.installVfs({
+      io: {struct: opfsIoMethods, methods: ioSyncWrappers},
+      vfs: {struct: opfsVfs, methods: vfsSyncWrappers},
+      applyArgcCheck: true
+    });
+    log("opfsVfs",opfsVfs,"opfsIoMethods",opfsIoMethods);
+    log("vfs list:",capi.sqlite3_js_vfs_list());
+    if(sqlite3.oo1){
+      const OpfsSAHPoolDb = function(...args){
+        const opt = sqlite3.oo1.DB.dbCtorHelper.normalizeArgs(...args);
+        opt.vfs = opfsVfs.$zName;
+        sqlite3.oo1.DB.dbCtorHelper.call(this, opt);
+      };
+      OpfsSAHPoolDb.prototype = Object.create(sqlite3.oo1.DB.prototype);
+      OpfsSAHPoolDb.addPoolCapacity = async (n)=>SAHPool.addCapacity(n);
+      OpfsSAHPoolDb.reducePoolCapacity = async (n)=>SAHPool.reduceCapacity(n);
+      OpfsSAHPoolDb.getPoolCapacity = ()=>SAHPool.getCapacity();
+      OpfsSAHPoolDb.getPoolUsage = ()=>SAHPool.getFileCount();
+      sqlite3.oo1.OpfsSAHPoolDb = OpfsSAHPoolDb;
+      sqlite3.oo1.DB.dbCtorHelper.setVfsPostOpenSql(
+        opfsVfs.pointer,
+        function(oo1Db, sqlite3){
+          sqlite3.capi.sqlite3_exec(oo1Db, [
+            /* As of July 2023, the PERSIST journal mode on OPFS is
+               somewhat slower than DELETE or TRUNCATE (it was faster
+               before Chrome version 108 or 109). TRUNCATE and DELETE
+               have very similar performance on OPFS.
+
+               Roy Hashimoto notes that TRUNCATE and PERSIST modes may
+               decrease OPFS concurrency because multiple connections
+               can open the journal file in those modes:
+
+               https://github.com/rhashimoto/wa-sqlite/issues/68
+
+               Given that, and the fact that testing has not revealed
+               any appreciable difference between performance of
+               TRUNCATE and DELETE modes on OPFS, we currently (as of
+               2023-07-13) default to DELETE mode.
+            */
+            "pragma journal_mode=DELETE;",
+            /*
+              OPFS benefits hugely from cache on moderate/large
+              speedtest1 --size 50 and --size 100 workloads. We
+              currently rely on setting a non-default cache size when
+              building sqlite3.wasm. If that policy changes, the cache
+              can be set here.
+            */
+            "pragma cache_size=-16384;"
+          ], 0, 0, 0);
+        }
+      );
+    }/*extend sqlite3.oo1*/
+    log("VFS initialized.");
+    return promiseResolve();
+  }).catch(promiseReject);
+}/*installOpfsSAHPoolVfs()*/;
 }/*sqlite3ApiBootstrap.initializers*/);
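
Aside (not part of the diff): the header layout and digest scheme used by this VFS, as a standalone sketch mirroring the constants and computeDigest() logic added above; it is included here for reference only.

  // Each OPFS pool file begins with a 4096-byte (SECTOR_SIZE) header block:
  //   bytes [0 .. 511]   client-defined path, NUL-padded (HEADER_MAX_PATH_SIZE)
  //   bytes [512 .. 515] SQLITE_OPEN_xyz flags as a big-endian uint32
  //   bytes [516 .. 523] digest of bytes [0 .. 515], stored as two uint32 values
  //   bytes [4096 .. ]   the database content proper (HEADER_OFFSET_DATA)
  const computeDigest = (byteArray)=>{
    if(!byteArray[0]){
      /* A leading NUL marks an unassociated slot: fixed sentinel digest. */
      return new Uint32Array([0xfecc5f80, 0xaccec037]);
    }
    let h1 = 0xdeadbeef, h2 = 0x41c6ce57;
    for(const v of byteArray){
      h1 = 31 * h1 + (v * 307);
      h2 = 31 * h2 + (v * 307);
    }
    return new Uint32Array([h1>>>0, h2>>>0]);
  };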
ext/wasm/api/sqlite3-vfs-opfs.c-pp.js
index 40c6090bf91888aa673721533020f9cd15b89d84..c7a752441f133e50e3c374ddde5d3dacbea45bdf 100644 (file)
@@ -236,6 +236,7 @@ const installOpfsVfs = function callee(options){
           ? new sqlite3_vfs(pDVfs)
           : null /* dVfs will be null when sqlite3 is built with
                     SQLITE_OS_OTHER. */;
+    opfsIoMethods.$iVersion = 1;
     opfsVfs.$iVersion = 2/*yes, two*/;
     opfsVfs.$szOsFile = capi.sqlite3_file.structInfo.sizeof;
     opfsVfs.$mxPathname = 1024/*sure, why not?*/;
ext/wasm/speedtest1-worker.js
index c61cab9190b803268fd2abbc3766a3771232b6e6..89ab2149eff4e44c081f6af349336de8e1b56c15 100644 (file)
   self.onmessage = function(msg){
     msg = msg.data;
     switch(msg.type){
-        case 'run': runSpeedtest(msg.data || []); break;
+        case 'run':
+          try {
+            runSpeedtest(msg.data || []);
+          }catch(e){
+            mPost('error',e);
+          }
+          break;
         default:
           logErr("Unhandled worker message type:",msg.type);
           break;
     }
   };
 
+  const sahpSanityChecks = function(sqlite3){
+    log("Attempting OpfsSAHPoolDb sanity checks...");
+    const db = new sqlite3.oo1.OpfsSAHPoolDb('opfs-sahpoool.db');
+    const fn = db.filename;
+    db.exec([
+      'create table t(a);',
+      'insert into t(a) values(1),(2),(3);'
+    ]);
+    db.close();
+    sqlite3.wasm.sqlite3_wasm_vfs_unlink(sqlite3.capi.sqlite3_vfs_find("opfs-sahpool"), fn);
+    log("SAH sanity checks done.");
+  };
+
   const EmscriptenModule = {
     print: log,
     printErr: logErr,
     setStatus: (text)=>mPost('load-status',text)
   };
-  self.sqlite3InitModule(EmscriptenModule).then((sqlite3)=>{
-    const S = sqlite3;
+  log("Initializing speedtest1 module...");
+  self.sqlite3InitModule(EmscriptenModule).then(async (sqlite3)=>{
+    const S = globalThis.S = sqlite3;
+    log("Loaded speedtest1 module. Setting up...");
+    if(S.installOpfsSAHPoolVfs){
+      await S.installOpfsSAHPoolVfs().then(()=>{
+        log("Loaded SAHPool.");
+      }).catch(e=>{
+        logErr("Error setting up SAHPool:",e.message);
+      });
+    }
     App.vfsUnlink = function(pDb, fname){
       const pVfs = S.wasm.sqlite3_wasm_db_vfs(pDb, 0);
       if(pVfs) S.wasm.sqlite3_wasm_vfs_unlink(pVfs, fname||0);
     //else log("Using transient storage.");
     mPost('ready',true);
     log("Registered VFSes:", ...S.capi.sqlite3_js_vfs_list());
+    if(0 && S.installOpfsSAHPoolVfs){
+      sahpSanityChecks(S);
+    }
+  }).catch(e=>{
+    logErr(e);
   });
 })();
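
Usage note (not part of the diff): once installOpfsSAHPoolVfs() has resolved, the static helpers which this commit attaches to sqlite3.oo1.OpfsSAHPoolDb can inspect and grow the pool; the default capacity is 6 unless sqlite3.config['opfs-sahpool.defaultCapacity'] overrides it. A hedged sketch follows; the helper name and numbers are illustrative.

  const managePool = async (sqlite3)=>{
    const Db = sqlite3.oo1.OpfsSAHPoolDb;
    console.log("pool capacity:", Db.getPoolCapacity(),
                "files in use:", Db.getPoolUsage());
    if(Db.getPoolCapacity() - Db.getPoolUsage() < 2){
      /* Persistently adds 4 more slot files to the OPFS-backed pool. */
      await Db.addPoolCapacity(4);
    }
  };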
index 9a3fc29be16973f88501f7f565703160f0bd10f3..c5fd720553c877211076319035e2f0417725736e 100644 (file)
--- a/manifest
+++ b/manifest
@@ -1,5 +1,5 @@
-C speedtest1\sJS:\sonly\sadd\s--memdb\sflag\sby\sdefault\sif\sno\s--vfs\sis\sprovided.
-D 2023-07-15T16:30:46.383
+C Correct\sopfs-sahpool\sVFS\safter\sthe\spebkac\sinvolving\sthe\sprevious\sspeedtest1\sruns.\sMake\sthat\sVFS\sexplicitly\sopt-in\sto\savoid\scertain\sunfortunate\slocking\ssituations.
+D 2023-07-15T19:08:58.138
 F .fossil-settings/empty-dirs dbb81e8fc0401ac46a1491ab34a7f2c7c0452f2f06b54ebb845d024ca8283ef1
 F .fossil-settings/ignore-glob 35175cdfcf539b2318cb04a9901442804be81cd677d8b889fcc9149c21f239ea
 F LICENSE.md df5091916dbb40e6e9686186587125e1b2ff51f022cc334e886c19a0e9982724
@@ -502,8 +502,8 @@ F ext/wasm/api/sqlite3-api-worker1.js 9f32af64df1a031071912eea7a201557fe39b17386
 F ext/wasm/api/sqlite3-license-version-header.js 0c807a421f0187e778dc1078f10d2994b915123c1223fe752b60afdcd1263f89
 F ext/wasm/api/sqlite3-opfs-async-proxy.js 961bbc3ccc1fa4e91d6519a96e8811ad7ae60173bd969fee7775dacb6eee1da2
 F ext/wasm/api/sqlite3-v-helper.js e5c202a9ecde9ef818536d3f5faf26c03a1a9f5192b1ddea8bdabf30d75ef487
-F ext/wasm/api/sqlite3-vfs-opfs-sahpool.js ad6ec4e87f47152a871a23bf90b64709094bf04e8ee76671fc6cedd1ce45086d
-F ext/wasm/api/sqlite3-vfs-opfs.c-pp.js 891f3a18d9ac9b0422b32fd975319dfcd0af5a8ca392f0cce850524e51b49c87
+F ext/wasm/api/sqlite3-vfs-opfs-sahpool.js 83388ead4bfc489bee008298ab51948ccb75227795ce8d1634f2eec8e02548f1
+F ext/wasm/api/sqlite3-vfs-opfs.c-pp.js a5c3195203e6085d7aa89fae4b84cf3f3eec4ff4f928c6d0e5d3ef8b14cbc1c0
 F ext/wasm/api/sqlite3-wasm.c 12a096d8e58a0af0589142bae5a3c27a0c7e19846755a1a37d2c206352fbedda
 F ext/wasm/api/sqlite3-worker1-promiser.c-pp.js bc06df0d599e625bde6a10a394e326dc68da9ff07fa5404354580f81566e591f
 F ext/wasm/api/sqlite3-worker1.c-pp.js da509469755035e919c015deea41b4514b5e84c12a1332e6cc8d42cb2cc1fb75
@@ -540,7 +540,7 @@ F ext/wasm/scratchpad-wasmfs.mjs 66034b9256b218de59248aad796760a1584c1dd84223150
 F ext/wasm/speedtest1-wasmfs.html 0e9d335a9b5b5fafe6e1bc8dc0f0ca7e22e6eb916682a2d7c36218bb7d67379d
 F ext/wasm/speedtest1-wasmfs.mjs ac5cadbf4ffe69e9eaac8b45e8523f030521e02bb67d654c6eb5236d9c456cbe
 F ext/wasm/speedtest1-worker.html bbcf1e7fd79541040c1a7ca2ebf1cb7793ddaf9900d6bde1784148f11b807c34
-F ext/wasm/speedtest1-worker.js 13b57c4a41729678a1194014afec2bd5b94435dcfc8d1039dfa9a533ac819ee1
+F ext/wasm/speedtest1-worker.js 4de92e4e6718b8bd1cdecb75af62739d1115fa66656a700b0b51822c848948f5
 F ext/wasm/speedtest1.html ff048b4a623aa192e83e143e48f1ce2a899846dd42c023fdedc8772b6e3f07da
 F ext/wasm/split-speedtest1-script.sh a3e271938d4d14ee49105eb05567c6a69ba4c1f1293583ad5af0cd3a3779e205 x
 F ext/wasm/sql/000-mandelbrot.sql 775337a4b80938ac8146aedf88808282f04d02d983d82675bd63d9c2d97a15f0
@@ -2044,8 +2044,8 @@ F vsixtest/vsixtest.tcl 6a9a6ab600c25a91a7acc6293828957a386a8a93
 F vsixtest/vsixtest.vcxproj.data 2ed517e100c66dc455b492e1a33350c1b20fbcdc
 F vsixtest/vsixtest.vcxproj.filters 37e51ffedcdb064aad6ff33b6148725226cd608e
 F vsixtest/vsixtest_TemporaryKey.pfx e5b1b036facdb453873e7084e1cae9102ccc67a0
-P fff68e9f25a57045e9d636b02ffa073cf1b984b2587d4fce10f6e35c9988469c
-R 30945b34df9134e0f98668ba08cfc13f
+P 676ffe6280c1ce787b04d0cdb4a0664229c6125c601af4b18d1bfa125aac3675
+R bca4913f68935c8abed9e461aac753fd
 U stephan
-Z 32f9fd4e9e8f1a70cce170b39e2a4458
+Z 2546a1c8fd9c0ca0c4fd392086704b47
 # Remove this line to create a well-formed Fossil manifest.
manifest.uuid
index dc9bec08cdde1d55d62f90513f34e68a7b07051c..9ae594ffea99638bb6bb0ab937d4e60832151cfa 100644 (file)
@@ -1 +1 @@
-676ffe6280c1ce787b04d0cdb4a0664229c6125c601af4b18d1bfa125aac3675
\ No newline at end of file
+41bf1fe31f2f3d0daa2bac25dc57262a4b90f22fed6fa97e4e92467c32ae02dc
\ No newline at end of file