but which has a 99% identical structure for each.
*/
//#endif
+//#// vfs.metrics.enable is a refactoring crutch.
+//#define vfs.metrics.enable=0
const initS11n = function(){
/**
This proxy de/serializes cross-thread function arguments and
is cleared after deserialization.
*/
state.s11n.deserialize = function(clear=false){
-//#if defined opfs-has-metrics
+//#if vfs.metrics.enable
++metrics.s11n.deserialize.count;
//#endif
const t = performance.now();
}
if(clear) viewU8[0] = 0;
//log("deserialize:",argc, rc);
-//#if defined opfs-has-metrics
+//#if vfs.metrics.enable
metrics.s11n.deserialize.time += performance.now() - t;
//#endif
return rc;
*/
state.s11n.serialize = function(...args){
const t = performance.now();
-//#if defined opfs-has-metrics
+//#if vfs.metrics.enable
++metrics.s11n.serialize.count;
//#endif
if(args.length){
}else{
viewU8[0] = 0;
}
-//#if defined opfs-has-metrics
+//#if vfs.metrics.enable
metrics.s11n.serialize.time += performance.now() - t;
//#endif
};
//#endif
return state.s11n;
+//#undef vfs.metrics.enable
}/*initS11n()*/;
globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
'use strict';
const toss = sqlite3.util.toss;
- const toss3 = sqlite3.util.toss3;
const capi = sqlite3.capi;
const util = sqlite3.util;
const wasm = sqlite3.wasm;
}
};
+ /**
+ Populates the main state object used by "opfs" and "opfs-wl", and
+  transferred from those to their async counterpart.
+
+ State which we send to the async-api Worker or share with it.
+ This object must initially contain only cloneable or sharable
+ objects. After the worker's "inited" message arrives, other types
+ of data may be added to it.
+
+ For purposes of Atomics.wait() and Atomics.notify(), we use a
+ SharedArrayBuffer with one slot reserved for each of the API
+ proxy's methods. The sync side of the API uses Atomics.wait()
+ on the corresponding slot and the async side uses
+ Atomics.notify() on that slot.
+
+ The approach of using a single SAB to serialize comms for all
+ instances might(?) lead to deadlock situations in multi-db
+ cases. We should probably have one SAB here with a single slot
+ for locking a per-file initialization step and then allocate a
+ separate SAB like the above one for each file. That will
+ require a bit of acrobatics but should be feasible. The most
+ problematic part is that xOpen() would have to use
+ postMessage() to communicate its SharedArrayBuffer, and mixing
+ that approach with Atomics.wait/notify() gets a bit messy.
+ */
+ opfsUtil.createVfsStateObject = function(opfsVfs){
+ if( !(opfsVfs instanceof capi.sqlite3_vfs) ){
+ toss("Expecting a sqlite3_vfs instance");
+ }
+ const vfsName = wasm.cstrToJs(opfsVfs.$zName);
+ const isWebLocker = 'opfs-wl'===vfsName;
+ const state = util.nu();
+ /**
+ asyncIdleWaitTime is how long (ms) to wait, in the async proxy,
+ for each Atomics.wait() when waiting on inbound VFS API calls.
+ We need to wake up periodically to give the thread a chance to
+ do other things. If this is too high (e.g. 500ms) then even two
+ workers/tabs can easily run into locking errors. Some multiple
+ of this value is also used for determining how long to wait on
+ lock contention to free up.
+ */
+ state.asyncIdleWaitTime = isWebLocker ? 100 : 150;
+
+ /**
+ Whether the async counterpart should log exceptions to
+ the serialization channel. That produces a great deal of
+ noise for seemingly innocuous things like xAccess() checks
+ for missing files, so this option may have one of 3 values:
+
+ 0 = no exception logging.
+
+ 1 = only log exceptions for "significant" ops like xOpen(),
+ xRead(), and xWrite().
+
+ 2 = log all exceptions.
+ */
+ state.asyncS11nExceptions = 1;
+ /* Size of file I/O buffer block. 64k = max sqlite3 page size, and
+ xRead/xWrite() will never deal in blocks larger than that. */
+ state.fileBufferSize = 1024 * 64;
+ state.sabS11nOffset = state.fileBufferSize;
+ /**
+ The size of the block in our SAB for serializing arguments and
+ result values. Needs to be large enough to hold serialized
+ values of any of the proxied APIs. Filenames are the largest
+ part but are limited to opfsVfs.$mxPathname bytes. We also
+ store exceptions there, so it needs to be long enough to hold
+ a reasonably long exception string.
+ */
+ state.sabS11nSize = opfsVfs.$mxPathname * 2;
+ /**
+ The SAB used for all data I/O between the synchronous and
+ async halves (file i/o and arg/result s11n).
+ */
+ state.sabIO = new SharedArrayBuffer(
+ state.fileBufferSize/* file i/o block */
+ + state.sabS11nSize/* argument/result serialization block */
+ );
+ state.opIds = Object.create(null);
+ {
+ /*
+ Maintenance reminder:
+
+ Some of these fields are only for use by the "opfs-wl" VFS,
+ but they must also be set up for the "ofps" VFS so that the
+ sizes and offsets calculated here are consistent in the async
+ proxy. Hypothetically they could differ and it would cope
+ but... why invite disaster over eliding a few superfluous (for
+      "opfs") properties?
+ */
+ /* Indexes for use in our SharedArrayBuffer... */
+ let i = 0;
+ /* SAB slot used to communicate which operation is desired
+ between both workers. This worker writes to it and the other
+ listens for changes. */
+ state.opIds.whichOp = i++;
+ /* Slot for storing return values. This worker listens to that
+ slot and the other worker writes to it. */
+ state.opIds.rc = i++;
+ /* Each function gets an ID which this worker writes to
+ the whichOp slot. The async-api worker uses Atomic.wait()
+ on the whichOp slot to figure out which operation to run
+ next. */
+ state.opIds.xAccess = i++;
+ state.opIds.xClose = i++;
+ state.opIds.xDelete = i++;
+ state.opIds.xDeleteNoWait = i++;
+ state.opIds.xFileSize = i++;
+ state.opIds.xLock = i++;
+ state.opIds.xOpen = i++;
+ state.opIds.xRead = i++;
+ state.opIds.xSleep = i++;
+ state.opIds.xSync = i++;
+ state.opIds.xTruncate = i++;
+ state.opIds.xUnlock = i++;
+ state.opIds.xWrite = i++;
+ state.opIds.mkdir = i++;
+ state.opIds.lockControl = i++ /* opfs-wl signals the intent to lock here */;
+ /** Internal signals which are used only during development and
+ testing via the dev console. */
+ state.opIds['opfs-async-metrics'] = i++;
+ state.opIds['opfs-async-shutdown'] = i++;
+ /* The retry slot is used by the async part for wait-and-retry
+ semantics. Though we could hypothetically use the xSleep slot
+ for that, doing so might lead to undesired side effects. */
+ state.opIds.retry = i++;
+
+ /* Slots for submitting the lock type and receiving its acknowledgement.
+ Only used by "opfs-wl". */
+ state.lock = util.nu({
+ type: i++ /* SQLITE_LOCK_xyz value */,
+ atomicsHandshake: i++ /* 0=pending, 1=release, 2=granted */
+ });
+ state.sabOP = new SharedArrayBuffer(
+ i * 4/* ==sizeof int32, noting that Atomics.wait() and friends
+ can only function on Int32Array views of an SAB. */);
+ }
+ /**
+ SQLITE_xxx constants to export to the async worker
+ counterpart...
+ */
+ state.sq3Codes = Object.create(null);
+ [
+ 'SQLITE_ACCESS_EXISTS',
+ 'SQLITE_ACCESS_READWRITE',
+ 'SQLITE_BUSY',
+ 'SQLITE_CANTOPEN',
+ 'SQLITE_ERROR',
+ 'SQLITE_IOERR',
+ 'SQLITE_IOERR_ACCESS',
+ 'SQLITE_IOERR_CLOSE',
+ 'SQLITE_IOERR_DELETE',
+ 'SQLITE_IOERR_FSYNC',
+ 'SQLITE_IOERR_LOCK',
+ 'SQLITE_IOERR_READ',
+ 'SQLITE_IOERR_SHORT_READ',
+ 'SQLITE_IOERR_TRUNCATE',
+ 'SQLITE_IOERR_UNLOCK',
+ 'SQLITE_IOERR_WRITE',
+ 'SQLITE_LOCK_EXCLUSIVE',
+ 'SQLITE_LOCK_NONE',
+ 'SQLITE_LOCK_PENDING',
+ 'SQLITE_LOCK_RESERVED',
+ 'SQLITE_LOCK_SHARED',
+ 'SQLITE_LOCKED',
+ 'SQLITE_MISUSE',
+ 'SQLITE_NOTFOUND',
+ 'SQLITE_OPEN_CREATE',
+ 'SQLITE_OPEN_DELETEONCLOSE',
+ 'SQLITE_OPEN_MAIN_DB',
+ 'SQLITE_OPEN_READONLY',
+ 'SQLITE_LOCK_NONE',
+ 'SQLITE_LOCK_SHARED',
+ 'SQLITE_LOCK_RESERVED',
+ 'SQLITE_LOCK_PENDING',
+ 'SQLITE_LOCK_EXCLUSIVE'
+ ].forEach((k)=>{
+ if(undefined === (state.sq3Codes[k] = capi[k])){
+ toss("Maintenance required: not found:",k);
+ }
+ });
+
+ state.opfsFlags = Object.assign(Object.create(null),{
+ /**
+ Flag for use with xOpen(). URI flag "opfs-unlock-asap=1"
+ enables this. See defaultUnlockAsap, below.
+ */
+ OPFS_UNLOCK_ASAP: 0x01,
+ /**
+ Flag for use with xOpen(). URI flag "delete-before-open=1"
+ tells the VFS to delete the db file before attempting to open
+ it. This can be used, e.g., to replace a db which has been
+ corrupted (without forcing us to expose a delete/unlink()
+ function in the public API).
+
+ Failure to unlink the file is ignored but may lead to
+ downstream errors. An unlink can fail if, e.g., another tab
+ has the handle open.
+
+ It goes without saying that deleting a file out from under another
+ instance results in Undefined Behavior.
+ */
+ OPFS_UNLINK_BEFORE_OPEN: 0x02,
+ /**
+ If true, any async routine which implicitly acquires a sync
+ access handle (i.e. an OPFS lock) will release that lock at
+ the end of the call which acquires it. If false, such
+ "autolocks" are not released until the VFS is idle for some
+ brief amount of time.
+
+ The benefit of enabling this is much higher concurrency. The
+ down-side is much-reduced performance (as much as a 4x decrease
+ in speedtest1).
+ */
+ defaultUnlockAsap: false
+ });
+
+//#if nope
+/* does not yet work this way */
+//#define opfs-has-metrics
+//#include api/opfs-common-inline.c-pp.js
+//#undef opfs-has-metrics
+ state.initS11n = initS11n;
+//#endif
+ return state;
+ }/*createVfsStateObject()*/;
+
}/*sqlite3ApiBootstrap.initializers*/);
-//#else
-/*
- The OPFS SAH Pool parts are elided from builds targeting node.js.
-*/
//#endif target:node
toss: function(...args){throw new Error(args.join(' '))},
toss3,
typedArrayPart: wasm.typedArrayPart,
+ nu: (...obj)=>Object.assign(Object.create(null),...obj),
assert: function(arg,msg){
if( !arg ){
util.toss("Assertion failed:",msg);
usage of those methods to remove the "await".
*/
"use strict";
-const wPost = (type,...args)=>postMessage({type, payload:args});
const urlParams = new URL(globalThis.location.href).searchParams;
if( !urlParams.has('vfs') ){
throw new Error("Expecting vfs=opfs|opfs-wl URL argument for this worker");
}
const isWebLocker = 'opfs-wl'===urlParams.get('vfs');
+const wPost = (type,...args)=>postMessage({type, payload:args});
const installAsyncProxy = function(){
const toss = function(...args){throw new Error(args.join(' '))};
if(globalThis.window === globalThis){
*/
'use strict';
globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
- const opfsUtil = sqlite3.opfs || sqlite3.util.toss("Missing sqlite3.opfs")
- /* Gets removed from sqlite3 during bootstrap, so we need an
- early reference to it. */;
+ /* These get removed from sqlite3 during bootstrap, so we need an
+     early references to them. */
+ const util = sqlite3.util;
+ const opfsUtil = sqlite3.opfs || sqlite3.util.toss("Missing sqlite3.opfs");
+
/**
installOpfsWlVfs() returns a Promise which, on success, installs an
sqlite3_vfs named "opfs-wl", suitable for use with all sqlite3 APIs
On success, the Promise resolves to the top-most sqlite3 namespace
object.
+
+ Code-diver notes: this file is particularly sparse on documentation
+ because much of it is identical to the code in
+ sqlite3-vfs-opfs.c-pp.js. See that file for more details.
*/
const installOpfsWlVfs = function callee(options){
try{
}catch(e){
return Promise.reject(e);
}
- const nu = (...obj)=>Object.assign(Object.create(null),...obj);
- options = nu(options);
+ options = util.nu(options);
const urlParams = new URL(globalThis.location.href).searchParams;
if(urlParams.has('opfs-disable')){
//sqlite3.config.warn('Explicitly not installing 'opfs-wl' VFS due to opfs-disable flag.');
const error = (...args)=>logImpl(0, ...args);
const toss = sqlite3.util.toss;
const capi = sqlite3.capi;
- const util = sqlite3.util;
const wasm = sqlite3.wasm;
const sqlite3_vfs = capi.sqlite3_vfs;
const sqlite3_file = capi.sqlite3_file;
const sqlite3_io_methods = capi.sqlite3_io_methods;
- /**
- State which we send to the async-api Worker or share with it.
- This object must initially contain only cloneable or sharable
- objects. After the worker's "inited" message arrives, other types
- of data may be added to it.
-
- For purposes of Atomics.wait() and Atomics.notify(), we use a
- SharedArrayBuffer with one slot reserved for each of the API
- proxy's methods. The sync side of the API uses Atomics.wait()
- on the corresponding slot and the async side uses
- Atomics.notify() on that slot.
-
- The approach of using a single SAB to serialize comms for all
- instances might(?) lead to deadlock situations in multi-db
- cases. We should probably have one SAB here with a single slot
- for locking a per-file initialization step and then allocate a
- separate SAB like the above one for each file. That will
- require a bit of acrobatics but should be feasible. The most
- problematic part is that xOpen() would have to use
- postMessage() to communicate its SharedArrayBuffer, and mixing
- that approach with Atomics.wait/notify() gets a bit messy.
- */
- const state = Object.create(null);
+ const pDVfs = capi.sqlite3_vfs_find(null)/*pointer to default VFS*/;
+ const dVfs = pDVfs
+ ? new sqlite3_vfs(pDVfs)
+ : null /* dVfs will be null when sqlite3 is built with
+ SQLITE_OS_OTHER. */;
+ const opfsIoMethods = new sqlite3_io_methods();
+ const opfsVfs = new sqlite3_vfs()
+ .addOnDispose( ()=>opfsIoMethods.dispose());
+ opfsIoMethods.$iVersion = 1;
+ opfsVfs.$iVersion = 2/*yes, two*/;
+ opfsVfs.$szOsFile = capi.sqlite3_file.structInfo.sizeof;
+ opfsVfs.$mxPathname = 1024/* sure, why not? The OPFS name length limit
+ is undocumented/unspecified. */;
+ opfsVfs.$zName = wasm.allocCString('opfs-wl');
+ opfsVfs.addOnDispose(
+ '$zName', opfsVfs.$zName,
+ 'cleanup default VFS wrapper', ()=>(dVfs ? dVfs.dispose() : null)
+ );
+ const state = opfsUtil.createVfsStateObject(opfsVfs);
+ state.verbose = options.verbose;
const metrics = Object.create(null);
//#define opfs-has-metrics
//#include api/opfs-common-inline.c-pp.js
s = metrics.s11n.deserialize = Object.create(null);
s.count = s.time = 0;
}
- }/*metrics*/;
- const opfsIoMethods = new sqlite3_io_methods();
- const opfsVfs = new sqlite3_vfs()
- .addOnDispose( ()=>opfsIoMethods.dispose());
+ }/*vfsMetrics*/;
+ vfsMetrics.reset();
let promiseWasRejected = undefined;
const promiseReject = (err)=>{
promiseWasRejected = true;
promiseWasRejected = false;
return promiseResolve_(sqlite3);
};
- const workerArgs = '?vfs=opfs-wl';
+ options.proxyUri += '?vfs=opfs-wl';
const W =
//#if target:es6-bundler-friendly
new Worker(new URL("sqlite3-opfs-async-proxy.js?vfs=opfs-wl", import.meta.url));
//#elif target:es6-module
- new Worker(new URL(options.proxyUri+workerArgs, import.meta.url));
+ new Worker(new URL(options.proxyUri, import.meta.url));
//#else
- new Worker(options.proxyUri+workerArgs);
+ new Worker(options.proxyUri);
//#endif
setTimeout(()=>{
/* At attempt to work around a browser-specific quirk in which
error("Error initializing OPFS asyncer:",err);
promiseReject(new Error("Loading OPFS async Worker failed for unknown reasons."));
};
- const pDVfs = capi.sqlite3_vfs_find(null)/*pointer to default VFS*/;
- const dVfs = pDVfs
- ? new sqlite3_vfs(pDVfs)
- : null /* dVfs will be null when sqlite3 is built with
- SQLITE_OS_OTHER. */;
- opfsIoMethods.$iVersion = 1;
- opfsVfs.$iVersion = 2/*yes, two*/;
- opfsVfs.$szOsFile = capi.sqlite3_file.structInfo.sizeof;
- opfsVfs.$mxPathname = 1024/* sure, why not? The OPFS name length limit
- is undocumented/unspecified. */;
- opfsVfs.$zName = wasm.allocCString('opfs-wl');
- // All C-side memory of opfsVfs is zeroed out, but just to be explicit:
- opfsVfs.$xDlOpen = opfsVfs.$xDlError = opfsVfs.$xDlSym = opfsVfs.$xDlClose = null;
- opfsVfs.addOnDispose(
- '$zName', opfsVfs.$zName,
- 'cleanup default VFS wrapper', ()=>(dVfs ? dVfs.dispose() : null)
- );
- /**
- Pedantic sidebar about opfsVfs.ondispose: the entries in that array
- are items to clean up when opfsVfs.dispose() is called, but in this
- environment it will never be called. The VFS instance simply
- hangs around until the WASM module instance is cleaned up. We
- "could" _hypothetically_ clean it up by "importing" an
- sqlite3_os_end() impl into the wasm build, but the shutdown order
- of the wasm engine and the JS one are undefined so there is no
- guaranty that the opfsVfs instance would be available in one
- environment or the other when sqlite3_os_end() is called (_if_ it
- gets called at all in a wasm build, which is undefined).
- */
-
- state.verbose = options.verbose;
- state.littleEndian = (()=>{
- const buffer = new ArrayBuffer(2);
- new DataView(buffer).setInt16(0, 256, true /* ==>littleEndian */);
- // Int16Array uses the platform's endianness.
- return new Int16Array(buffer)[0] === 256;
- })();
- /**
- asyncIdleWaitTime is how long (ms) to wait, in the async proxy,
- for each Atomics.wait() when waiting on inbound VFS API calls.
- We need to wake up periodically to give the thread a chance to
- do other things. If this is too high (e.g. 500ms) then even two
- workers/tabs can easily run into locking errors. Some multiple
- of this value is also used for determining how long to wait on
- lock contention to free up.
- */
- state.asyncIdleWaitTime = 150;
-
- /**
- Whether the async counterpart should log exceptions to
- the serialization channel. That produces a great deal of
- noise for seemingly innocuous things like xAccess() checks
- for missing files, so this option may have one of 3 values:
-
- 0 = no exception logging.
-
- 1 = only log exceptions for "significant" ops like xOpen(),
- xRead(), and xWrite().
-
- 2 = log all exceptions.
- */
- state.asyncS11nExceptions = 1;
- /* Size of file I/O buffer block. 64k = max sqlite3 page size, and
- xRead/xWrite() will never deal in blocks larger than that. */
- state.fileBufferSize = 1024 * 64;
- state.sabS11nOffset = state.fileBufferSize;
- /**
- The size of the block in our SAB for serializing arguments and
- result values. Needs to be large enough to hold serialized
- values of any of the proxied APIs. Filenames are the largest
- part but are limited to opfsVfs.$mxPathname bytes. We also
- store exceptions there, so it needs to be long enough to hold
- a reasonably long exception string.
- */
- state.sabS11nSize = opfsVfs.$mxPathname * 2;
- /**
- The SAB used for all data I/O between the synchronous and
- async halves (file i/o and arg/result s11n).
- */
- state.sabIO = new SharedArrayBuffer(
- state.fileBufferSize/* file i/o block */
- + state.sabS11nSize/* argument/result serialization block */
- );
- state.opIds = Object.create(null);
- {
- /* Indexes for use in our SharedArrayBuffer... */
- let i = 0;
- /* SAB slot used to communicate which operation is desired
- between both workers. This worker writes to it and the other
- listens for changes. */
- state.opIds.whichOp = i++;
- /* Slot for storing return values. This worker listens to that
- slot and the other worker writes to it. */
- state.opIds.rc = i++;
- /* Each function gets an ID which this worker writes to
- the whichOp slot. The async-api worker uses Atomic.wait()
- on the whichOp slot to figure out which operation to run
- next. */
- state.opIds.xAccess = i++;
- state.opIds.xClose = i++;
- state.opIds.xDelete = i++;
- state.opIds.xDeleteNoWait = i++;
- state.opIds.xFileSize = i++;
- state.opIds.xLock = i++;
- state.opIds.xOpen = i++;
- state.opIds.xRead = i++;
- state.opIds.xSleep = i++;
- state.opIds.xSync = i++;
- state.opIds.xTruncate = i++;
- state.opIds.xUnlock = i++;
- state.opIds.xWrite = i++;
- state.opIds.mkdir = i++;
- state.opIds.lockControl = i++ /* we signal the intent to lock here */;
- /** Internal signals which are used only during development and
- testing via the dev console. */
- state.opIds['opfs-async-metrics'] = i++;
- state.opIds['opfs-async-shutdown'] = i++;
- /* The retry slot is used by the async part for wait-and-retry
- semantics. Though we could hypothetically use the xSleep slot
- for that, doing so might lead to undesired side effects. */
- state.opIds.retry = i++;
-
- /* Slots for submitting the lock type and receiving its acknowledgement. */
- state.lock = nu({
- type: i++ /* SQLITE_LOCK_xyz value */,
- atomicsHandshake: i++ /* 1=release, 2=granted */
- });
- state.sabOP = new SharedArrayBuffer(
- i * 4/* ==sizeof int32, noting that Atomics.wait() and friends
- can only function on Int32Array views of an SAB. */);
- vfsMetrics.reset();
- }
- /**
- SQLITE_xxx constants to export to the async worker
- counterpart...
- */
- state.sq3Codes = Object.create(null);
- [
- 'SQLITE_ACCESS_EXISTS',
- 'SQLITE_ACCESS_READWRITE',
- 'SQLITE_BUSY',
- 'SQLITE_CANTOPEN',
- 'SQLITE_ERROR',
- 'SQLITE_IOERR',
- 'SQLITE_IOERR_ACCESS',
- 'SQLITE_IOERR_CLOSE',
- 'SQLITE_IOERR_DELETE',
- 'SQLITE_IOERR_FSYNC',
- 'SQLITE_IOERR_LOCK',
- 'SQLITE_IOERR_READ',
- 'SQLITE_IOERR_SHORT_READ',
- 'SQLITE_IOERR_TRUNCATE',
- 'SQLITE_IOERR_UNLOCK',
- 'SQLITE_IOERR_WRITE',
- 'SQLITE_LOCK_EXCLUSIVE',
- 'SQLITE_LOCK_NONE',
- 'SQLITE_LOCK_PENDING',
- 'SQLITE_LOCK_RESERVED',
- 'SQLITE_LOCK_SHARED',
- 'SQLITE_LOCKED',
- 'SQLITE_MISUSE',
- 'SQLITE_NOTFOUND',
- 'SQLITE_OPEN_CREATE',
- 'SQLITE_OPEN_DELETEONCLOSE',
- 'SQLITE_OPEN_MAIN_DB',
- 'SQLITE_OPEN_READONLY',
- 'SQLITE_LOCK_NONE',
- 'SQLITE_LOCK_SHARED',
- 'SQLITE_LOCK_RESERVED',
- 'SQLITE_LOCK_PENDING',
- 'SQLITE_LOCK_EXCLUSIVE'
- ].forEach((k)=>{
- if(undefined === (state.sq3Codes[k] = capi[k])){
- toss("Maintenance required: not found:",k);
- }
- });
- state.opfsFlags = Object.assign(Object.create(null),{
- /**
- Flag for use with xOpen(). URI flag "opfs-unlock-asap=1"
- enables this. See defaultUnlockAsap, below.
- */
- OPFS_UNLOCK_ASAP: 0x01,
- /**
- Flag for use with xOpen(). URI flag "delete-before-open=1"
- tells the VFS to delete the db file before attempting to open
- it. This can be used, e.g., to replace a db which has been
- corrupted (without forcing us to expose a delete/unlink()
- function in the public API).
-
- Failure to unlink the file is ignored but may lead to
- downstream errors. An unlink can fail if, e.g., another tab
- has the handle open.
-
- It goes without saying that deleting a file out from under another
- instance results in Undefined Behavior.
- */
- OPFS_UNLINK_BEFORE_OPEN: 0x02,
- /**
- If true, any async routine which implicitly acquires a sync
- access handle (i.e. an OPFS lock) will release that lock at
- the end of the call which acquires it. If false, such
- "autolocks" are not released until the VFS is idle for some
- brief amount of time.
-
- The benefit of enabling this is much higher concurrency. The
- down-side is much-reduced performance (as much as a 4x decrease
- in speedtest1).
- */
- defaultUnlockAsap: false
- });
/**
Runs the given operation (by name) in the async worker
const ioSyncWrappers = {
xCheckReservedLock: function(pFile,pOut){
/**
- As of late 2022, only a single lock can be held on an OPFS
- file. We have no way of checking whether any _other_ db
- connection has a lock except by trying to obtain and (on
- success) release a sync-handle for it, but doing so would
- involve an inherent race condition. For the time being,
- pending a better solution, we simply report whether the
- given pFile is open.
-
- Update 2024-06-12: based on forum discussions, this
- function now always sets pOut to 0 (false):
-
- https://sqlite.org/forum/forumpost/a2f573b00cda1372
+ After consultation with a topic expert: "opfs-wl" will
+ continue to use the same no-op impl which "opfs" does
+ because:
+
+ - xCheckReservedLock() is just a hint. If SQLite needs to
+ lock, it's still going to try to lock.
+
+ - We cannot do this check synchronously in "opfs-wl",
+        so we would need to pass it to the async proxy. That would
+ make it inordinately expensive considering that it's
+ just a hint.
*/
wasm.poke(pOut, 0, 'i32');
return 0;
}
}/*sanityCheck()*/;
+ //const initS11n = state.initS11n || toss("Missing state.initS11n()");
+ //delete state.initS11n;
W.onmessage = function({data}){
//log("Worker.onmessage:",data);
switch(data.type){
/* Arrives as soon as the asyc proxy finishes loading.
Pass our config and shared state on to the async
worker. */
- W.postMessage({type: 'opfs-async-init',args: state});
+ W.postMessage({type: 'opfs-async-init', args: util.nu(state)});
break;
case 'opfs-async-inited': {
/* Indicates that the async partner has received the 'init'
const sqlite3_file = capi.sqlite3_file;
const sqlite3_io_methods = capi.sqlite3_io_methods;
+ const opfsIoMethods = new sqlite3_io_methods();
+ const opfsVfs = new sqlite3_vfs()
+ .addOnDispose( ()=>opfsIoMethods.dispose());
+ const pDVfs = capi.sqlite3_vfs_find(null)/*pointer to default VFS*/;
+ const dVfs = pDVfs
+ ? new sqlite3_vfs(pDVfs)
+ : null /* dVfs will be null when sqlite3 is built with
+ SQLITE_OS_OTHER. */;
+
+ opfsIoMethods.$iVersion = 1;
+ opfsVfs.$iVersion = 2/*yes, two*/;
+ opfsVfs.$szOsFile = capi.sqlite3_file.structInfo.sizeof;
+ opfsVfs.$mxPathname = 1024/* sure, why not? The OPFS name length limit
+ is undocumented/unspecified. */;
+ opfsVfs.$zName = wasm.allocCString("opfs");
+ // All C-side memory of opfsVfs is zeroed out, but just to be explicit:
+ opfsVfs.$xDlOpen = opfsVfs.$xDlError = opfsVfs.$xDlSym = opfsVfs.$xDlClose = null;
+ opfsVfs.addOnDispose(
+ '$zName', opfsVfs.$zName,
+ 'cleanup default VFS wrapper', ()=>(dVfs ? dVfs.dispose() : null)
+ /**
+ Pedantic sidebar: the entries in this array are items to
+ clean up when opfsVfs.dispose() is called, but in this
+ environment it will never be called. The VFS instance simply
+ hangs around until the WASM module instance is cleaned up. We
+ "could" _hypothetically_ clean it up by "importing" an
+ sqlite3_os_end() impl into the wasm build, but the shutdown
+ order of the wasm engine and the JS one are undefined so
+ there is no guaranty that the opfsVfs instance would be
+ available in one environment or the other when
+ sqlite3_os_end() is called (_if_ it gets called at all in a
+ wasm build, which is undefined).
+ */
+ );
+
/**
State which we send to the async-api Worker or share with it.
This object must initially contain only cloneable or sharable
postMessage() to communicate its SharedArrayBuffer, and mixing
that approach with Atomics.wait/notify() gets a bit messy.
*/
- const state = Object.create(null);
+ const state = opfsUtil.createVfsStateObject(opfsVfs);
+ state.verbose = options.verbose;
const metrics = Object.create(null);
//#define opfs-has-metrics
//#include api/opfs-common-inline.c-pp.js
s.count = s.time = 0;
}
}/*metrics*/;
- const opfsIoMethods = new sqlite3_io_methods();
- const opfsVfs = new sqlite3_vfs()
- .addOnDispose( ()=>opfsIoMethods.dispose());
+ vfsMetrics.reset();
let promiseWasRejected = undefined;
const promiseReject = (err)=>{
promiseWasRejected = true;
error("Error initializing OPFS asyncer:",err);
promiseReject(new Error("Loading OPFS async Worker failed for unknown reasons."));
};
- const pDVfs = capi.sqlite3_vfs_find(null)/*pointer to default VFS*/;
- const dVfs = pDVfs
- ? new sqlite3_vfs(pDVfs)
- : null /* dVfs will be null when sqlite3 is built with
- SQLITE_OS_OTHER. */;
- opfsIoMethods.$iVersion = 1;
- opfsVfs.$iVersion = 2/*yes, two*/;
- opfsVfs.$szOsFile = capi.sqlite3_file.structInfo.sizeof;
- opfsVfs.$mxPathname = 1024/* sure, why not? The OPFS name length limit
- is undocumented/unspecified. */;
- opfsVfs.$zName = wasm.allocCString("opfs");
- // All C-side memory of opfsVfs is zeroed out, but just to be explicit:
- opfsVfs.$xDlOpen = opfsVfs.$xDlError = opfsVfs.$xDlSym = opfsVfs.$xDlClose = null;
- opfsVfs.addOnDispose(
- '$zName', opfsVfs.$zName,
- 'cleanup default VFS wrapper', ()=>(dVfs ? dVfs.dispose() : null)
- );
- /**
- Pedantic sidebar about opfsVfs.ondispose: the entries in that array
- are items to clean up when opfsVfs.dispose() is called, but in this
- environment it will never be called. The VFS instance simply
- hangs around until the WASM module instance is cleaned up. We
- "could" _hypothetically_ clean it up by "importing" an
- sqlite3_os_end() impl into the wasm build, but the shutdown order
- of the wasm engine and the JS one are undefined so there is no
- guaranty that the opfsVfs instance would be available in one
- environment or the other when sqlite3_os_end() is called (_if_ it
- gets called at all in a wasm build, which is undefined).
- */
- state.verbose = options.verbose;
- state.littleEndian = (()=>{
- const buffer = new ArrayBuffer(2);
- new DataView(buffer).setInt16(0, 256, true /* ==>littleEndian */);
- // Int16Array uses the platform's endianness.
- return new Int16Array(buffer)[0] === 256;
- })();
- /**
- asyncIdleWaitTime is how long (ms) to wait, in the async proxy,
- for each Atomics.wait() when waiting on inbound VFS API calls.
- We need to wake up periodically to give the thread a chance to
- do other things. If this is too high (e.g. 500ms) then even two
- workers/tabs can easily run into locking errors. Some multiple
- of this value is also used for determining how long to wait on
- lock contention to free up.
- */
- state.asyncIdleWaitTime = 150;
-
- /**
- Whether the async counterpart should log exceptions to
- the serialization channel. That produces a great deal of
- noise for seemingly innocuous things like xAccess() checks
- for missing files, so this option may have one of 3 values:
-
- 0 = no exception logging.
-
- 1 = only log exceptions for "significant" ops like xOpen(),
- xRead(), and xWrite().
-
- 2 = log all exceptions.
- */
- state.asyncS11nExceptions = 1;
- /* Size of file I/O buffer block. 64k = max sqlite3 page size, and
- xRead/xWrite() will never deal in blocks larger than that. */
- state.fileBufferSize = 1024 * 64;
- state.sabS11nOffset = state.fileBufferSize;
- /**
- The size of the block in our SAB for serializing arguments and
- result values. Needs to be large enough to hold serialized
- values of any of the proxied APIs. Filenames are the largest
- part but are limited to opfsVfs.$mxPathname bytes. We also
- store exceptions there, so it needs to be long enough to hold
- a reasonably long exception string.
- */
- state.sabS11nSize = opfsVfs.$mxPathname * 2;
- /**
- The SAB used for all data I/O between the synchronous and
- async halves (file i/o and arg/result s11n).
- */
- state.sabIO = new SharedArrayBuffer(
- state.fileBufferSize/* file i/o block */
- + state.sabS11nSize/* argument/result serialization block */
- );
- state.opIds = Object.create(null);
- {
- /* Indexes for use in our SharedArrayBuffer... */
- let i = 0;
- /* SAB slot used to communicate which operation is desired
- between both workers. This worker writes to it and the other
- listens for changes. */
- state.opIds.whichOp = i++;
- /* Slot for storing return values. This worker listens to that
- slot and the other worker writes to it. */
- state.opIds.rc = i++;
- /* Each function gets an ID which this worker writes to
- the whichOp slot. The async-api worker uses Atomic.wait()
- on the whichOp slot to figure out which operation to run
- next. */
- state.opIds.xAccess = i++;
- state.opIds.xClose = i++;
- state.opIds.xDelete = i++;
- state.opIds.xDeleteNoWait = i++;
- state.opIds.xFileSize = i++;
- state.opIds.xLock = i++;
- state.opIds.xOpen = i++;
- state.opIds.xRead = i++;
- state.opIds.xSleep = i++;
- state.opIds.xSync = i++;
- state.opIds.xTruncate = i++;
- state.opIds.xUnlock = i++;
- state.opIds.xWrite = i++;
- state.opIds.mkdir = i++;
- state.opIds['opfs-async-metrics'] = i++;
- state.opIds['opfs-async-shutdown'] = i++;
- /* The retry slot is used by the async part for wait-and-retry
- semantics. Though we could hypothetically use the xSleep slot
- for that, doing so might lead to undesired side effects. */
- state.opIds.retry = i++;
- state.sabOP = new SharedArrayBuffer(
- i * 4/* ==sizeof int32, noting that Atomics.wait() and friends
- can only function on Int32Array views of an SAB. */);
- vfsMetrics.reset();
- }
- /**
- SQLITE_xxx constants to export to the async worker
- counterpart...
- */
- state.sq3Codes = Object.create(null);
- [
- 'SQLITE_ACCESS_EXISTS',
- 'SQLITE_ACCESS_READWRITE',
- 'SQLITE_BUSY',
- 'SQLITE_CANTOPEN',
- 'SQLITE_ERROR',
- 'SQLITE_IOERR',
- 'SQLITE_IOERR_ACCESS',
- 'SQLITE_IOERR_CLOSE',
- 'SQLITE_IOERR_DELETE',
- 'SQLITE_IOERR_FSYNC',
- 'SQLITE_IOERR_LOCK',
- 'SQLITE_IOERR_READ',
- 'SQLITE_IOERR_SHORT_READ',
- 'SQLITE_IOERR_TRUNCATE',
- 'SQLITE_IOERR_UNLOCK',
- 'SQLITE_IOERR_WRITE',
- 'SQLITE_LOCK_EXCLUSIVE',
- 'SQLITE_LOCK_NONE',
- 'SQLITE_LOCK_PENDING',
- 'SQLITE_LOCK_RESERVED',
- 'SQLITE_LOCK_SHARED',
- 'SQLITE_LOCKED',
- 'SQLITE_MISUSE',
- 'SQLITE_NOTFOUND',
- 'SQLITE_OPEN_CREATE',
- 'SQLITE_OPEN_DELETEONCLOSE',
- 'SQLITE_OPEN_MAIN_DB',
- 'SQLITE_OPEN_READONLY'
- ].forEach((k)=>{
- if(undefined === (state.sq3Codes[k] = capi[k])){
- toss("Maintenance required: not found:",k);
- }
- });
- state.opfsFlags = Object.assign(Object.create(null),{
- /**
- Flag for use with xOpen(). URI flag "opfs-unlock-asap=1"
- enables this. See defaultUnlockAsap, below.
- */
- OPFS_UNLOCK_ASAP: 0x01,
- /**
- Flag for use with xOpen(). URI flag "delete-before-open=1"
- tells the VFS to delete the db file before attempting to open
- it. This can be used, e.g., to replace a db which has been
- corrupted (without forcing us to expose a delete/unlink()
- function in the public API).
-
- Failure to unlink the file is ignored but may lead to
- downstream errors. An unlink can fail if, e.g., another tab
- has the handle open.
-
- It goes without saying that deleting a file out from under another
- instance results in Undefined Behavior.
- */
- OPFS_UNLINK_BEFORE_OPEN: 0x02,
- /**
- If true, any async routine which implicitly acquires a sync
- access handle (i.e. an OPFS lock) will release that lock at
- the end of the call which acquires it. If false, such
- "autolocks" are not released until the VFS is idle for some
- brief amount of time.
-
- The benefit of enabling this is much higher concurrency. The
- down-side is much-reduced performance (as much as a 4x decrease
- in speedtest1).
- */
- defaultUnlockAsap: false
- });
/**
Runs the given operation (by name) in the async worker
}
}/*sanityCheck()*/;
+ //const initS11n = state.initS11n || toss("Missing state.initS11n()");
+ //delete state.initS11n;
W.onmessage = function({data}){
//log("Worker.onmessage:",data);
switch(data.type){
-C Consolidate\smuch\sof\sthe\sOPFS\sutility\scode\sinto\sa\snew\sfile\sfor\suse\sby\stwo\sof\sthe\sOPFS\sVFSes.
-D 2026-03-04T11:37:39.256
+C Baby\ssteps\sin\sconsolidating\scommon\sOPFS\sVFS\scode.
+D 2026-03-04T14:33:33.214
F .fossil-settings/binary-glob 61195414528fb3ea9693577e1980230d78a1f8b0a54c78cf1b9b24d0a409ed6a x
F .fossil-settings/empty-dirs dbb81e8fc0401ac46a1491ab34a7f2c7c0452f2f06b54ebb845d024ca8283ef1
F .fossil-settings/ignore-glob 35175cdfcf539b2318cb04a9901442804be81cd677d8b889fcc9149c21f239ea
F ext/wasm/api/README.md a905d5c6bfc3e2df875bd391d6d6b7b48d41b43bdee02ad115b47244781a7e81
F ext/wasm/api/extern-post-js.c-pp.js d9f42ecbedc784c0d086bc37800e52946a14f7a21600b291daa3f963c314f930
F ext/wasm/api/extern-pre-js.js cc61c09c7a24a07dbecb4c352453c3985170cec12b4e7e7e7a4d11d43c5c8f41
-F ext/wasm/api/opfs-common-inline.c-pp.js b9c4e080698792cbc04ce9dd9dda7d8316c6db0262f74820706f98b352b949d5 w ext/wasm/api/opfs-common.c-pp.js
-F ext/wasm/api/opfs-common-shared.c-pp.js 3f8f3f2ab4790fdd1e6d1d9224e232cef07f1c8753827c18bbba965dbe98795f
+F ext/wasm/api/opfs-common-inline.c-pp.js 5be8d6d91963849e218221b48206ae55612630bb2cd7f30b1b6fcf7a9e374b76
+F ext/wasm/api/opfs-common-shared.c-pp.js fdebcb821f9f732eb263e3ee2dbd6af5709aaa5fec9da6eaa0e10fd93b72f547
F ext/wasm/api/post-js-footer.js a50c1a2c4d008aede7b2aa1f18891a7ee71437c2f415b8aeb3db237ddce2935b
F ext/wasm/api/post-js-header.js f35d2dcf1ab7f22a93d565f8e0b622a2934fc4e743edf3b708e4dd8140eeff55
F ext/wasm/api/pre-js.c-pp.js 9234ea680a2f6a2a177e8dcd934bdc5811a9f8409165433a252b87f4c07bba6f
F ext/wasm/api/sqlite3-api-glue.c-pp.js 9b33e3ee467791dec4fd1b444b12a8545dfbb6c8b28ac651c7bdc7661a3b5a5c
F ext/wasm/api/sqlite3-api-oo1.c-pp.js 45454631265d9ce82685f1a64e1650ee19c8e121c41db98a22b534c15e543cfa
-F ext/wasm/api/sqlite3-api-prologue.js ccd8ece4b4580d2a70996218f28e810d70a86f5e2795f4d4a75f0603af24aef6
+F ext/wasm/api/sqlite3-api-prologue.js 98fedc159c9239b226d19567d7172300dee5ffce176e5fa2f62dd1f17d088385
F ext/wasm/api/sqlite3-api-worker1.c-pp.js 1041dd645e8e821c082b628cd8d9acf70c667430f9d45167569633ffc7567938
F ext/wasm/api/sqlite3-license-version-header.js 98d90255a12d02214db634e041c8e7f2f133d9361a8ebf000ba9c9af4c6761cc
-F ext/wasm/api/sqlite3-opfs-async-proxy.c-pp.js 25b856508ac94336419133c6ec10594f576b469f85cc69cde4c09cfa06a8e1c7
+F ext/wasm/api/sqlite3-opfs-async-proxy.c-pp.js f0a2aa8712211ff9db2ef548ae8b676be3e7c82f61586d03fd8317fbc95bbedd
F ext/wasm/api/sqlite3-vfs-helper.c-pp.js 3f828cc66758acb40e9c5b4dcfd87fd478a14c8fb7f0630264e6c7fa0e57515d
F ext/wasm/api/sqlite3-vfs-kvvfs.c-pp.js 2ccf4322f42063aefc150972943e750c77f7926b866f1639d40eec05df075b6e
F ext/wasm/api/sqlite3-vfs-opfs-sahpool.c-pp.js 1575ea6bbcf2da1e6df6892c17521a0c1c1c199a672e9090176ea0b88de48bd9
-F ext/wasm/api/sqlite3-vfs-opfs-wl.c-pp.js 54f71e563dda30af73ed84ff9de03441537b2e8fb8d2ae2a0b0c8187f51db67a
-F ext/wasm/api/sqlite3-vfs-opfs.c-pp.js 2e2a72a40e2ad6ea92f52eb7adbd925d4acd874ffeecaa00b85234ad49862655
+F ext/wasm/api/sqlite3-vfs-opfs-wl.c-pp.js c6e5a281756e8ed7bbabf086dae765021486e17b91b6c4eee3c08dc2485fa348
+F ext/wasm/api/sqlite3-vfs-opfs.c-pp.js b8db3ccfedb457634cc07df7a955bc44c6d1be8b40d35f47822c2168ab8b1968
F ext/wasm/api/sqlite3-vtab-helper.c-pp.js 366596d8ff73d4cefb938bbe95bc839d503c3fab6c8335ce4bf52f0d8a7dee81
F ext/wasm/api/sqlite3-wasm.c 45bb20e19b245136711f9b78584371233975811b6560c29ed9b650e225417e29
F ext/wasm/api/sqlite3-worker1-promiser.c-pp.js aa9715f661fb700459a5a6cb1c32a4d6a770723b47aa9ac0e16c2cf87d622a66
F tool/warnings.sh d924598cf2f55a4ecbc2aeb055c10bd5f48114793e7ba25f9585435da29e7e98
F tool/win/sqlite.vsix deb315d026cc8400325c5863eef847784a219a2f
F tool/winmain.c 00c8fb88e365c9017db14c73d3c78af62194d9644feaf60e220ab0f411f3604c
-P 8ea85776116521526d684f221d67e288126e62931d4a0ea7fc7f164cd2d5b2ec
-R 82203270087631af34878be94f4d1594
+P db19a6e9663c3a44996178cb8c35dc4ccd60f48cb4b81b6c214411a56c57def7
+R a1c4d958895cbdc094c8287aa9f23443
U stephan
-Z 1548da853f7a79cab3a86616c1bf35cc
+Z b1a56499cd2b6bf9891139561c1b1fe5
# Remove this line to create a well-formed Fossil manifest.
-db19a6e9663c3a44996178cb8c35dc4ccd60f48cb4b81b6c214411a56c57def7
+b0dd23299e97ff975f213cb3a8b051f4d7b785b29def82e01f53427fdf77ecb6