asynchronous Origin-Private FileSystem (OPFS) APIs using a second
Worker, implemented in sqlite3-opfs-async-proxy.js. This file is
intended to be appended to the main sqlite3 JS deliverable somewhere
- after sqlite3-api-glue.js and before sqlite3-api-cleanup.js.
+ after sqlite3-api-oo1.js and before sqlite3-api-cleanup.js.
*/
'use strict';
self.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
/**
- installOpfsVfs() returns a Promise which, on success, installs
- an sqlite3_vfs named "opfs", suitable for use with all sqlite3 APIs
- which accept a VFS. It uses the Origin-Private FileSystem API for
+ installOpfsVfs() returns a Promise which, on success, installs an
+ sqlite3_vfs named "opfs", suitable for use with all sqlite3 APIs
+ which accept a VFS. It is intended to be called via
+ sqlite3ApiBootstrap.initializersAsync or an equivalent mechanism.
+
+ The installed VFS uses the Origin-Private FileSystem API for
all file storage. On error the returned Promise is rejected with an exception
explaining the problem. Reasons for rejection include, but are
not limited to:
proxying OPFS's asynchronous API via the synchronous interface
required by the sqlite3_vfs API.
- - This function may only be called a single time and it must be
- called from the client, as opposed to the library initialization,
- in case the client requires a custom path for this API's
- "counterpart": this function's argument is the relative URI to
- this module's "asynchronous half". When called, this function removes
- itself from the sqlite3 object.
+ - This function may only be called a single time. When called, this
+ function removes itself from the sqlite3 object.
+
+ All arguments to this function are for internal/development purposes
+ only. They do not constitute a public API and may change at any
+ time.
- The argument may optionally be a plain object with the following
- configuration options:
+ The argument may optionally be a plain object with the following
+ configuration options:
- - proxyUri: as described above
+ - proxyUri: as described above
- - verbose (=2): an integer 0-3. 0 disables all logging, 1 enables
- logging of errors. 2 enables logging of warnings and errors. 3
- additionally enables debugging info.
+ - verbose (=2): an integer 0-3. 0 disables all logging, 1 enables
+ logging of errors. 2 enables logging of warnings and errors. 3
+ additionally enables debugging info.
- - sanityChecks (=false): if true, some basic sanity tests are
- run on the OPFS VFS API after it's initialized, before the
- returned Promise resolves.
+ - sanityChecks (=false): if true, some basic sanity tests are
+ run on the OPFS VFS API after it's initialized, before the
+ returned Promise resolves.
- On success, the Promise resolves to the top-most sqlite3 namespace
- object and that object gets a new object installed in its
- `opfs` property, containing several OPFS-specific utilities.
+ On success, the Promise resolves to the top-most sqlite3 namespace
+ object and that object gets a new object installed in its
+ `opfs` property, containing several OPFS-specific utilities.
*/
-const installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri){
+const installOpfsVfs = function callee(options){
if(!self.SharedArrayBuffer ||
!self.Atomics ||
!self.FileSystemHandle ||
new Error("This environment does not have OPFS support.")
);
}
- const options = (asyncProxyUri && 'object'===asyncProxyUri) ? asyncProxyUri : {
- proxyUri: asyncProxyUri
- };
+ if(!options || 'object'!==typeof options){
+ options = Object.create(null);
+ }
const urlParams = new URL(self.location.href).searchParams;
if(undefined===options.verbose){
options.verbose = urlParams.has('opfs-verbose') ? 3 : 2;
const log = (...args)=>logImpl(2, ...args);
const warn = (...args)=>logImpl(1, ...args);
const error = (...args)=>logImpl(0, ...args);
- //warn("The OPFS VFS feature is very much experimental and under construction.");
const toss = function(...args){throw new Error(args.join(' '))};
const capi = sqlite3.capi;
const wasm = capi.wasm;
s.count = s.time = 0;
s = metrics.s11n.deserialize = Object.create(null);
s.count = s.time = 0;
- //[ // timed routines which are not in state.opIds
- // 'xFileControl'
- //].forEach((k)=>r(metrics[k] = Object.create(null)));
}
}/*metrics*/;
const promiseReject = function(err){
cases. We should probably have one SAB here with a single slot
for locking a per-file initialization step and then allocate a
separate SAB like the above one for each file. That will
- require a bit of acrobatics but should be feasible.
+ require a bit of acrobatics but should be feasible. The most
+ problematic part is that xOpen() would have to use
+ postMessage() to communicate its SharedArrayBuffer, and mixing
+ that approach with Atomics.wait/notify() gets a bit messy.
*/
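/*
  Editor's illustration, not part of this check-in: a minimal sketch of
  the hypothetical per-file SAB hand-off described above. The function
  name and the 'opfs-attach-sab' message type are invented here purely
  for illustration.
*/
const hypotheticalPostFileSab = function(worker, fid){
  /* Allocate a dedicated SAB for one file and hand it to the async
     half. Atomics cannot be used to announce it (there is no shared
     slot for it yet), so postMessage() is the only channel, and the
     sync half would then have to block until the async half
     acknowledges receipt before issuing any I/O ops for that file -
     the "acrobatics" alluded to above. */
  const sab = new SharedArrayBuffer(64 * 1024);
  worker.postMessage({type: 'opfs-attach-sab', args: {fid, sab}});
  return sab;
};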
const state = Object.create(null);
state.verbose = options.verbose;
state.littleEndian = (()=>{
const buffer = new ArrayBuffer(2);
- new DataView(buffer).setInt16(0, 256, true /* littleEndian */);
+ new DataView(buffer).setInt16(0, 256, true /* ==>littleEndian */);
// Int16Array uses the platform's endianness.
return new Int16Array(buffer)[0] === 256;
})();
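/*
  Editor's note: DataView accessors default to big-endian while
  TypedArray views always use the platform's byte order, so it is
  assumed that the s11n code passes this flag to every DataView
  get/set call to keep the two view types consistent, e.g.:

    dataView.setInt32(byteOffset, value, state.littleEndian);
*/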
- /** Whether the async counterpart should log exceptions to
- the serialization channel. That produces a great deal of
- noise for seemingly innocuous things like xAccess() checks
- for missing files, so this option may have one of 3 values:
+ /**
+ Whether the async counterpart should log exceptions to
+ the serialization channel. That produces a great deal of
+ noise for seemingly innocuous things like xAccess() checks
+ for missing files, so this option may have one of 3 values:
- 0 = no exception logging
+ 0 = no exception logging
- 1 = only log exceptions for "significant" ops like xOpen(),
- xRead(), and xWrite().
+ 1 = only log exceptions for "significant" ops like xOpen(),
+ xRead(), and xWrite().
- 2 = log all exceptions.
+ 2 = log all exceptions.
*/
state.asyncS11nExceptions = 1;
- /* Size of file I/O buffer block. 64k = max sqlite3 page size. */
- state.fileBufferSize =
- 1024 * 64;
+ /* Size of file I/O buffer block. 64k = max sqlite3 page size, and
+ xRead/xWrite() will never deal in blocks larger than that. */
+ state.fileBufferSize = 1024 * 64;
state.sabS11nOffset = state.fileBufferSize;
/**
The size of the block in our SAB for serializing arguments and
*/
state.sabS11nSize = opfsVfs.$mxPathname * 2;
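/*
  Editor's sketch, based on the offsets/sizes above rather than on
  this check-in: the implied byte layout of state.sabIO (allocated
  just below) is

    [0, fileBufferSize)                           file I/O block
    [sabS11nOffset, sabS11nOffset + sabS11nSize)  arg/result s11n block

  so a view restricted to the s11n region could be obtained with:
*/
const exampleS11nView = ()=>new Uint8Array(
  state.sabIO, state.sabS11nOffset, state.sabS11nSize
);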
/**
- The SAB used for all data I/O (files and arg/result s11n).
+ The SAB used for all data I/O between the synchronous and
+ async halves (file i/o and arg/result s11n).
*/
state.sabIO = new SharedArrayBuffer(
state.fileBufferSize/* file i/o block */
state.opIds.mkdir = i++;
state.opIds['opfs-async-metrics'] = i++;
state.opIds['opfs-async-shutdown'] = i++;
- state.sabOP = new SharedArrayBuffer(i * 4/*sizeof int32*/);
+ /* The retry slot is used by the async part for wait-and-retry
+ semantics. Though we could hypothetically use the xSleep slot
+ for that, doing so might lead to undesired side effects. */
+ state.opIds.retry = i++;
+ state.sabOP = new SharedArrayBuffer(
+ i * 4/* ==sizeof int32, noting that Atomics.wait() and friends
+ can only function on Int32Array views of an SAB. */);
opfsUtil.metrics.reset();
}
/**
state.s11n.serialize(...args);
Atomics.store(state.sabOPView, state.opIds.rc, -1);
Atomics.store(state.sabOPView, state.opIds.whichOp, opNdx);
- Atomics.notify(state.sabOPView, state.opIds.whichOp) /* async thread will take over here */;
+ Atomics.notify(state.sabOPView, state.opIds.whichOp)
+ /* async thread will take over here */;
const t = performance.now();
- Atomics.wait(state.sabOPView, state.opIds.rc, -1);
+ Atomics.wait(state.sabOPView, state.opIds.rc, -1)
+ /* When this wait() call returns, the async half will have
+ completed the operation and reported its results. */;
const rc = Atomics.load(state.sabOPView, state.opIds.rc);
metrics[op].wait += performance.now() - t;
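/*
  Editor's note, sketching the other half rather than quoting it: on
  the async-proxy side each op handler reports its result with roughly
  the inverse of the handshake above:

    Atomics.store(state.sabOPView, state.opIds.rc, rc);
    Atomics.notify(state.sabOPView, state.opIds.rc);

  which is what releases the Atomics.wait() on the rc slot, as seen in
  the storeAndNotify() calls in the async half further below.
*/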
if(rc && state.asyncS11nExceptions){
const initS11n = ()=>{
/**
- ACHTUNG: this code is 100% duplicated in the other half of this
- proxy! The documentation is maintained in the "synchronous half".
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ACHTUNG: this code is 100% duplicated in the other half of
+ this proxy! The documentation is maintained in the
+ "synchronous half".
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
This proxy de/serializes cross-thread function arguments and
output-pointer values via the state.sabIO SharedArrayBuffer,
using the region defined by (state.sabS11nOffset,
state.sabS11nOffset + state.sabS11nSize]. Only one dataset is recorded at a time.
- This is not a general-purpose format. It only supports the range
- of operations, and data sizes, needed by the sqlite3_vfs and
- sqlite3_io_methods operations.
+ This is not a general-purpose format. It only supports the
+ range of operations, and data sizes, needed by the
+ sqlite3_vfs and sqlite3_io_methods operations. Serialized
+ data are transient and this serialization algorithm may
+ change at any time.
The data format can be succinctly summarized as:
using their TextEncoder/TextDecoder representations. It would
arguably make more sense to store them as Int16Arrays of
their JS character values, but how best/fastest to get that
- in and out of string form us an open point.
+ in and out of string form is an open point. Initial
+ experimentation with that approach did not gain us any speed.
Historical note: this impl was initially about 1% of this size by
using JSON.stringify/parse(), but using fit-to-purpose
toss("Member",name," is not a function pointer. Signature =",sigN);
}
const memKey = tgt.memberKey(name);
- //log("installMethod",tgt, name, sigN);
const fProxy = 0
- // We can remove this proxy middle-man once the VFS is working
+ /** This middle-man proxy is only for use during development, to
+ confirm that we always pass the proper number of
+ arguments. We know that the C-level code will always use the
+ correct argument count. */
? callee.argcProxy(func, sigN)
: func;
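/*
  Editor's illustration of an assumed shape, not this library's actual
  callee.argcProxy() implementation: such a development-only proxy
  could look roughly like:

    const argcProxy = (func, sig)=>function(...args){
      if(args.length !== func.length){
        toss("Argument count mismatch for signature",sig,
             ": expected",func.length,"got",args.length);
      }
      return func.apply(this, args);
    };
*/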
const pFunc = wasm.installFunction(fProxy, tgt.memberSignature(name, true));
const mTimeStart = (op)=>{
opTimer.start = performance.now();
opTimer.op = op;
- //metrics[op] || toss("Maintenance required: missing metrics for",op);
++metrics[op].count;
};
const mTimeEnd = ()=>(
*/
const ioSyncWrappers = {
xCheckReservedLock: function(pFile,pOut){
- // Exclusive lock is automatically acquired when opened
- //warn("xCheckReservedLock(",arguments,") is a no-op");
+ /**
+ As of late 2022, only a single lock can be held on an OPFS
+ file. We have no way of checking whether any _other_ db
+ connection has a lock except by trying to obtain and (on
+ success) release a sync-handle for it, but doing so would
+ involve an inherent race condition. For the time being,
+ pending a better solution, we simply report whether the
+ given pFile instance has a lock.
+ */
const f = __openFiles[pFile];
wasm.setMemValue(pOut, f.lockMode ? 1 : 0, 'i32');
return 0;
return rc;
},
xRead: function(pFile,pDest,n,offset64){
- /* int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst) */
mTimeStart('xRead');
const f = __openFiles[pFile];
let rc;
try {
rc = opRun('xRead',pFile, n, Number(offset64));
- if(0===rc || capi.SQLITE_IOERR_SHORT_READ===rc){
- // set() seems to be the fastest way to copy this...
+ if(0===rc || capi.SQLITE_IOERR_SHORT_READ===rc){
+ /**
+ Results get written to the SharedArrayBuffer f.sabView.
+ Because the heap is _not_ a SharedArrayBuffer, we have
+ to copy the results. TypedArray.set() seems to be the
+ fastest way to copy this. */
wasm.heap8u().set(f.sabView.subarray(0, n), pDest);
}
}catch(e){
return rc;
},
xWrite: function(pFile,pSrc,n,offset64){
- /* int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst) */
mTimeStart('xWrite');
const f = __openFiles[pFile];
let rc;
//xSleep is optionally defined below
xOpen: function f(pVfs, zName, pFile, flags, pOutFlags){
mTimeStart('xOpen');
- if(!f._){
- f._ = {
- fileTypes: {
- SQLITE_OPEN_MAIN_DB: 'mainDb',
- SQLITE_OPEN_MAIN_JOURNAL: 'mainJournal',
- SQLITE_OPEN_TEMP_DB: 'tempDb',
- SQLITE_OPEN_TEMP_JOURNAL: 'tempJournal',
- SQLITE_OPEN_TRANSIENT_DB: 'transientDb',
- SQLITE_OPEN_SUBJOURNAL: 'subjournal',
- SQLITE_OPEN_SUPER_JOURNAL: 'superJournal',
- SQLITE_OPEN_WAL: 'wal'
- },
- getFileType: function(filename,oflags){
- const ft = f._.fileTypes;
- for(let k of Object.keys(ft)){
- if(oflags & capi[k]) return ft[k];
- }
- warn("Cannot determine fileType based on xOpen() flags for file",filename);
- return '???';
- }
- };
- }
if(0===zName){
zName = randomFilename();
}else if('number'===typeof zName){
promiseReject(e);
error("Unexpected message from the async worker:",data);
break;
- }
- };
-
+ }/*switch(data.type)*/
+ }/*W.onmessage()*/;
})/*thePromise*/;
return thePromise;
}/*installOpfsVfs()*/;
installOpfsVfs.defaultProxyUri =
- //self.location.pathname.replace(/[^/]*$/, "sqlite3-opfs-async-proxy.js");
"sqlite3-opfs-async-proxy.js";
-//console.warn("sqlite3.installOpfsVfs.defaultProxyUri =",sqlite3.installOpfsVfs.defaultProxyUri);
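/*
  Editor's illustration, not part of this check-in: once the library's
  async initialization has completed, client-side code can confirm
  that the VFS was installed and then pass "opfs" to any sqlite3 API
  which accepts a VFS name. A minimal check, assuming only the
  documented capi and the `opfs` namespace property described above:
*/
const exampleOpfsCheck = function(sqlite3){
  if(sqlite3.opfs && sqlite3.capi.sqlite3_vfs_find("opfs")){
    /* The "opfs" VFS is registered and may be used, e.g. via a
       URI-style filename such as "file:mydb.sqlite3?vfs=opfs". */
    return true;
  }
  return false;
};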
self.sqlite3ApiBootstrap.initializersAsync.push(async (sqlite3)=>{
if(sqlite3.scriptInfo && !sqlite3.scriptInfo.isWorker){
return;
***********************************************************************
- An Worker which manages asynchronous OPFS handles on behalf of a
+ A Worker which manages asynchronous OPFS handles on behalf of a
synchronous API which controls it via a combination of Worker
messages, SharedArrayBuffer, and Atomics. It is the asynchronous
counterpart of the API defined in sqlite3-api-opfs.js.
access to the sqlite3 JS/WASM bits, so any bits which it needs (most
notably SQLITE_xxx integer codes) have to be imported into it via an
initialization process.
+
+ This file represents an implementation detail of a larger piece of
+ code, and not a public interface. Its details may change at any time
+ and are not intended to be used by any client-level code.
*/
-'use strict';
+"use strict";
const toss = function(...args){throw new Error(args.join(' '))};
if(self.window === self){
toss("This code cannot run from the main thread.",
/**
Returns the sync access handle associated with the given file
- handle object (which must be a valid handle object), lazily opening
- it if needed.
+ handle object (which must be a valid handle object, as created by
+ xOpen()), lazily opening it if needed.
In order to help alleviate cross-tab contention for a database,
if an exception is thrown while acquiring the handle, this routine
let i = 1, ms = 300;
for(; true; ms *= ++i){
try {
- //if(1===i) toss("Just testing.");
+ //if(i<3) toss("Just testing.");
//TODO? A config option which tells it to throw here
//randomly every now and then, for testing purposes.
fh.syncHandle = await fh.fileHandle.createSyncAccessHandle();
}
warn("Error getting sync handle. Waiting",ms,
"ms and trying again.",fh.filenameAbs,e);
- Atomics.wait(state.sabOPView, state.opIds.xSleep, 0, ms);
+ Atomics.wait(state.sabOPView, state.opIds.retry, 0, ms);
}
}
log("Got sync handle for",fh.filenameAbs,'in',performance.now() - t,'ms');
storeAndNotify('xAccess', rc);
mTimeEnd();
},
- xClose: async function(fid){
+ xClose: async function(fid/*sqlite3_file pointer*/){
const opName = 'xClose';
mTimeStart(opName);
const fh = __openFiles[fid];
wTimeEnd();
return rc;
},
- xFileSize: async function(fid){
+ xFileSize: async function(fid/*sqlite3_file pointer*/){
mTimeStart('xFileSize');
const fh = __openFiles[fid];
let sz;
storeAndNotify('xFileSize', sz);
mTimeEnd();
},
- xLock: async function(fid,lockType){
+ xLock: async function(fid/*sqlite3_file pointer*/,
+ lockType/*SQLITE_LOCK_...*/){
mTimeStart('xLock');
const fh = __openFiles[fid];
let rc = 0;
storeAndNotify('xLock',rc);
mTimeEnd();
},
- xOpen: async function(fid/*sqlite3_file pointer*/, filename, flags){
+ xOpen: async function(fid/*sqlite3_file pointer*/, filename,
+ flags/*SQLITE_OPEN_...*/){
const opName = 'xOpen';
mTimeStart(opName);
const deleteOnClose = (state.sq3Codes.SQLITE_OPEN_DELETEONCLOSE & flags);
}
mTimeEnd();
},
- xRead: async function(fid,n,offset){
+ xRead: async function(fid/*sqlite3_file pointer*/,n,offset64){
mTimeStart('xRead');
let rc = 0, nRead;
const fh = __openFiles[fid];
wTimeStart('xRead');
nRead = (await getSyncHandle(fh)).read(
fh.sabView.subarray(0, n),
- {at: Number(offset)}
+ {at: Number(offset64)}
);
wTimeEnd();
if(nRead < n){/* Zero-fill remaining bytes */
storeAndNotify('xRead',rc);
mTimeEnd();
},
- xSync: async function(fid,flags/*ignored*/){
+ xSync: async function(fid/*sqlite3_file pointer*/,flags/*ignored*/){
mTimeStart('xSync');
const fh = __openFiles[fid];
let rc = 0;
storeAndNotify('xSync',rc);
mTimeEnd();
},
- xTruncate: async function(fid,size){
+ xTruncate: async function(fid/*sqlite3_file pointer*/,size){
mTimeStart('xTruncate');
let rc = 0;
const fh = __openFiles[fid];
storeAndNotify('xTruncate',rc);
mTimeEnd();
},
- xUnlock: async function(fid,lockType){
+ xUnlock: async function(fid/*sqlite3_file pointer*/,
+ lockType/*SQLITE_LOCK_...*/){
mTimeStart('xUnlock');
let rc = 0;
const fh = __openFiles[fid];
storeAndNotify('xUnlock',rc);
mTimeEnd();
},
- xWrite: async function(fid,n,offset){
+ xWrite: async function(fid/*sqlite3_file pointer*/,n,offset64){
mTimeStart('xWrite');
let rc;
wTimeStart('xWrite');
rc = (
n === (await getSyncHandle(fh))
.write(fh.sabView.subarray(0, n),
- {at: Number(offset)})
+ {at: Number(offset64)})
) ? 0 : state.sq3Codes.SQLITE_IOERR_WRITE;
}catch(e){
error("xWrite():",e,fh);