}
};
+ opfsUtil.initOptions = function(options, callee){
+ options = util.nu(options);
+ const urlParams = new URL(globalThis.location.href).searchParams;
+ if(urlParams.has('opfs-disable')){
+ //sqlite3.config.warn('Explicitly not installing "opfs" VFS due to opfs-disable flag.');
+ options.disableOpfs = true;
+ return options;
+ }
+ if(undefined===options.verbose){
+ options.verbose = urlParams.has('opfs-verbose')
+ ? (+urlParams.get('opfs-verbose') || 2) : 1;
+ }
+ if(undefined===options.sanityChecks){
+ options.sanityChecks = urlParams.has('opfs-sanity-check');
+ }
+ if(undefined===options.proxyUri){
+ options.proxyUri = callee.defaultProxyUri;
+ }
+ if('function' === typeof options.proxyUri){
+ options.proxyUri = options.proxyUri();
+ }
+ return options;
+ };
+
/**
- Populates the main state object used by "opfs" and "opfs-wl", and
+ Creates and populates the main state object used by "opfs" and "opfs-wl", and
transfered from those to their async counterpart.
- State which we send to the async-api Worker or share with it.
- This object must initially contain only cloneable or sharable
- objects. After the worker's "inited" message arrives, other types
- of data may be added to it.
-
- For purposes of Atomics.wait() and Atomics.notify(), we use a
- SharedArrayBuffer with one slot reserved for each of the API
- proxy's methods. The sync side of the API uses Atomics.wait()
- on the corresponding slot and the async side uses
- Atomics.notify() on that slot.
-
- The approach of using a single SAB to serialize comms for all
- instances might(?) lead to deadlock situations in multi-db
- cases. We should probably have one SAB here with a single slot
- for locking a per-file initialization step and then allocate a
- separate SAB like the above one for each file. That will
- require a bit of acrobatics but should be feasible. The most
- problematic part is that xOpen() would have to use
- postMessage() to communicate its SharedArrayBuffer, and mixing
- that approach with Atomics.wait/notify() gets a bit messy.
+ Returns an object containing state which we send to the async-api
+ Worker or share with it.
+
+ Because the returned object must be serializable to be posted to
+ the async proxy, after this returns, the caller must:
+
+ - Make a local-scope reference of state.vfs then (delete
+ state.vfs). That's the capi.sqlite3_vfs instance for the VFS.
+
+ This object must, when it's passed to the async part, contain
+ only cloneable or sharable objects. After the worker's "inited"
+ message arrives, other types of data may be added to it.
*/
- opfsUtil.createVfsStateObject = function(opfsVfs){
- if( !(opfsVfs instanceof capi.sqlite3_vfs) ){
- toss("Expecting a sqlite3_vfs instance");
- }
- const vfsName = wasm.cstrToJs(opfsVfs.$zName);
- const isWebLocker = 'opfs-wl'===vfsName;
+ opfsUtil.createVfsState = function(vfsName, options){
const state = util.nu();
+ state.verbose = options.verbose;
+
+ const opfsVfs = state.vfs = new capi.sqlite3_vfs();
+ const opfsIoMethods = opfsVfs.ioMethods = new capi.sqlite3_io_methods();
+
+ opfsIoMethods.$iVersion = 1;
+ opfsVfs.$iVersion = 2/*yes, two*/;
+ opfsVfs.$szOsFile = capi.sqlite3_file.structInfo.sizeof;
+ opfsVfs.$mxPathname = 1024/* sure, why not? The OPFS name length limit
+ is undocumented/unspecified. */;
+ opfsVfs.$zName = wasm.allocCString(vfsName);
+ opfsVfs.addOnDispose(
+ '$zName', opfsVfs.$zName, opfsIoMethods
+ /**
+ Pedantic sidebar: the entries in this array are items to
+ clean up when opfsVfs.dispose() is called, but in this
+ environment it will never be called. The VFS instance simply
+ hangs around until the WASM module instance is cleaned up. We
+ "could" _hypothetically_ clean it up by "importing" an
+ sqlite3_os_end() impl into the wasm build, but the shutdown
+ order of the wasm engine and the JS one are undefined so
+ there is no guarantee that the opfsVfs instance would be
+ available in one environment or the other when
+ sqlite3_os_end() is called (_if_ it gets called at all in a
+ wasm build, which is undefined). i.e. addOnDispose() here is
+ a matter of "correctness", not necessity. It just wouldn't do
+ to leave the impression that we're blindly leaking memory.
+ */
+ );
+
+ const isWebLocker = 'opfs-wl'===vfsName;
opfsVfs.metrics = util.nu({
counters: util.nu(),
dump: function(){
"\nTotal of",n,"op(s) for",t,
"ms (incl. "+w+" ms of waiting on the async side)");
sqlite3.config.log("Serialization metrics:",opfsVfs.metrics.counters.s11n);
- //W.postMessage({type:'opfs-async-metrics'});
+ opfsVfs.worker?.postMessage?.({type:'opfs-async-metrics'});
},
reset: function(){
let k;
state.fileBufferSize/* file i/o block */
+ state.sabS11nSize/* argument/result serialization block */
);
+
+ /**
+ For purposes of Atomics.wait() and Atomics.notify(), we use a
+ SharedArrayBuffer with one slot reserved for each of the API
+ proxy's methods. The sync side of the API uses Atomics.wait()
+ on the corresponding slot and the async side uses
+ Atomics.notify() on that slot. state.opIds holds the SAB slot
+ IDs of each of those.
+ */
state.opIds = Object.create(null);
{
/*
for that, doing so might lead to undesired side effects. */
state.opIds.retry = i++;
- /* Slots for submitting the lock type and receiving its acknowledgement.
- Only used by "opfs-wl". */
state.lock = util.nu({
+ /* Slots for submitting the lock type and receiving its
+ acknowledgement. Only used by "opfs-wl". */
type: i++ /* SQLITE_LOCK_xyz value */,
atomicsHandshake: i++ /* 0=pending, 1=release, 2=granted */
});
});
opfsVfs.metrics.reset();
-//#if not defined nope
-//#// does not yet work this way
-//#define vfs.metrics.enable
const metrics = opfsVfs.metrics.counters;
-//#include api/opfs-common-inline.c-pp.js
+
+ /**
+ Runs the given operation (by name) in the async worker
+ counterpart, waits for its response, and returns the result
+ which the async worker writes to SAB[state.opIds.rc]. The
+ 2nd and subsequent arguments must be the arguments for the
+ async op.
+ */
+ const opRun = opfsVfs.opRun = (op,...args)=>{
+ const opNdx = state.opIds[op] || toss("Invalid op ID:",op);
+ state.s11n.serialize(...args);
+ Atomics.store(state.sabOPView, state.opIds.rc, -1);
+ Atomics.store(state.sabOPView, state.opIds.whichOp, opNdx);
+ Atomics.notify(state.sabOPView, state.opIds.whichOp)
+ /* async thread will take over here */;
+ const t = performance.now();
+ while('not-equal'!==Atomics.wait(state.sabOPView, state.opIds.rc, -1)){
+ /*
+ The reason for this loop is buried in the details of a long
+ discussion at:
+
+ https://github.com/sqlite/sqlite-wasm/issues/12
+
+ Summary: in at least one browser flavor, under high loads,
+ the wait()/notify() pairings can get out of sync. Calling
+ wait() here until it returns 'not-equal' gets them back in
+ sync.
+ */
+ }
+ /* When the above wait() call returns 'not-equal', the async
+ half will have completed the operation and reported its results
+ in the state.opIds.rc slot of the SAB. */
+ const rc = Atomics.load(state.sabOPView, state.opIds.rc);
+ metrics[op].wait += performance.now() - t;
+ if(rc && state.asyncS11nExceptions){
+ const err = state.s11n.deserialize();
+ if(err) error(op+"() async error:",...err);
+ }
+ return rc;
+ };
+
+ const opTimer = Object.create(null);
+ opTimer.op = undefined;
+ opTimer.start = undefined;
+ const mTimeStart = opfsVfs.mTimeStart = (op)=>{
+ opTimer.start = performance.now();
+ opTimer.op = op;
+ ++metrics[op].count;
+ };
+ const mTimeEnd = opfsVfs.mTimeEnd = ()=>(
+ metrics[opTimer.op].time += performance.now() - opTimer.start
+ );
+
+ /**
+ Map of sqlite3_file pointers to objects constructed by xOpen().
+ */
+ const __openFiles = opfsVfs.__openFiles = Object.create(null);
+
+ /**
+ Impls for the sqlite3_io_methods methods. Maintenance reminder:
+ members are in alphabetical order to simplify finding them.
+ */
+ const ioSyncWrappers = opfsVfs.ioSyncWrappers = util.nu({
+ xCheckReservedLock: function(pFile,pOut){
+ /**
+ After consultation with a topic expert: "opfs-wl" will
+ continue to use the same no-op impl which "opfs" does
+ because:
+
+ - xCheckReservedLock() is just a hint. If SQLite needs to
+ lock, it's still going to try to lock.
+
+ - We cannot do this check synchronously in "opfs-wl",
+ so would need to pass it to the async proxy. That would
+ make it inordinately expensive considering that it's
+ just a hint.
+ */
+ wasm.poke(pOut, 0, 'i32');
+ return 0;
+ },
+ xClose: function(pFile){
+ mTimeStart('xClose');
+ let rc = 0;
+ const f = __openFiles[pFile];
+ if(f){
+ delete __openFiles[pFile];
+ rc = opRun('xClose', pFile);
+ if(f.sq3File) f.sq3File.dispose();
+ }
+ mTimeEnd();
+ return rc;
+ },
+ xDeviceCharacteristics: function(pFile){
+ return capi.SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN;
+ },
+ xFileControl: function(pFile, opId, pArg){
+ /*mTimeStart('xFileControl');
+ mTimeEnd();*/
+ return capi.SQLITE_NOTFOUND;
+ },
+ xFileSize: function(pFile,pSz64){
+ mTimeStart('xFileSize');
+ let rc = opRun('xFileSize', pFile);
+ if(0==rc){
+ try {
+ const sz = state.s11n.deserialize()[0];
+ wasm.poke(pSz64, sz, 'i64');
+ }catch(e){
+ error("Unexpected error reading xFileSize() result:",e);
+ rc = state.sq3Codes.SQLITE_IOERR;
+ }
+ }
+ mTimeEnd();
+ return rc;
+ },
+ xRead: function(pFile,pDest,n,offset64){
+ mTimeStart('xRead');
+ const f = __openFiles[pFile];
+ let rc;
+ try {
+ rc = opRun('xRead',pFile, n, Number(offset64));
+ if(0===rc || capi.SQLITE_IOERR_SHORT_READ===rc){
+ /**
+ Results get written to the SharedArrayBuffer f.sabView.
+ Because the heap is _not_ a SharedArrayBuffer, we have
+ to copy the results. TypedArray.set() seems to be the
+ fastest way to copy this. */
+ wasm.heap8u().set(f.sabView.subarray(0, n), Number(pDest));
+ }
+ }catch(e){
+ error("xRead(",arguments,") failed:",e,f);
+ rc = capi.SQLITE_IOERR_READ;
+ }
+ mTimeEnd();
+ return rc;
+ },
+ xSync: function(pFile,flags){
+ mTimeStart('xSync');
+ ++metrics.xSync.count;
+ const rc = opRun('xSync', pFile, flags);
+ mTimeEnd();
+ return rc;
+ },
+ xTruncate: function(pFile,sz64){
+ mTimeStart('xTruncate');
+ const rc = opRun('xTruncate', pFile, Number(sz64));
+ mTimeEnd();
+ return rc;
+ },
+ xWrite: function(pFile,pSrc,n,offset64){
+ mTimeStart('xWrite');
+ const f = __openFiles[pFile];
+ let rc;
+ try {
+ f.sabView.set(wasm.heap8u().subarray(
+ Number(pSrc), Number(pSrc) + n
+ ));
+ rc = opRun('xWrite', pFile, n, Number(offset64));
+ }catch(e){
+ error("xWrite(",arguments,") failed:",e,f);
+ rc = capi.SQLITE_IOERR_WRITE;
+ }
+ mTimeEnd();
+ return rc;
+ }
+ })/*ioSyncWrappers*/;
+
+ /**
+ Impls for the sqlite3_vfs methods. Maintenance reminder: members
+ are in alphabetical order to simplify finding them.
+ */
+ const vfsSyncWrappers = opfsVfs.vfsSyncWrappers = {
+ xAccess: function(pVfs,zName,flags,pOut){
+ mTimeStart('xAccess');
+ const rc = opRun('xAccess', wasm.cstrToJs(zName));
+ wasm.poke( pOut, (rc ? 0 : 1), 'i32' );
+ mTimeEnd();
+ return 0;
+ },
+ xCurrentTime: function(pVfs,pOut){
+ wasm.poke(pOut, 2440587.5 + (new Date().getTime()/86400000),
+ 'double');
+ return 0;
+ },
+ xCurrentTimeInt64: function(pVfs,pOut){
+ wasm.poke(pOut, (2440587.5 * 86400000) + new Date().getTime(),
+ 'i64');
+ return 0;
+ },
+ xDelete: function(pVfs, zName, doSyncDir){
+ mTimeStart('xDelete');
+ const rc = opRun('xDelete', wasm.cstrToJs(zName), doSyncDir, false);
+ mTimeEnd();
+ return rc;
+ },
+ xFullPathname: function(pVfs,zName,nOut,pOut){
+ /* Until/unless we have some notion of "current dir"
+ in OPFS, simply copy zName to pOut... */
+ const i = wasm.cstrncpy(pOut, zName, nOut);
+ return i<nOut ? 0 : capi.SQLITE_CANTOPEN
+ /*CANTOPEN is required by the docs but SQLITE_RANGE would be a closer match*/;
+ },
+ xGetLastError: function(pVfs,nOut,pOut){
+ /* Mutex use in the overlying APIs cause xGetLastError() to
+ not be terribly useful for us. e.g. it can't be used to
+ convey error messages from xOpen() because there would be a
+ race condition between sqlite3_open()'s call to xOpen() and
+ this function. */
+ warn("OPFS xGetLastError() has nothing sensible to return.");
+ return 0;
+ },
+ //xSleep is optionally defined below
+ xOpen: function f(pVfs, zName, pFile, flags, pOutFlags){
+ mTimeStart('xOpen');
+ let opfsFlags = 0;
+ if(0===zName){
+ zName = opfsUtil.randomFilename();
+ }else if(wasm.isPtr(zName)){
+ if(capi.sqlite3_uri_boolean(zName, "opfs-unlock-asap", 0)){
+ /* -----------------------^^^^^ MUST pass the untranslated
+ C-string here. */
+ opfsFlags |= state.opfsFlags.OPFS_UNLOCK_ASAP;
+ }
+ if(capi.sqlite3_uri_boolean(zName, "delete-before-open", 0)){
+ opfsFlags |= state.opfsFlags.OPFS_UNLINK_BEFORE_OPEN;
+ }
+ zName = wasm.cstrToJs(zName);
+ //warn("xOpen zName =",zName, "opfsFlags =",opfsFlags);
+ }
+ const fh = Object.create(null);
+ fh.fid = pFile;
+ fh.filename = wasm.cstrToJs(zName);
+ fh.sab = new SharedArrayBuffer(state.fileBufferSize);
+ fh.flags = flags;
+ fh.readOnly = !(capi.SQLITE_OPEN_CREATE & flags)
+ && !!(flags & capi.SQLITE_OPEN_READONLY);
+ const rc = opRun('xOpen', pFile, zName, flags, opfsFlags);
+ if(!rc){
+ /* Recall that sqlite3_vfs::xClose() will be called, even on
+ error, unless pFile->pMethods is NULL. */
+ if(fh.readOnly){
+ wasm.poke(pOutFlags, capi.SQLITE_OPEN_READONLY, 'i32');
+ }
+ __openFiles[pFile] = fh;
+ fh.sabView = state.sabFileBufView;
+ fh.sq3File = new capi.sqlite3_file(pFile);
+ fh.sq3File.$pMethods = opfsIoMethods.pointer;
+ fh.lockType = capi.SQLITE_LOCK_NONE;
+ }
+ mTimeEnd();
+ return rc;
+ }/*xOpen()*/
+ }/*vfsSyncWrappers*/;
+
+ const pDVfs = capi.sqlite3_vfs_find(null)/*pointer to default VFS*/;
+ if(pDVfs){
+ const dVfs = new capi.sqlite3_vfs(pDVfs);
+ opfsVfs.$xRandomness = dVfs.$xRandomness;
+ opfsVfs.$xSleep = dVfs.$xSleep;
+ dVfs.dispose();
+ }
+ if(!opfsVfs.$xRandomness){
+ /* If the default VFS has no xRandomness(), add a basic JS impl... */
+ opfsVfs.vfsSyncWrappers.xRandomness = function(pVfs, nOut, pOut){
+ const heap = wasm.heap8u();
+ let i = 0;
+ const npOut = Number(pOut);
+ for(; i < nOut; ++i) heap[npOut + i] = (Math.random()*255000) & 0xFF;
+ return i;
+ };
+ }
+ if(!opfsVfs.$xSleep){
+ /* If we can inherit an xSleep() impl from the default VFS then
+ assume it's sane and use it, otherwise install a JS-based
+ one. */
+ opfsVfs.vfsSyncWrappers.xSleep = function(pVfs,ms){
+ mTimeStart('xSleep');
+ Atomics.wait(state.sabOPView, state.opIds.xSleep, 0, ms);
+ mTimeEnd();
+ return 0;
+ };
+ }
+
+//#define vfs.metrics.enable
//#// import initS11n()
+//#include api/opfs-common-inline.c-pp.js
//#undef vfs.metrics.enable
opfsVfs.initS11n = initS11n;
-//#endif
return state;
- }/*createVfsStateObject()*/;
+ }/*createVfsState()*/;
}/*sqlite3ApiBootstrap.initializers*/);
//#endif target:node
*/
'use strict';
globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
+ const util = sqlite3.util,
+ toss = sqlite3.util.toss;
+ const opfsUtil = sqlite3.opfs || sqlite3.util.toss("Missing sqlite3.opfs")
/* These get removed from sqlite3 during bootstrap, so we need an
- early reference to it. */
- const util = sqlite3.util;
- const opfsUtil = sqlite3.opfs || sqlite3.util.toss("Missing sqlite3.opfs");
+ early reference to it. */;
/**
installOpfsWlVfs() returns a Promise which, on success, installs an
additionally enables debugging info. Logging is performed
via the sqlite3.config.{log|warn|error}() functions.
- On success, the Promise resolves to the top-most sqlite3 namespace
- object.
- Code-diver notes: this file is particularly sparse on documentation
- because much of it is identical to the code in
- sqlite3-vfs-opfs.c-pp.js. See that file for more details.
+ On success, the Promise resolves to the top-most sqlite3 namespace
+ object. Success does not necessarily mean that it installs the VFS,
+ as there are legitimate non-error reasons for OPFS not to be
+ available.
*/
const installOpfsWlVfs = function callee(options){
try{
}catch(e){
return Promise.reject(e);
}
- options = util.nu(options);
- const urlParams = new URL(globalThis.location.href).searchParams;
- if(urlParams.has('opfs-disable')){
- //sqlite3.config.warn('Explicitly not installing 'opfs-wl' VFS due to opfs-disable flag.');
+ options = opfsUtil.initOptions(options, callee);
+ if( options.disableOpfs ){
return Promise.resolve(sqlite3);
}
- if(undefined===options.verbose){
- options.verbose = urlParams.has('opfs-verbose')
- ? (+urlParams.get('opfs-verbose') || 2) : 1;
- }
- if(undefined===options.sanityChecks){
- options.sanityChecks = urlParams.has('opfs-sanity-check');
- }
- if(undefined===options.proxyUri){
- options.proxyUri = callee.defaultProxyUri;
- }
- if('function' === typeof options.proxyUri){
- options.proxyUri = options.proxyUri();
- }
+
const thePromise = new Promise(function(promiseResolve_, promiseReject_){
const loggers = [
sqlite3.config.error,
const logImpl = (level,...args)=>{
if(options.verbose>level) loggers[level]("OPFS syncer:",...args);
};
- const log = (...args)=>logImpl(2, ...args);
- const warn = (...args)=>logImpl(1, ...args);
- const error = (...args)=>logImpl(0, ...args);
- const toss = sqlite3.util.toss;
- const capi = sqlite3.capi;
- const wasm = sqlite3.wasm;
- const sqlite3_vfs = capi.sqlite3_vfs;
- const sqlite3_file = capi.sqlite3_file;
- const sqlite3_io_methods = capi.sqlite3_io_methods;
- const pDVfs = capi.sqlite3_vfs_find(null)/*pointer to default VFS*/;
- const dVfs = pDVfs
- ? new sqlite3_vfs(pDVfs)
- : null /* dVfs will be null when sqlite3 is built with
- SQLITE_OS_OTHER. */;
- const opfsIoMethods = new sqlite3_io_methods();
- const opfsVfs = new sqlite3_vfs()
- .addOnDispose( ()=>opfsIoMethods.dispose());
- opfsIoMethods.$iVersion = 1;
- opfsVfs.$iVersion = 2/*yes, two*/;
- opfsVfs.$szOsFile = capi.sqlite3_file.structInfo.sizeof;
- opfsVfs.$mxPathname = 1024/* sure, why not? The OPFS name length limit
- is undocumented/unspecified. */;
- opfsVfs.$zName = wasm.allocCString('opfs-wl');
- opfsVfs.addOnDispose(
- '$zName', opfsVfs.$zName,
- 'cleanup default VFS wrapper', ()=>(dVfs ? dVfs.dispose() : null)
- );
- const state = opfsUtil.createVfsStateObject(opfsVfs);
- state.verbose = options.verbose;
- const metrics = opfsVfs.metrics.counters;
+ const log = (...args)=>logImpl(2, ...args),
+ warn = (...args)=>logImpl(1, ...args),
+ error = (...args)=>logImpl(0, ...args),
+ capi = sqlite3.capi,
+ wasm = sqlite3.wasm;
+ const state = opfsUtil.createVfsState('opfs-wl', options),
+ opfsVfs = state.vfs,
+ metrics = opfsVfs.metrics.counters,
+ mTimeStart = opfsVfs.mTimeStart,
+ mTimeEnd = opfsVfs.mTimeEnd,
+ __openFiles = opfsVfs.__openFiles;
+ delete state.vfs;
+
+ /* At this point, createVfsState() has populated state and
+ opfsVfs with any code common to both the "opfs" and "opfs-wl"
+ VFSes. Now comes the VFS-dependent work... */
+
+ opfsVfs.ioSyncWrappers.xLock = function(pFile, lockType){
+ mTimeStart('xLock');
+ ++metrics.xLock.count;
+ const f = __openFiles[pFile];
+ let rc = 0;
+ /* All OPFS locks are exclusive locks. If xLock() has
+ previously succeeded, do nothing except record the lock
+ type. If no lock is active, have the async counterpart
+ lock the file. */
+ if( f.lockType ) {
+ f.lockType = lockType;
+ }else{
+ try{
+ const view = state.sabOPView;
+ /* We need to pass pFile's name to the async proxy so that
+ it can create the WebLock name. */
+ state.s11n.serialize(f.filename);
+ Atomics.store(view, state.lock.atomicsHandshake, 0);
+ Atomics.store(view, state.lock.type, lockType);
+ Atomics.store(view, state.opIds.whichOp, state.opIds.lockControl);
+ Atomics.notify(state.sabOPView, state.opIds.whichOp);
+ while('not-equal'!==Atomics.wait(view, state.lock.atomicsHandshake, 0)){
+ /* Loop is a workaround for environment-specific quirks. See
+ notes in similar loops. */
+ }
+ f.lockType = lockType;
+ }catch(e){
+ error("xLock(",arguments,") failed", e, f);
+ rc = capi.SQLITE_IOERR_LOCK;
+ }
+ }
+ mTimeEnd();
+ return rc;
+ };
+
+ opfsVfs.ioSyncWrappers.xUnlock = function(pFile,lockType){
+ mTimeStart('xUnlock');
+ ++metrics.xUnlock.count;
+ const f = __openFiles[pFile];
+ let rc = 0;
+ if( lockType < f.lockType ){
+ try{
+ const view = state.sabOPView;
+ Atomics.store(view, state.lock.atomicsHandshake, 1);
+ Atomics.notify(view, state.lock.atomicsHandshake);
+ Atomics.wait(view, state.lock.atomicsHandshake, 1);
+ }catch(e){
+ error("xUnlock(",pFile,lockType,") failed",e, f);
+ rc = capi.SQLITE_IOERR_LOCK;
+ }
+ }
+ if( 0===rc ) f.lockType = lockType;
+ mTimeEnd();
+ return rc;
+ };
+
+
let promiseWasRejected = undefined;
const promiseReject = (err)=>{
return promiseResolve_(sqlite3);
};
options.proxyUri += '?vfs=opfs-wl';
- const W =
+ const W = opfsVfs.worker =
//#if target:es6-bundler-friendly
new Worker(new URL("sqlite3-opfs-async-proxy.js?vfs=opfs-wl", import.meta.url));
//#elif target:es6-module
promiseReject(new Error("Loading OPFS async Worker failed for unknown reasons."));
};
- /**
- Runs the given operation (by name) in the async worker
- counterpart, waits for its response, and returns the result
- which the async worker writes to SAB[state.opIds.rc]. The
- 2nd and subsequent arguments must be the arguments for the
- async op.
- */
- const opRun = (op,...args)=>{
- const opNdx = state.opIds[op] || toss("Invalid op ID:",op);
- state.s11n.serialize(...args);
- Atomics.store(state.sabOPView, state.opIds.rc, -1);
- Atomics.store(state.sabOPView, state.opIds.whichOp, opNdx);
- Atomics.notify(state.sabOPView, state.opIds.whichOp)
- /* async thread will take over here */;
- const t = performance.now();
- while('not-equal'!==Atomics.wait(state.sabOPView, state.opIds.rc, -1)){
- /*
- The reason for this loop is buried in the details of a long
- discussion at:
-
- https://github.com/sqlite/sqlite-wasm/issues/12
-
- Summary: in at least one browser flavor, under high loads,
- the wait()/notify() pairings can get out of sync. Calling
- wait() here until it returns 'not-equal' gets them back in
- sync.
- */
- }
- /* When the above wait() call returns 'not-equal', the async
- half will have completed the operation and reported its results
- in the state.opIds.rc slot of the SAB. */
- const rc = Atomics.load(state.sabOPView, state.opIds.rc);
- metrics[op].wait += performance.now() - t;
- if(rc && state.asyncS11nExceptions){
- const err = state.s11n.deserialize();
- if(err) error(op+"() async error:",...err);
- }
- return rc;
- };
-
-//#if nope
+ const opRun = opfsVfs.opRun;
/**
Not part of the public API. Only for test/development use.
*/
W.postMessage({type: 'opfs-async-restart'});
}
};
-//#endif
-
- /**
- Map of sqlite3_file pointers to objects constructed by xOpen().
- */
- const __openFiles = Object.create(null);
-
- const opTimer = Object.create(null);
- opTimer.op = undefined;
- opTimer.start = undefined;
- const mTimeStart = (op)=>{
- opTimer.start = performance.now();
- opTimer.op = op;
- ++metrics[op].count;
- };
- const mTimeEnd = ()=>(
- metrics[opTimer.op].time += performance.now() - opTimer.start
- );
-
- /**
- Impls for the sqlite3_io_methods methods. Maintenance reminder:
- members are in alphabetical order to simplify finding them.
- */
- const ioSyncWrappers = {
- xCheckReservedLock: function(pFile,pOut){
- /**
- After consultation with a topic expert: "opfs-wl" will
- continue to use the same no-op impl which "opfs" does
- because:
-
- - xCheckReservedLock() is just a hint. If SQLite needs to
- lock, it's still going to try to lock.
-
- - We cannot do this check synchronously in "opfs-wl",
- so would need to pass it to the async proxy. That would
- make it inordinately expensive considering that it's
- just a hint.
- */
- wasm.poke(pOut, 0, 'i32');
- return 0;
- },
- xClose: function(pFile){
- mTimeStart('xClose');
- let rc = 0;
- const f = __openFiles[pFile];
- if(f){
- delete __openFiles[pFile];
- rc = opRun('xClose', pFile);
- if(f.sq3File) f.sq3File.dispose();
- }
- mTimeEnd();
- return rc;
- },
- xDeviceCharacteristics: function(pFile){
- return capi.SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN;
- },
- xFileControl: function(pFile, opId, pArg){
- /*mTimeStart('xFileControl');
- mTimeEnd();*/
- return capi.SQLITE_NOTFOUND;
- },
- xFileSize: function(pFile,pSz64){
- mTimeStart('xFileSize');
- let rc = opRun('xFileSize', pFile);
- if(0==rc){
- try {
- const sz = state.s11n.deserialize()[0];
- wasm.poke(pSz64, sz, 'i64');
- }catch(e){
- error("Unexpected error reading xFileSize() result:",e);
- rc = state.sq3Codes.SQLITE_IOERR;
- }
- }
- mTimeEnd();
- return rc;
- },
- xLock: function(pFile, lockType){
- mTimeStart('xLock');
- const f = __openFiles[pFile];
- let rc = 0;
- /* All OPFS locks are exclusive locks. If xLock() has
- previously succeeded, do nothing except record the lock
- type. If no lock is active, have the async counterpart
- lock the file. */
- if( !f.lockType ) {
- try{
- const view = state.sabOPView;
- /* We need to pass pFile's name through so that the other
- side can create the WebLock name. */
- state.s11n.serialize(f.filename)
- Atomics.store(view, state.lock.atomicsHandshake, 0);
- Atomics.store(view, state.lock.type, lockType);
- Atomics.store(view, state.opIds.whichOp, state.opIds.lockControl);
- Atomics.notify(state.sabOPView, state.opIds.whichOp)
- while('not-equal'!==Atomics.wait(view, state.lock.atomicsHandshake, 0)){
- /* Loop is a workaround for environment-specific quirks. See
- notes in similar loops. */
- }
- f.lockType = lockType;
- }catch(e){
- error("xLock(",arguments,") failed", e, f);
- rc = capi.SQLITE_IOERR_LOCK;
- }
- }else{
- f.lockType = lockType;
- }
- mTimeEnd();
- return rc;
- },
- xRead: function(pFile,pDest,n,offset64){
- mTimeStart('xRead');
- const f = __openFiles[pFile];
- let rc;
- try {
- rc = opRun('xRead',pFile, n, Number(offset64));
- if(0===rc || capi.SQLITE_IOERR_SHORT_READ===rc){
- /**
- Results get written to the SharedArrayBuffer f.sabView.
- Because the heap is _not_ a SharedArrayBuffer, we have
- to copy the results. TypedArray.set() seems to be the
- fastest way to copy this. */
- wasm.heap8u().set(f.sabView.subarray(0, n), Number(pDest));
- }
- }catch(e){
- error("xRead(",arguments,") failed:",e,f);
- rc = capi.SQLITE_IOERR_READ;
- }
- mTimeEnd();
- return rc;
- },
- xSync: function(pFile,flags){
- mTimeStart('xSync');
- ++metrics.xSync.count;
- const rc = opRun('xSync', pFile, flags);
- mTimeEnd();
- return rc;
- },
- xTruncate: function(pFile,sz64){
- mTimeStart('xTruncate');
- const rc = opRun('xTruncate', pFile, Number(sz64));
- mTimeEnd();
- return rc;
- },
- xUnlock: function(pFile,lockType){
- mTimeStart('xUnlock');
- const f = __openFiles[pFile];
- let rc = 0;
- if( lockType < f.lockType ){
- try{
- const view = state.sabOPView;
- Atomics.store(view, state.lock.atomicsHandshake, 1);
- Atomics.notify(view, state.lock.atomicsHandshake);
- Atomics.wait(view, state.lock.atomicsHandshake, 1);
- }catch(e){
- error("xUnlock(",pFile,lockType,") failed",e, f);
- rc = capi.SQLITE_IOERR_LOCK;
- }
- }
- if( 0===rc ) f.lockType = lockType;
- mTimeEnd();
- return rc;
- },
- xWrite: function(pFile,pSrc,n,offset64){
- mTimeStart('xWrite');
- const f = __openFiles[pFile];
- let rc;
- try {
- f.sabView.set(wasm.heap8u().subarray(
- Number(pSrc), Number(pSrc) + n
- ));
- rc = opRun('xWrite', pFile, n, Number(offset64));
- }catch(e){
- error("xWrite(",arguments,") failed:",e,f);
- rc = capi.SQLITE_IOERR_WRITE;
- }
- mTimeEnd();
- return rc;
- }
- }/*ioSyncWrappers*/;
-
- /**
- Impls for the sqlite3_vfs methods. Maintenance reminder: members
- are in alphabetical order to simplify finding them.
- */
- const vfsSyncWrappers = {
- xAccess: function(pVfs,zName,flags,pOut){
- mTimeStart('xAccess');
- const rc = opRun('xAccess', wasm.cstrToJs(zName));
- wasm.poke( pOut, (rc ? 0 : 1), 'i32' );
- mTimeEnd();
- return 0;
- },
- xCurrentTime: function(pVfs,pOut){
- /* If it turns out that we need to adjust for timezone, see:
- https://stackoverflow.com/a/11760121/1458521 */
- wasm.poke(pOut, 2440587.5 + (new Date().getTime()/86400000),
- 'double');
- return 0;
- },
- xCurrentTimeInt64: function(pVfs,pOut){
- wasm.poke(pOut, (2440587.5 * 86400000) + new Date().getTime(),
- 'i64');
- return 0;
- },
- xDelete: function(pVfs, zName, doSyncDir){
- mTimeStart('xDelete');
- const rc = opRun('xDelete', wasm.cstrToJs(zName), doSyncDir, false);
- mTimeEnd();
- return rc;
- },
- xFullPathname: function(pVfs,zName,nOut,pOut){
- /* Until/unless we have some notion of "current dir"
- in OPFS, simply copy zName to pOut... */
- const i = wasm.cstrncpy(pOut, zName, nOut);
- return i<nOut ? 0 : capi.SQLITE_CANTOPEN
- /*CANTOPEN is required by the docs but SQLITE_RANGE would be a closer match*/;
- },
- xGetLastError: function(pVfs,nOut,pOut){
- /* TODO: store exception.message values from the async
- partner in a dedicated SharedArrayBuffer, noting that we'd have
- to encode them... TextEncoder can do that for us. */
- warn("OPFS xGetLastError() has nothing sensible to return.");
- return 0;
- },
- //xSleep is optionally defined below
- xOpen: function f(pVfs, zName, pFile, flags, pOutFlags){
- mTimeStart('xOpen');
- let opfsFlags = 0;
- if(0===zName){
- zName = opfsUtil.randomFilename();
- }else if(wasm.isPtr(zName)){
- if(capi.sqlite3_uri_boolean(zName, "opfs-unlock-asap", 0)){
- /* -----------------------^^^^^ MUST pass the untranslated
- C-string here. */
- opfsFlags |= state.opfsFlags.OPFS_UNLOCK_ASAP;
- }
- if(capi.sqlite3_uri_boolean(zName, "delete-before-open", 0)){
- opfsFlags |= state.opfsFlags.OPFS_UNLINK_BEFORE_OPEN;
- }
- zName = wasm.cstrToJs(zName);
- //warn("xOpen zName =",zName, "opfsFlags =",opfsFlags);
- }
- const fh = Object.create(null);
- fh.fid = pFile;
- fh.filename = wasm.cstrToJs(zName);
- fh.sab = new SharedArrayBuffer(state.fileBufferSize);
- fh.flags = flags;
- fh.readOnly = !(capi.SQLITE_OPEN_CREATE & flags)
- && !!(flags & capi.SQLITE_OPEN_READONLY);
- const rc = opRun('xOpen', pFile, zName, flags, opfsFlags);
- if(!rc){
- /* Recall that sqlite3_vfs::xClose() will be called, even on
- error, unless pFile->pMethods is NULL. */
- if(fh.readOnly){
- wasm.poke(pOutFlags, capi.SQLITE_OPEN_READONLY, 'i32');
- }
- __openFiles[pFile] = fh;
- fh.sabView = state.sabFileBufView;
- fh.sq3File = new sqlite3_file(pFile);
- fh.sq3File.$pMethods = opfsIoMethods.pointer;
- fh.lockType = capi.SQLITE_LOCK_NONE;
- }
- mTimeEnd();
- return rc;
- }/*xOpen()*/
- }/*vfsSyncWrappers*/;
-
- if(dVfs){
- opfsVfs.$xRandomness = dVfs.$xRandomness;
- opfsVfs.$xSleep = dVfs.$xSleep;
- }
- if(!opfsVfs.$xRandomness){
- /* If the default VFS has no xRandomness(), add a basic JS impl... */
- vfsSyncWrappers.xRandomness = function(pVfs, nOut, pOut){
- const heap = wasm.heap8u();
- let i = 0;
- const npOut = Number(pOut);
- for(; i < nOut; ++i) heap[npOut + i] = (Math.random()*255000) & 0xFF;
- return i;
- };
- }
- if(!opfsVfs.$xSleep){
- /* If we can inherit an xSleep() impl from the default VFS then
- assume it's sane and use it, otherwise install a JS-based
- one. */
- vfsSyncWrappers.xSleep = function(pVfs,ms){
- Atomics.wait(state.sabOPView, state.opIds.xSleep, 0, ms);
- return 0;
- };
- }
if(sqlite3.oo1){
const OpfsWlDb = function(...args){
const sanityCheck = function(){
const scope = wasm.scopedAllocPush();
- const sq3File = new sqlite3_file();
+ const sq3File = new capi.sqlite3_file();
try{
const fid = sq3File.pointer;
const openFlags = capi.SQLITE_OPEN_CREATE
rc = state.s11n.deserialize();
log("deserialize() says:",rc);
if("This is รค string."!==rc[0]) toss("String d13n error.");
- vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut);
+ opfsVfs.vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut);
rc = wasm.peek(pOut,'i32');
log("xAccess(",dbFile,") exists ?=",rc);
- rc = vfsSyncWrappers.xOpen(opfsVfs.pointer, zDbFile,
+ rc = opfsVfs.vfsSyncWrappers.xOpen(opfsVfs.pointer, zDbFile,
fid, openFlags, pOut);
log("open rc =",rc,"state.sabOPView[xOpen] =",
state.sabOPView[state.opIds.xOpen]);
error("open failed with code",rc);
return;
}
- vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut);
+ opfsVfs.vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut);
rc = wasm.peek(pOut,'i32');
if(!rc) toss("xAccess() failed to detect file.");
- rc = ioSyncWrappers.xSync(sq3File.pointer, 0);
+ rc = opfsVfs.ioSyncWrappers.xSync(sq3File.pointer, 0);
if(rc) toss('sync failed w/ rc',rc);
- rc = ioSyncWrappers.xTruncate(sq3File.pointer, 1024);
+ rc = opfsVfs.ioSyncWrappers.xTruncate(sq3File.pointer, 1024);
if(rc) toss('truncate failed w/ rc',rc);
wasm.poke(pOut,0,'i64');
- rc = ioSyncWrappers.xFileSize(sq3File.pointer, pOut);
+ rc = opfsVfs.ioSyncWrappers.xFileSize(sq3File.pointer, pOut);
if(rc) toss('xFileSize failed w/ rc',rc);
log("xFileSize says:",wasm.peek(pOut, 'i64'));
- rc = ioSyncWrappers.xWrite(sq3File.pointer, zDbFile, 10, 1);
+ rc = opfsVfs.ioSyncWrappers.xWrite(sq3File.pointer, zDbFile, 10, 1);
if(rc) toss("xWrite() failed!");
const readBuf = wasm.scopedAlloc(16);
- rc = ioSyncWrappers.xRead(sq3File.pointer, readBuf, 6, 2);
+ rc = opfsVfs.ioSyncWrappers.xRead(sq3File.pointer, readBuf, 6, 2);
wasm.poke(readBuf+6,0);
let jRead = wasm.cstrToJs(readBuf);
log("xRead() got:",jRead);
if("sanity"!==jRead) toss("Unexpected xRead() value.");
- if(vfsSyncWrappers.xSleep){
+ if(opfsVfs.vfsSyncWrappers.xSleep){
log("xSleep()ing before close()ing...");
- vfsSyncWrappers.xSleep(opfsVfs.pointer,2000);
+ opfsVfs.vfsSyncWrappers.xSleep(opfsVfs.pointer,2000);
log("waking up from xSleep()");
}
- rc = ioSyncWrappers.xClose(fid);
+ rc = opfsVfs.ioSyncWrappers.xClose(fid);
log("xClose rc =",rc,"sabOPView =",state.sabOPView);
log("Deleting file:",dbFile);
- vfsSyncWrappers.xDelete(opfsVfs.pointer, zDbFile, 0x1234);
- vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut);
+ opfsVfs.vfsSyncWrappers.xDelete(opfsVfs.pointer, zDbFile, 0x1234);
+ opfsVfs.vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut);
rc = wasm.peek(pOut,'i32');
if(rc) toss("Expecting 0 from xAccess(",dbFile,") after xDelete().");
warn("End of OPFS sanity checks.");
}
try {
sqlite3.vfs.installVfs({
- io: {struct: opfsIoMethods, methods: ioSyncWrappers},
- vfs: {struct: opfsVfs, methods: vfsSyncWrappers}
+ io: {struct: opfsVfs.ioMethods, methods: opfsVfs.ioSyncWrappers},
+ vfs: {struct: opfsVfs, methods: opfsVfs.vfsSyncWrappers}
});
state.sabOPView = new Int32Array(state.sabOP);
state.sabFileBufView = new Uint8Array(state.sabIO, 0, state.fileBufferSize);
*/
'use strict';
globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
+ const util = sqlite3.util,
+ toss = sqlite3.util.toss;
const opfsUtil = sqlite3.opfs || sqlite3.util.toss("Missing sqlite3.opfs")
- /* Gets removed from sqlite3 during bootstrap, so we need an
+ /* These get removed from sqlite3 during bootstrap, so we need an
early reference to it. */;
/**
installOpfsVfs() returns a Promise which, on success, installs an
development of the VFS, not client-side use.
On success, the Promise resolves to the top-most sqlite3 namespace
- object.
+ object. Success does not necessarily mean that it installs the VFS,
+ as there are legitimate non-error reasons for OPFS not to be
+ available.
*/
const installOpfsVfs = function callee(options){
try{
}catch(e){
return Promise.reject(e);
}
- options = Object.assign(Object.create(null), options);
- const urlParams = new URL(globalThis.location.href).searchParams;
- if(urlParams.has('opfs-disable')){
- //sqlite3.config.warn('Explicitly not installing "opfs" VFS due to opfs-disable flag.');
+ options = opfsUtil.initOptions(options, callee);
+ if( options.disableOpfs ){
return Promise.resolve(sqlite3);
}
- if(undefined===options.verbose){
- options.verbose = urlParams.has('opfs-verbose')
- ? (+urlParams.get('opfs-verbose') || 2) : 1;
- }
- if(undefined===options.sanityChecks){
- options.sanityChecks = urlParams.has('opfs-sanity-check');
- }
- if(undefined===options.proxyUri){
- options.proxyUri = callee.defaultProxyUri;
- }
- if('function' === typeof options.proxyUri){
- options.proxyUri = options.proxyUri();
- }
+
//sqlite3.config.warn("OPFS options =",options,globalThis.location);
const thePromise = new Promise(function(promiseResolve_, promiseReject_){
const loggers = [
const logImpl = (level,...args)=>{
if(options.verbose>level) loggers[level]("OPFS syncer:",...args);
};
- const log = (...args)=>logImpl(2, ...args);
- const warn = (...args)=>logImpl(1, ...args);
- const error = (...args)=>logImpl(0, ...args);
- const toss = sqlite3.util.toss;
- const capi = sqlite3.capi;
- const util = sqlite3.util;
- const wasm = sqlite3.wasm;
- const sqlite3_vfs = capi.sqlite3_vfs;
- const sqlite3_file = capi.sqlite3_file;
- const sqlite3_io_methods = capi.sqlite3_io_methods;
-
- const opfsIoMethods = new sqlite3_io_methods();
- const opfsVfs = new sqlite3_vfs()
- .addOnDispose( ()=>opfsIoMethods.dispose());
- const pDVfs = capi.sqlite3_vfs_find(null)/*pointer to default VFS*/;
- const dVfs = pDVfs
- ? new sqlite3_vfs(pDVfs)
- : null /* dVfs will be null when sqlite3 is built with
- SQLITE_OS_OTHER. */;
-
- opfsIoMethods.$iVersion = 1;
- opfsVfs.$iVersion = 2/*yes, two*/;
- opfsVfs.$szOsFile = capi.sqlite3_file.structInfo.sizeof;
- opfsVfs.$mxPathname = 1024/* sure, why not? The OPFS name length limit
- is undocumented/unspecified. */;
- opfsVfs.$zName = wasm.allocCString("opfs");
- // All C-side memory of opfsVfs is zeroed out, but just to be explicit:
- opfsVfs.$xDlOpen = opfsVfs.$xDlError = opfsVfs.$xDlSym = opfsVfs.$xDlClose = null;
- opfsVfs.addOnDispose(
- '$zName', opfsVfs.$zName,
- 'cleanup default VFS wrapper', ()=>(dVfs ? dVfs.dispose() : null)
- /**
- Pedantic sidebar: the entries in this array are items to
- clean up when opfsVfs.dispose() is called, but in this
- environment it will never be called. The VFS instance simply
- hangs around until the WASM module instance is cleaned up. We
- "could" _hypothetically_ clean it up by "importing" an
- sqlite3_os_end() impl into the wasm build, but the shutdown
- order of the wasm engine and the JS one are undefined so
- there is no guaranty that the opfsVfs instance would be
- available in one environment or the other when
- sqlite3_os_end() is called (_if_ it gets called at all in a
- wasm build, which is undefined).
- */
- );
-
- /**
- State which we send to the async-api Worker or share with it.
- This object must initially contain only cloneable or sharable
- objects. After the worker's "inited" message arrives, other types
- of data may be added to it.
-
- For purposes of Atomics.wait() and Atomics.notify(), we use a
- SharedArrayBuffer with one slot reserved for each of the API
- proxy's methods. The sync side of the API uses Atomics.wait()
- on the corresponding slot and the async side uses
- Atomics.notify() on that slot.
+ const log = (...args)=>logImpl(2, ...args),
+ warn = (...args)=>logImpl(1, ...args),
+ error = (...args)=>logImpl(0, ...args),
+ capi = sqlite3.capi,
+ wasm = sqlite3.wasm;
+
+ const state = opfsUtil.createVfsState('opfs', options),
+ opfsVfs = state.vfs,
+ metrics = opfsVfs.metrics.counters,
+ mTimeStart = opfsVfs.mTimeStart,
+ mTimeEnd = opfsVfs.mTimeEnd,
+ __openFiles = opfsVfs.__openFiles;
+ delete state.vfs;
+
+ /* At this point, createVfsState() has populated state and
+ opfsVfs with any code common to both the "opfs" and "opfs-wl"
+ VFSes. Now comes the VFS-dependent work... */
- The approach of using a single SAB to serialize comms for all
- instances might(?) lead to deadlock situations in multi-db
- cases. We should probably have one SAB here with a single slot
- for locking a per-file initialization step and then allocate a
- separate SAB like the above one for each file. That will
- require a bit of acrobatics but should be feasible. The most
- problematic part is that xOpen() would have to use
- postMessage() to communicate its SharedArrayBuffer, and mixing
- that approach with Atomics.wait/notify() gets a bit messy.
- */
- const state = opfsUtil.createVfsStateObject(opfsVfs);
- state.verbose = options.verbose;
- const metrics = opfsVfs.metrics.counters;
let promiseWasRejected = undefined;
const promiseReject = (err)=>{
promiseWasRejected = true;
promiseWasRejected = false;
return promiseResolve_(sqlite3);
};
- const workerArgs = '?vfs=opfs';
- const W =
+ options.proxyUri += '?vfs=opfs';
+ const W = opfsVfs.worker =
//#if target:es6-bundler-friendly
new Worker(new URL("sqlite3-opfs-async-proxy.js?vfs=opfs", import.meta.url));
//#elif target:es6-module
- new Worker(new URL(options.proxyUri+workerArgs, import.meta.url));
+ new Worker(new URL(options.proxyUri, import.meta.url));
//#else
- new Worker(options.proxyUri+workerArgs);
+ new Worker(options.proxyUri);
//#endif
setTimeout(()=>{
/* At attempt to work around a browser-specific quirk in which
promiseReject(new Error("Loading OPFS async Worker failed for unknown reasons."));
};
- /**
- Runs the given operation (by name) in the async worker
- counterpart, waits for its response, and returns the result
- which the async worker writes to SAB[state.opIds.rc]. The
- 2nd and subsequent arguments must be the arguments for the
- async op.
- */
- const opRun = (op,...args)=>{
- const opNdx = state.opIds[op] || toss("Invalid op ID:",op);
- state.s11n.serialize(...args);
- Atomics.store(state.sabOPView, state.opIds.rc, -1);
- Atomics.store(state.sabOPView, state.opIds.whichOp, opNdx);
- Atomics.notify(state.sabOPView, state.opIds.whichOp)
- /* async thread will take over here */;
- const t = performance.now();
- while('not-equal'!==Atomics.wait(state.sabOPView, state.opIds.rc, -1)){
- /*
- The reason for this loop is buried in the details of a long
- discussion at:
-
- https://github.com/sqlite/sqlite-wasm/issues/12
-
- Summary: in at least one browser flavor, under high loads,
- the wait()/notify() pairings can get out of sync. Calling
- wait() here until it returns 'not-equal' gets them back in
- sync.
- */
- }
- /* When the above wait() call returns 'not-equal', the async
- half will have completed the operation and reported its results
- in the state.opIds.rc slot of the SAB. */
- const rc = Atomics.load(state.sabOPView, state.opIds.rc);
- metrics[op].wait += performance.now() - t;
- if(rc && state.asyncS11nExceptions){
- const err = state.s11n.deserialize();
- if(err) error(op+"() async error:",...err);
- }
- return rc;
- };
-
+ const opRun = opfsVfs.opRun;
//#if nope
/**
Not part of the public API. Only for test/development use.
};
//#endif
- /**
- Map of sqlite3_file pointers to objects constructed by xOpen().
- */
- const __openFiles = Object.create(null);
-
- const opTimer = Object.create(null);
- opTimer.op = undefined;
- opTimer.start = undefined;
- const mTimeStart = (op)=>{
- opTimer.start = performance.now();
- opTimer.op = op;
- ++metrics[op].count;
+ opfsVfs.ioSyncWrappers.xLock = function(pFile,lockType){
+ mTimeStart('xLock');
+ ++metrics.xLock.count;
+ const f = __openFiles[pFile];
+ let rc = 0;
+ /* All OPFS locks are exclusive locks. If xLock() has
+ previously succeeded, do nothing except record the lock
+ type. If no lock is active, have the async counterpart
+ lock the file. */
+ if( f.lockType ) {
+ f.lockType = lockType;
+ }else{
+ rc = opRun('xLock', pFile, lockType);
+ if( 0===rc ) f.lockType = lockType;
+ }
+ mTimeEnd();
+ return rc;
};
- const mTimeEnd = ()=>(
- metrics[opTimer.op].time += performance.now() - opTimer.start
- );
-
- /**
- Impls for the sqlite3_io_methods methods. Maintenance reminder:
- members are in alphabetical order to simplify finding them.
- */
- const ioSyncWrappers = {
- xCheckReservedLock: function(pFile,pOut){
- /**
- As of late 2022, only a single lock can be held on an OPFS
- file. We have no way of checking whether any _other_ db
- connection has a lock except by trying to obtain and (on
- success) release a sync-handle for it, but doing so would
- involve an inherent race condition. For the time being,
- pending a better solution, we simply report whether the
- given pFile is open.
-
- Update 2024-06-12: based on forum discussions, this
- function now always sets pOut to 0 (false):
- https://sqlite.org/forum/forumpost/a2f573b00cda1372
- */
- wasm.poke(pOut, 0, 'i32');
- return 0;
- },
- xClose: function(pFile){
- mTimeStart('xClose');
- let rc = 0;
- const f = __openFiles[pFile];
- if(f){
- delete __openFiles[pFile];
- rc = opRun('xClose', pFile);
- if(f.sq3File) f.sq3File.dispose();
- }
- mTimeEnd();
- return rc;
- },
- xDeviceCharacteristics: function(pFile){
- return capi.SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN;
- },
- xFileControl: function(pFile, opId, pArg){
- /*mTimeStart('xFileControl');
- mTimeEnd();*/
- return capi.SQLITE_NOTFOUND;
- },
- xFileSize: function(pFile,pSz64){
- mTimeStart('xFileSize');
- let rc = opRun('xFileSize', pFile);
- if(0==rc){
- try {
- const sz = state.s11n.deserialize()[0];
- wasm.poke(pSz64, sz, 'i64');
- }catch(e){
- error("Unexpected error reading xFileSize() result:",e);
- rc = state.sq3Codes.SQLITE_IOERR;
- }
- }
- mTimeEnd();
- return rc;
- },
- xLock: function(pFile,lockType){
- mTimeStart('xLock');
- const f = __openFiles[pFile];
- let rc = 0;
- /* All OPFS locks are exclusive locks. If xLock() has
- previously succeeded, do nothing except record the lock
- type. If no lock is active, have the async counterpart
- lock the file. */
- if( !f.lockType ) {
- rc = opRun('xLock', pFile, lockType);
- if( 0===rc ) f.lockType = lockType;
- }else{
- f.lockType = lockType;
- }
- mTimeEnd();
- return rc;
- },
- xRead: function(pFile,pDest,n,offset64){
- mTimeStart('xRead');
- const f = __openFiles[pFile];
- let rc;
- try {
- rc = opRun('xRead',pFile, n, Number(offset64));
- if(0===rc || capi.SQLITE_IOERR_SHORT_READ===rc){
- /**
- Results get written to the SharedArrayBuffer f.sabView.
- Because the heap is _not_ a SharedArrayBuffer, we have
- to copy the results. TypedArray.set() seems to be the
- fastest way to copy this. */
- wasm.heap8u().set(f.sabView.subarray(0, n), Number(pDest));
- }
- }catch(e){
- error("xRead(",arguments,") failed:",e,f);
- rc = capi.SQLITE_IOERR_READ;
- }
- mTimeEnd();
- return rc;
- },
- xSync: function(pFile,flags){
- mTimeStart('xSync');
- ++metrics.xSync.count;
- const rc = opRun('xSync', pFile, flags);
- mTimeEnd();
- return rc;
- },
- xTruncate: function(pFile,sz64){
- mTimeStart('xTruncate');
- const rc = opRun('xTruncate', pFile, Number(sz64));
- mTimeEnd();
- return rc;
- },
- xUnlock: function(pFile,lockType){
- mTimeStart('xUnlock');
- const f = __openFiles[pFile];
- let rc = 0;
- if( capi.SQLITE_LOCK_NONE === lockType
+ opfsVfs.ioSyncWrappers.xUnlock = function(pFile,lockType){
+ mTimeStart('xUnlock');
+ ++metrics.xUnlock.count;
+ const f = __openFiles[pFile];
+ let rc = 0;
+ if( capi.SQLITE_LOCK_NONE === lockType
&& f.lockType ){
- rc = opRun('xUnlock', pFile, lockType);
- }
- if( 0===rc ) f.lockType = lockType;
- mTimeEnd();
- return rc;
- },
- xWrite: function(pFile,pSrc,n,offset64){
- mTimeStart('xWrite');
- const f = __openFiles[pFile];
- let rc;
- try {
- f.sabView.set(wasm.heap8u().subarray(
- Number(pSrc), Number(pSrc) + n
- ));
- rc = opRun('xWrite', pFile, n, Number(offset64));
- }catch(e){
- error("xWrite(",arguments,") failed:",e,f);
- rc = capi.SQLITE_IOERR_WRITE;
- }
- mTimeEnd();
- return rc;
+ rc = opRun('xUnlock', pFile, lockType);
}
- }/*ioSyncWrappers*/;
-
- /**
- Impls for the sqlite3_vfs methods. Maintenance reminder: members
- are in alphabetical order to simplify finding them.
- */
- const vfsSyncWrappers = {
- xAccess: function(pVfs,zName,flags,pOut){
- mTimeStart('xAccess');
- const rc = opRun('xAccess', wasm.cstrToJs(zName));
- wasm.poke( pOut, (rc ? 0 : 1), 'i32' );
- mTimeEnd();
- return 0;
- },
- xCurrentTime: function(pVfs,pOut){
- /* If it turns out that we need to adjust for timezone, see:
- https://stackoverflow.com/a/11760121/1458521 */
- wasm.poke(pOut, 2440587.5 + (new Date().getTime()/86400000),
- 'double');
- return 0;
- },
- xCurrentTimeInt64: function(pVfs,pOut){
- wasm.poke(pOut, (2440587.5 * 86400000) + new Date().getTime(),
- 'i64');
- return 0;
- },
- xDelete: function(pVfs, zName, doSyncDir){
- mTimeStart('xDelete');
- const rc = opRun('xDelete', wasm.cstrToJs(zName), doSyncDir, false);
- mTimeEnd();
- return rc;
- },
- xFullPathname: function(pVfs,zName,nOut,pOut){
- /* Until/unless we have some notion of "current dir"
- in OPFS, simply copy zName to pOut... */
- const i = wasm.cstrncpy(pOut, zName, nOut);
- return i<nOut ? 0 : capi.SQLITE_CANTOPEN
- /*CANTOPEN is required by the docs but SQLITE_RANGE would be a closer match*/;
- },
- xGetLastError: function(pVfs,nOut,pOut){
- /* TODO: store exception.message values from the async
- partner in a dedicated SharedArrayBuffer, noting that we'd have
- to encode them... TextEncoder can do that for us. */
- warn("OPFS xGetLastError() has nothing sensible to return.");
- return 0;
- },
- //xSleep is optionally defined below
- xOpen: function f(pVfs, zName, pFile, flags, pOutFlags){
- mTimeStart('xOpen');
- let opfsFlags = 0;
- if(0===zName){
- zName = opfsUtil.randomFilename();
- }else if(wasm.isPtr(zName)){
- if(capi.sqlite3_uri_boolean(zName, "opfs-unlock-asap", 0)){
- /* -----------------------^^^^^ MUST pass the untranslated
- C-string here. */
- opfsFlags |= state.opfsFlags.OPFS_UNLOCK_ASAP;
- }
- if(capi.sqlite3_uri_boolean(zName, "delete-before-open", 0)){
- opfsFlags |= state.opfsFlags.OPFS_UNLINK_BEFORE_OPEN;
- }
- zName = wasm.cstrToJs(zName);
- //warn("xOpen zName =",zName, "opfsFlags =",opfsFlags);
- }
- const fh = Object.create(null);
- fh.fid = pFile;
- fh.filename = zName;
- fh.sab = new SharedArrayBuffer(state.fileBufferSize);
- fh.flags = flags;
- fh.readOnly = !(capi.SQLITE_OPEN_CREATE & flags)
- && !!(flags & capi.SQLITE_OPEN_READONLY);
- const rc = opRun('xOpen', pFile, zName, flags, opfsFlags);
- if(!rc){
- /* Recall that sqlite3_vfs::xClose() will be called, even on
- error, unless pFile->pMethods is NULL. */
- if(fh.readOnly){
- wasm.poke(pOutFlags, capi.SQLITE_OPEN_READONLY, 'i32');
- }
- __openFiles[pFile] = fh;
- fh.sabView = state.sabFileBufView;
- fh.sq3File = new sqlite3_file(pFile);
- fh.sq3File.$pMethods = opfsIoMethods.pointer;
- fh.lockType = capi.SQLITE_LOCK_NONE;
- }
- mTimeEnd();
- return rc;
- }/*xOpen()*/
- }/*vfsSyncWrappers*/;
-
- if(dVfs){
- opfsVfs.$xRandomness = dVfs.$xRandomness;
- opfsVfs.$xSleep = dVfs.$xSleep;
- }
- if(!opfsVfs.$xRandomness){
- /* If the default VFS has no xRandomness(), add a basic JS impl... */
- vfsSyncWrappers.xRandomness = function(pVfs, nOut, pOut){
- const heap = wasm.heap8u();
- let i = 0;
- const npOut = Number(pOut);
- for(; i < nOut; ++i) heap[npOut + i] = (Math.random()*255000) & 0xFF;
- return i;
- };
- }
- if(!opfsVfs.$xSleep){
- /* If we can inherit an xSleep() impl from the default VFS then
- assume it's sane and use it, otherwise install a JS-based
- one. */
- vfsSyncWrappers.xSleep = function(pVfs,ms){
- Atomics.wait(state.sabOPView, state.opIds.xSleep, 0, ms);
- return 0;
- };
- }
+ if( 0===rc ) f.lockType = lockType;
+ mTimeEnd();
+ return rc;
+ };
if(sqlite3.oo1){
const OpfsDb = function(...args){
const sanityCheck = function(){
const scope = wasm.scopedAllocPush();
- const sq3File = new sqlite3_file();
+ const sq3File = new capi.sqlite3_file();
try{
const fid = sq3File.pointer;
const openFlags = capi.SQLITE_OPEN_CREATE
rc = state.s11n.deserialize();
log("deserialize() says:",rc);
if("This is รค string."!==rc[0]) toss("String d13n error.");
- vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut);
+ opfsVfs.vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut);
rc = wasm.peek(pOut,'i32');
log("xAccess(",dbFile,") exists ?=",rc);
- rc = vfsSyncWrappers.xOpen(opfsVfs.pointer, zDbFile,
+ rc = opfsVfs.vfsSyncWrappers.xOpen(opfsVfs.pointer, zDbFile,
fid, openFlags, pOut);
log("open rc =",rc,"state.sabOPView[xOpen] =",
state.sabOPView[state.opIds.xOpen]);
error("open failed with code",rc);
return;
}
- vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut);
+ opfsVfs.vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut);
rc = wasm.peek(pOut,'i32');
if(!rc) toss("xAccess() failed to detect file.");
- rc = ioSyncWrappers.xSync(sq3File.pointer, 0);
+ rc = opfsVfs.ioSyncWrappers.xSync(sq3File.pointer, 0);
if(rc) toss('sync failed w/ rc',rc);
- rc = ioSyncWrappers.xTruncate(sq3File.pointer, 1024);
+ rc = opfsVfs.ioSyncWrappers.xTruncate(sq3File.pointer, 1024);
if(rc) toss('truncate failed w/ rc',rc);
wasm.poke(pOut,0,'i64');
- rc = ioSyncWrappers.xFileSize(sq3File.pointer, pOut);
+ rc = opfsVfs.ioSyncWrappers.xFileSize(sq3File.pointer, pOut);
if(rc) toss('xFileSize failed w/ rc',rc);
log("xFileSize says:",wasm.peek(pOut, 'i64'));
- rc = ioSyncWrappers.xWrite(sq3File.pointer, zDbFile, 10, 1);
+ rc = opfsVfs.ioSyncWrappers.xWrite(sq3File.pointer, zDbFile, 10, 1);
if(rc) toss("xWrite() failed!");
const readBuf = wasm.scopedAlloc(16);
- rc = ioSyncWrappers.xRead(sq3File.pointer, readBuf, 6, 2);
+ rc = opfsVfs.ioSyncWrappers.xRead(sq3File.pointer, readBuf, 6, 2);
wasm.poke(readBuf+6,0);
let jRead = wasm.cstrToJs(readBuf);
log("xRead() got:",jRead);
if("sanity"!==jRead) toss("Unexpected xRead() value.");
- if(vfsSyncWrappers.xSleep){
+ if(opfsVfs.vfsSyncWrappers.xSleep){
log("xSleep()ing before close()ing...");
- vfsSyncWrappers.xSleep(opfsVfs.pointer,2000);
+ opfsVfs.vfsSyncWrappers.xSleep(opfsVfs.pointer,2000);
log("waking up from xSleep()");
}
- rc = ioSyncWrappers.xClose(fid);
+ rc = opfsVfs.ioSyncWrappers.xClose(fid);
log("xClose rc =",rc,"sabOPView =",state.sabOPView);
log("Deleting file:",dbFile);
- vfsSyncWrappers.xDelete(opfsVfs.pointer, zDbFile, 0x1234);
- vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut);
+ opfsVfs.vfsSyncWrappers.xDelete(opfsVfs.pointer, zDbFile, 0x1234);
+ opfsVfs.vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut);
rc = wasm.peek(pOut,'i32');
if(rc) toss("Expecting 0 from xAccess(",dbFile,") after xDelete().");
warn("End of OPFS sanity checks.");
}
try {
sqlite3.vfs.installVfs({
- io: {struct: opfsIoMethods, methods: ioSyncWrappers},
- vfs: {struct: opfsVfs, methods: vfsSyncWrappers}
+ io: {struct: opfsVfs.ioMethods, methods: opfsVfs.ioSyncWrappers},
+ vfs: {struct: opfsVfs, methods: opfsVfs.vfsSyncWrappers}
});
state.sabOPView = new Int32Array(state.sabOP);
state.sabFileBufView = new Uint8Array(state.sabIO, 0, state.fileBufferSize);
-C Consolidate\sthe\sOPFS\sVFS's\smetrics-tracking\scode.
-D 2026-03-04T16:30:51.574
+C Factor\sout\sabout\s300\slines\sof\scommon\sOPFS\sVFS\sbootstrapping\scode.
+D 2026-03-04T17:54:02.085
F .fossil-settings/binary-glob 61195414528fb3ea9693577e1980230d78a1f8b0a54c78cf1b9b24d0a409ed6a x
F .fossil-settings/empty-dirs dbb81e8fc0401ac46a1491ab34a7f2c7c0452f2f06b54ebb845d024ca8283ef1
F .fossil-settings/ignore-glob 35175cdfcf539b2318cb04a9901442804be81cd677d8b889fcc9149c21f239ea
F ext/wasm/api/extern-post-js.c-pp.js d9f42ecbedc784c0d086bc37800e52946a14f7a21600b291daa3f963c314f930
F ext/wasm/api/extern-pre-js.js cc61c09c7a24a07dbecb4c352453c3985170cec12b4e7e7e7a4d11d43c5c8f41
F ext/wasm/api/opfs-common-inline.c-pp.js 5be8d6d91963849e218221b48206ae55612630bb2cd7f30b1b6fcf7a9e374b76
-F ext/wasm/api/opfs-common-shared.c-pp.js e00a2f18b6ed6d560b19503d9adce49f2151d1b488cbca24545a631bbed72d25
+F ext/wasm/api/opfs-common-shared.c-pp.js 91b1291447c689a77ffcc3297dc478dd29196311facb063737aeaaf70660a0f0
F ext/wasm/api/post-js-footer.js a50c1a2c4d008aede7b2aa1f18891a7ee71437c2f415b8aeb3db237ddce2935b
F ext/wasm/api/post-js-header.js f35d2dcf1ab7f22a93d565f8e0b622a2934fc4e743edf3b708e4dd8140eeff55
F ext/wasm/api/pre-js.c-pp.js 9234ea680a2f6a2a177e8dcd934bdc5811a9f8409165433a252b87f4c07bba6f
F ext/wasm/api/sqlite3-vfs-helper.c-pp.js 3f828cc66758acb40e9c5b4dcfd87fd478a14c8fb7f0630264e6c7fa0e57515d
F ext/wasm/api/sqlite3-vfs-kvvfs.c-pp.js 2ccf4322f42063aefc150972943e750c77f7926b866f1639d40eec05df075b6e
F ext/wasm/api/sqlite3-vfs-opfs-sahpool.c-pp.js 1575ea6bbcf2da1e6df6892c17521a0c1c1c199a672e9090176ea0b88de48bd9
-F ext/wasm/api/sqlite3-vfs-opfs-wl.c-pp.js 007430b82089b2aa8720a5866a9a9b0e9a7ddf3a1e7bafd1b652bec6c6a18196
-F ext/wasm/api/sqlite3-vfs-opfs.c-pp.js 9061455eb3a51aa898d02737aae1210c439d9f412b97d342ae9123746f72fcad
+F ext/wasm/api/sqlite3-vfs-opfs-wl.c-pp.js 929bad4b98f176b2d0a8c1509ca833b42a11f5f0871d2b3bb2597b9e29c8ea24
+F ext/wasm/api/sqlite3-vfs-opfs.c-pp.js 9babe167f28ecd8fe67c97fe0734ec88beecbb61a0580d5218edcb8b3d8670ce
F ext/wasm/api/sqlite3-vtab-helper.c-pp.js 366596d8ff73d4cefb938bbe95bc839d503c3fab6c8335ce4bf52f0d8a7dee81
F ext/wasm/api/sqlite3-wasm.c 45bb20e19b245136711f9b78584371233975811b6560c29ed9b650e225417e29
F ext/wasm/api/sqlite3-worker1-promiser.c-pp.js aa9715f661fb700459a5a6cb1c32a4d6a770723b47aa9ac0e16c2cf87d622a66
F tool/warnings.sh d924598cf2f55a4ecbc2aeb055c10bd5f48114793e7ba25f9585435da29e7e98
F tool/win/sqlite.vsix deb315d026cc8400325c5863eef847784a219a2f
F tool/winmain.c 00c8fb88e365c9017db14c73d3c78af62194d9644feaf60e220ab0f411f3604c
-P b0dd23299e97ff975f213cb3a8b051f4d7b785b29def82e01f53427fdf77ecb6
-R 950709f8ceba3a494ed70c19c6d2296a
+P b71c79ef9672c77a72a976ffcd7cbebfaf0ff314dff97b274f7d092de6a7773f
+R c0d13d0a3e8b693179cc92fc3fedae39
U stephan
-Z a91e3522d6c27cde9f5c40c8510d95f1
+Z 3ba0938d21136544be85b7e2c587b666
# Remove this line to create a well-formed Fossil manifest.
-b71c79ef9672c77a72a976ffcd7cbebfaf0ff314dff97b274f7d092de6a7773f
+57adecbab71795b62b1c2e4570ff504f35681e81dd8c94f78ad8e05ef39d36fd