From: stephan Date: Mon, 21 Nov 2022 03:50:52 +0000 (+0000) Subject: Add test app for experimenting with multi-worker OPFS concurrency. Tweak OPFS VFS... X-Git-Tag: version-3.41.0~398 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=27c4cd183d91d09e34e310d6349cda2b33c255ba;p=thirdparty%2Fsqlite.git Add test app for experimenting with multi-worker OPFS concurrency. Tweak OPFS VFS to significantly improve the otherwise "unfortunate" concurrency situation. FossilOrigin-Name: 96f76e7616f8157a342b9e1c42f7b1feab200d182268871a2b25f67d4ee2564c --- diff --git a/ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api b/ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api index b903bedee6..1f7908e3b8 100644 --- a/ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api +++ b/ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api @@ -7,6 +7,7 @@ _sqlite3_bind_null _sqlite3_bind_parameter_count _sqlite3_bind_parameter_index _sqlite3_bind_text +_sqlite3_busy_timeout _sqlite3_changes _sqlite3_changes64 _sqlite3_clear_bindings diff --git a/ext/wasm/api/extern-post-js.js b/ext/wasm/api/extern-post-js.js index cace6ed51c..b327837814 100644 --- a/ext/wasm/api/extern-post-js.js +++ b/ext/wasm/api/extern-post-js.js @@ -59,6 +59,9 @@ const toExportForES6 = li.pop(); initModuleState.sqlite3Dir = li.join('/') + '/'; } + if(initModuleState.sqlite3Dir){ + initModuleState.sqlite3Dir = initModuleState.sqlite3Dir.replace(/[/]{2,}/g,'/'); + } self.sqlite3InitModule = (...args)=>{ //console.warn("Using replaced sqlite3InitModule()",self.location); diff --git a/ext/wasm/api/sqlite3-api-opfs.js b/ext/wasm/api/sqlite3-api-opfs.js index a3f73cc7b2..1fd50dcc6f 100644 --- a/ext/wasm/api/sqlite3-api-opfs.js +++ b/ext/wasm/api/sqlite3-api-opfs.js @@ -92,7 +92,8 @@ const installOpfsVfs = function callee(options){ } const urlParams = new URL(self.location.href).searchParams; if(undefined===options.verbose){ - options.verbose = urlParams.has('opfs-verbose') ? 3 : 2; + options.verbose = urlParams.has('opfs-verbose') + ? 
(+urlParams.get('opfs-verbose') || 2) : 1; } if(undefined===options.sanityChecks){ options.sanityChecks = urlParams.has('opfs-sanity-check'); @@ -101,6 +102,8 @@ const installOpfsVfs = function callee(options){ options.proxyUri = callee.defaultProxyUri; } + //console.warn("OPFS options =",options,self.location); + if('function' === typeof options.proxyUri){ options.proxyUri = options.proxyUri(); } @@ -1154,7 +1157,10 @@ const installOpfsVfs = function callee(options){ [ /* Truncate journal mode is faster than delete or wal for this vfs, per speedtest1. */ - "pragma journal_mode=truncate;" + "pragma journal_mode=truncate;", + /* Set a default busy-timeout handler to help OPFS dbs + deal with multi-tab/multi-worker contention. */ + "pragma busy_timeout=2000;", /* This vfs benefits hugely from cache on moderate/large speedtest1 --size 50 and --size 100 workloads. We currently @@ -1162,7 +1168,7 @@ const installOpfsVfs = function callee(options){ sqlite3.wasm. If that policy changes, the cache can be set here. */ - //"pragma cache_size=-8388608;" + //"pragma cache_size=-16384;" ].join("") ); } diff --git a/ext/wasm/api/sqlite3-api-prologue.js b/ext/wasm/api/sqlite3-api-prologue.js index fed1c56669..8b2ce0936d 100644 --- a/ext/wasm/api/sqlite3-api-prologue.js +++ b/ext/wasm/api/sqlite3-api-prologue.js @@ -897,6 +897,7 @@ self.sqlite3ApiBootstrap = function sqlite3ApiBootstrap( the lines of sqlite3_prepare_v3(). The slightly problematic part is the final argument (text destructor). 
*/ ], +["sqlite3_busy_timeout","int", "sqlite3*", "int"], ["sqlite3_close_v2", "int", "sqlite3*"], ["sqlite3_changes", "int", "sqlite3*"], ["sqlite3_clear_bindings","int", "sqlite3_stmt*"], diff --git a/ext/wasm/api/sqlite3-opfs-async-proxy.js b/ext/wasm/api/sqlite3-opfs-async-proxy.js index e4657484ef..3701e8c30d 100644 --- a/ext/wasm/api/sqlite3-opfs-async-proxy.js +++ b/ext/wasm/api/sqlite3-opfs-async-proxy.js @@ -53,7 +53,7 @@ const state = Object.create(null); 2 = warnings and errors 3 = debug, warnings, and errors */ -state.verbose = 2; +state.verbose = 1; const loggers = { 0:console.error.bind(console), @@ -150,6 +150,57 @@ const getDirForFilename = async function f(absFilename, createDirs = false){ return [dh, filename]; }; +/** + If the given file-holding object has a sync handle attached to it, + that handle is removed and asynchronously closed. Though it may + sound sensible to continue work as soon as the close() returns + (noting that it's asynchronous), doing so can cause operations + performed soon afterwards, e.g. a call to getSyncHandle() to fail + because they may happen out of order from the close(). OPFS does + not guarantee that the actual order of operations is retained in + such cases. i.e. always "await" on the result of this function. +*/ +const closeSyncHandle = async (fh)=>{ + if(fh.syncHandle){ + log("Closing sync handle for",fh.filenameAbs); + const h = fh.syncHandle; + delete fh.syncHandle; + delete fh.xLock; + __autoLocks.delete(fh.fid); + return h.close(); + } +}; + +/** + A proxy for closeSyncHandle() which is guaranteed to not throw. + + This function is part of a lock/unlock step in functions which + require a sync access handle but may be called without xLock() + having been called first. Such calls need to release that + handle to avoid locking the file for all of time. This is an + _attempt_ at reducing cross-tab contention but it may prove + to be more of a problem than a solution and may need to be + removed. 
+*/ +const closeSyncHandleNoThrow = async (fh)=>{ + try{await closeSyncHandle(fh)} + catch(e){ + warn("closeSyncHandleNoThrow() ignoring:",e,fh); + } +}; + +/* Release all auto-locks. */ +const closeAutoLocks = async ()=>{ + if(__autoLocks.size){ + /* Release all auto-locks. */ + for(const fid of __autoLocks){ + const fh = __openFiles[fid]; + await closeSyncHandleNoThrow(fh); + log("Auto-unlocked",fid,fh.filenameAbs); + } + } +}; + /** An error class specifically for use with getSyncHandle(), the goal of which is to eventually be able to distinguish unambiguously @@ -168,7 +219,25 @@ class GetSyncHandleError extends Error { this.name = 'GetSyncHandleError'; } }; - +GetSyncHandleError.convertRc = (e,rc)=>{ + if(1){ + /* This approach returns SQLITE_LOCKED to the C API + when getSyncHandle() fails but makes the very + wild assumption that such a failure _is_ a locking + error. In practice that appears to be the most + common error, by far, but we cannot unambiguously + distinguish that from other errors. + + This approach demonstrably reduces concurrency-related + errors but is highly questionable. + */ + return (e instanceof GetSyncHandleError) + ? state.sq3Codes.SQLITE_LOCKED + : rc; + }else{ + return rc; + } +} /** Returns the sync access handle associated with the given file handle object (which must be a valid handle object, as created by @@ -201,7 +270,8 @@ const getSyncHandle = async (fh)=>{ ); } warn("Error getting sync handle. Waiting",ms, - "ms and trying again.",fh.filenameAbs,e); + "ms and trying again.",fh.filenameAbs,e); + //await closeAutoLocks(); Atomics.wait(state.sabOPView, state.opIds.retry, 0, ms); } } @@ -214,45 +284,6 @@ const getSyncHandle = async (fh)=>{ return fh.syncHandle; }; -/** - If the given file-holding object has a sync handle attached to it, - that handle is remove and asynchronously closed. 
Though it may - sound sensible to continue work as soon as the close() returns - (noting that it's asynchronous), doing so can cause operations - performed soon afterwards, e.g. a call to getSyncHandle() to fail - because they may happen out of order from the close(). OPFS does - not guaranty that the actual order of operations is retained in - such cases. i.e. always "await" on the result of this function. -*/ -const closeSyncHandle = async (fh)=>{ - if(fh.syncHandle){ - log("Closing sync handle for",fh.filenameAbs); - const h = fh.syncHandle; - delete fh.syncHandle; - delete fh.xLock; - __autoLocks.delete(fh.fid); - return h.close(); - } -}; - -/** - A proxy for closeSyncHandle() which is guaranteed to not throw. - - This function is part of a lock/unlock step in functions which - require a sync access handle but may be called without xLock() - having been called first. Such calls need to release that - handle to avoid locking the file for all of time. This is an - _attempt_ at reducing cross-tab contention but it may prove - to be more of a problem than a solution and may need to be - removed. -*/ -const closeSyncHandleNoThrow = async (fh)=>{ - try{await closeSyncHandle(fh)} - catch(e){ - warn("closeSyncHandleNoThrow() ignoring:",e,fh); - } -}; - /** Stores the given value at state.sabOPView[state.opIds.rc] and then Atomics.notify()'s it. 
@@ -451,7 +482,7 @@ const vfsAsyncImpls = { rc = 0; }catch(e){ state.s11n.storeException(2,e); - rc = state.sq3Codes.SQLITE_IOERR; + rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR); } wTimeEnd(); storeAndNotify('xFileSize', rc); @@ -471,7 +502,7 @@ const vfsAsyncImpls = { __autoLocks.delete(fid); }catch(e){ state.s11n.storeException(1,e); - rc = state.sq3Codes.SQLITE_IOERR_LOCK; + rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_LOCK); fh.xLock = oldLockType; } wTimeEnd(); @@ -545,7 +576,7 @@ const vfsAsyncImpls = { if(undefined===nRead) wTimeEnd(); error("xRead() failed",e,fh); state.s11n.storeException(1,e); - rc = state.sq3Codes.SQLITE_IOERR_READ; + rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_READ); } storeAndNotify('xRead',rc); mTimeEnd(); @@ -579,7 +610,7 @@ const vfsAsyncImpls = { }catch(e){ error("xTruncate():",e,fh); state.s11n.storeException(2,e); - rc = state.sq3Codes.SQLITE_IOERR_TRUNCATE; + rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_TRUNCATE); } wTimeEnd(); storeAndNotify('xTruncate',rc); @@ -619,7 +650,7 @@ const vfsAsyncImpls = { }catch(e){ error("xWrite():",e,fh); state.s11n.storeException(1,e); - rc = state.sq3Codes.SQLITE_IOERR_WRITE; + rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_WRITE); } wTimeEnd(); storeAndNotify('xWrite',rc); @@ -746,22 +777,16 @@ const waitLoop = async function f(){ /** waitTime is how long (ms) to wait for each Atomics.wait(). We need to wake up periodically to give the thread a chance - to do other things. + to do other things. If this is too high (e.g. 500ms) then + even two workers/tabs can easily run into locking errors. */ - const waitTime = 500; + const waitTime = 150; while(!flagAsyncShutdown){ try { if('timed-out'===Atomics.wait( state.sabOPView, state.opIds.whichOp, 0, waitTime )){ - if(__autoLocks.size){ - /* Release all auto-locks. 
*/ - for(const fid of __autoLocks){ - const fh = __openFiles[fid]; - await closeSyncHandleNoThrow(fh); - log("Auto-unlocked",fid,fh.filenameAbs); - } - } + await closeAutoLocks(); continue; } const opId = Atomics.load(state.sabOPView, state.opIds.whichOp); @@ -791,7 +816,7 @@ navigator.storage.getDirectory().then(function(d){ const opt = data.args; state.littleEndian = opt.littleEndian; state.asyncS11nExceptions = opt.asyncS11nExceptions; - state.verbose = opt.verbose ?? 2; + state.verbose = opt.verbose ?? 1; state.fileBufferSize = opt.fileBufferSize; state.sabS11nOffset = opt.sabS11nOffset; state.sabS11nSize = opt.sabS11nSize; diff --git a/ext/wasm/index.html b/ext/wasm/index.html index 37d66603f6..9fa5bbdf49 100644 --- a/ext/wasm/index.html +++ b/ext/wasm/index.html @@ -104,6 +104,9 @@ synchronous sqlite3_vfs interface and the async OPFS impl. +
  • OPFS concurrency + tests using multiple workers. +
  • diff --git a/ext/wasm/tests/opfs/concurrency/index.html b/ext/wasm/tests/opfs/concurrency/index.html new file mode 100644 index 0000000000..79a46692cc --- /dev/null +++ b/ext/wasm/tests/opfs/concurrency/index.html @@ -0,0 +1,34 @@ + + + + + + + + sqlite3 OPFS Worker concurrency tester + + + +

    +

    + OPFS concurrency tester using multiple independent Workers. + This app is incomplete. +

    +
    + + +
    +
    + + + + diff --git a/ext/wasm/tests/opfs/concurrency/test.js b/ext/wasm/tests/opfs/concurrency/test.js new file mode 100644 index 0000000000..d045f3271f --- /dev/null +++ b/ext/wasm/tests/opfs/concurrency/test.js @@ -0,0 +1,97 @@ +(async function(self){ + + const logClass = (function(){ + const mapToString = (v)=>{ + switch(typeof v){ + case 'number': case 'string': case 'boolean': + case 'undefined': case 'bigint': + return ''+v; + default: break; + } + if(null===v) return 'null'; + if(v instanceof Error){ + v = { + message: v.message, + stack: v.stack, + errorClass: v.name + }; + } + return JSON.stringify(v,undefined,2); + }; + const normalizeArgs = (args)=>args.map(mapToString); + const logTarget = document.querySelector('#test-output'); + const logClass = function(cssClass,...args){ + const ln = document.createElement('div'); + if(cssClass){ + for(const c of (Array.isArray(cssClass) ? cssClass : [cssClass])){ + ln.classList.add(c); + } + } + ln.append(document.createTextNode(normalizeArgs(args).join(' '))); + logTarget.append(ln); + }; + const cbReverse = document.querySelector('#cb-log-reverse'); + const cbReverseKey = 'tester1:cb-log-reverse'; + const cbReverseIt = ()=>{ + logTarget.classList[cbReverse.checked ? 'add' : 'remove']('reverse'); + localStorage.setItem(cbReverseKey, cbReverse.checked ? 
1 : 0); + }; + cbReverse.addEventListener('change', cbReverseIt, true); + if(localStorage.getItem(cbReverseKey)){ + cbReverse.checked = !!(+localStorage.getItem(cbReverseKey)); + } + cbReverseIt(); + return logClass; + })(); + const stdout = (...args)=>logClass('',...args); + const stderr = (...args)=>logClass('error',...args); + + const wait = async (ms)=>{ + return new Promise((resolve)=>setTimeout(resolve,ms)); + }; + + const urlArgsJs = new URL(document.currentScript.src).searchParams; + const urlArgsHtml = new URL(self.location.href).searchParams; + const options = Object.create(null); + options.sqlite3Dir = urlArgsJs.get('sqlite3.dir'); + options.workerCount = ( + urlArgsHtml.has('workers') ? +urlArgsHtml.get('workers') : 3 + ) || 3; + const workers = []; + workers.post = (type,...args)=>{ + for(const w of workers) w.postMessage({type, payload:args}); + }; + workers.loadedCount = 0; + workers.onmessage = function(msg){ + msg = msg.data; + const wName = msg.worker; + const prefix = 'Worker ['+wName+']:'; + switch(msg.type){ + case 'stdout': stdout(prefix,...msg.payload); break; + case 'stderr': stderr(prefix,...msg.payload); break; + case 'error': stderr(prefix,"ERROR:",...msg.payload); break; + case 'loaded': + stdout(prefix,"loaded"); + if(++workers.loadedCount === workers.length){ + stdout("All workers loaded. Telling them to run..."); + workers.post('run'); + } + break; + default: logClass('error',"Unhandled message type:",msg); break; + } + }; + + stdout("Launching",options.workerCount,"workers..."); + workers.uri = ( + 'worker.js?' + + 'sqlite3.dir='+options.sqlite3Dir + + '&opfs-verbose=2' + ); + for(let i = 0; i < options.workerCount; ++i){ + stdout("Launching worker..."); + workers.push(new Worker(workers.uri+(i ? '' : '&unlink-db'))); + } + // Have to delay onmessage assignment until after the loop + // to avoid that early workers get an undue head start. 
+ workers.forEach((w)=>w.onmessage = workers.onmessage); +})(self); diff --git a/ext/wasm/tests/opfs/concurrency/worker.js b/ext/wasm/tests/opfs/concurrency/worker.js new file mode 100644 index 0000000000..7ba15bf8c1 --- /dev/null +++ b/ext/wasm/tests/opfs/concurrency/worker.js @@ -0,0 +1,95 @@ +importScripts( + (new URL(self.location.href).searchParams).get('sqlite3.dir') + '/sqlite3.js' +); +self.sqlite3InitModule().then(async function(sqlite3){ + const wName = Math.round(Math.random()*10000); + const wPost = (type,...payload)=>{ + postMessage({type, worker: wName, payload}); + }; + const stdout = (...args)=>wPost('stdout',...args); + const stderr = (...args)=>wPost('stderr',...args); + const postErr = (...args)=>wPost('error',...args); + if(!sqlite3.opfs){ + stderr("OPFS support not detected. Aborting."); + return; + } + + const wait = async (ms)=>{ + return new Promise((resolve)=>setTimeout(resolve,ms)); + }; + + const dbName = 'concurrency-tester.db'; + if((new URL(self.location.href).searchParams).has('unlink-db')){ + await sqlite3.opfs.unlink(dbName); + stdout("Unlinked",dbName); + } + wPost('loaded'); + + const run = async function(){ + const db = new sqlite3.opfs.OpfsDb(dbName); + //sqlite3.capi.sqlite3_busy_timeout(db.pointer, 2000); + db.transaction((db)=>{ + db.exec([ + "create table if not exists t1(w TEXT UNIQUE ON CONFLICT REPLACE,v);", + "create table if not exists t2(w TEXT UNIQUE ON CONFLICT REPLACE,v);" + ]); + }); + + const maxIterations = 10; + const interval = Object.assign(Object.create(null),{ + delay: 300, + handle: undefined, + count: 0 + }); + stdout("Starting interval-based db updates with delay of",interval.delay,"ms."); + const doWork = async ()=>{ + const tm = new Date().getTime(); + ++interval.count; + const prefix = "v(#"+interval.count+")"; + stdout("Setting",prefix,"=",tm); + try{ + db.exec({ + sql:"INSERT OR REPLACE INTO t1(w,v) VALUES(?,?)", + bind: [wName, new Date().getTime()] + }); + //stdout("Set",prefix); + }catch(e){ + 
interval.error = e; + } + }; + const finish = ()=>{ + if(interval.error) stderr("Ending work due to error:",interval.error.message); + else stdout("Ending work after",interval.count,"interval(s)"); + db.close(); + }; + if(1){/*use setInterval()*/ + interval.handle = setInterval(async ()=>{ + await doWork(); + if(interval.error || maxIterations === interval.count){ + clearInterval(interval.handle); + finish(); + } + }, interval.delay); + }else{ + /*This approach provides no concurrency whatsoever: each worker + is run to completion before any others can work.*/ + let i; + for(i = 0; i < maxIterations; ++i){ + await doWork(); + if(interval.error) break; + await wait(interval.delay); + } + finish(); + } + }/*run()*/; + + self.onmessage = function({data}){ + switch(data.type){ + case 'run': run().catch((e)=>postErr(e.message)); + break; + default: + stderr("Unhandled message type '"+data.type+"'."); + break; + } + }; +}); diff --git a/manifest b/manifest index 1c39089785..ccf4fd8d76 100644 --- a/manifest +++ b/manifest @@ -1,5 +1,5 @@ -C js\sdist:\saccount\sfor\sa\sfile\srename\sin\sthe\sprevious\scheckin. -D 2022-11-20T05:47:17.093 +C Add\stest\sapp\sfor\sexperimenting\swith\smulti-worker\sOPFS\sconcurrency.\sTweak\sOPFS\sVFS\sto\ssignificantly\simprove\sthe\sotherwise\s"unfortunate"\sconcurrency\ssituation. 
+D 2022-11-21T03:50:52.240 F .fossil-settings/empty-dirs dbb81e8fc0401ac46a1491ab34a7f2c7c0452f2f06b54ebb845d024ca8283ef1 F .fossil-settings/ignore-glob 35175cdfcf539b2318cb04a9901442804be81cd677d8b889fcc9149c21f239ea F LICENSE.md df5091916dbb40e6e9686186587125e1b2ff51f022cc334e886c19a0e9982724 @@ -491,10 +491,10 @@ F ext/wasm/EXPORTED_FUNCTIONS.fiddle.in 27450c8b8c70875a260aca55435ec927068b34ce F ext/wasm/GNUmakefile 712795c4893ea65f8d30fe414937a33b677a194dd58372b4074aee17039c845e F ext/wasm/README-dist.txt 2d670b426fc7c613b90a7d2f2b05b433088fe65181abead970980f0a4a75ea20 F ext/wasm/README.md ef39861aa21632fdbca0bdd469f78f0096f6449a720f3f39642594af503030e9 -F ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api 9120c2f8f51fa85f46dcf4dcb6b12f4a807d428f6089b99cdb08d8ddfcfd88b2 +F ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api b4d68c97d14944b48d55e06aa44f544a6f56a7fa2bcb6f9e030936a5b2a9479a F ext/wasm/api/EXPORTED_RUNTIME_METHODS.sqlite3-api 1ec3c73e7d66e95529c3c64ac3de2470b0e9e7fbf7a5b41261c367cf4f1b7287 F ext/wasm/api/README.md 29276a845e57004e82efba61fa5866fd05f9137380a1dc26dc4c6d65264cd81c -F ext/wasm/api/extern-post-js.js 59e52f579cd3a332d73dae94c91b9579daafb10dd6ada03803f1afa6bdad7689 +F ext/wasm/api/extern-post-js.js 31400dd1c0ae3458a0e6510229e59318e45eac402a75dd703c2950b9b5758b46 F ext/wasm/api/extern-pre-js.js cc61c09c7a24a07dbecb4c352453c3985170cec12b4e7e7e7a4d11d43c5c8f41 F ext/wasm/api/post-js-footer.js cd0a8ec768501d9bd45d325ab0442037fb0e33d1f3b4f08902f15c34720ee4a1 F ext/wasm/api/post-js-header.js d6ab3dfef4a06960d28a7eaa338d4e2a1a5981e9b38718168bbde8fdb2a439b8 @@ -502,11 +502,11 @@ F ext/wasm/api/pre-js.js b88499dc303c21fc3f55f2c364a0f814f587b60a95784303881169f F ext/wasm/api/sqlite3-api-cleanup.js ecdc69dbfccfe26146f04799fcfd4a6f5790d46e7e3b9b6e9b0491f92ed8ae34 F ext/wasm/api/sqlite3-api-glue.js 056f44b82c126358a0175e08a892d56fadfce177b0d7a0012502a6acf67ea6d5 F ext/wasm/api/sqlite3-api-oo1.js e9a83489bbb4838ce0aee46eaaa9350e0e25a5b926b565e4f5ae8e840e4fbaed -F 
ext/wasm/api/sqlite3-api-opfs.js b4ece97f94aacd408b37fbe5f6d6bb2cbfbed484ce700b17d1d446a55e6b7e81 -F ext/wasm/api/sqlite3-api-prologue.js fd526fa017fa2578673ca18158354515c719e719a5d93f2f6d0e43f39170430e +F ext/wasm/api/sqlite3-api-opfs.js 4c75ed11df5efff6bcd8dad4ad904d8b11efac2e1dd4cc2c84d1ee8ace4129ef +F ext/wasm/api/sqlite3-api-prologue.js 08e96d26d329e8c1e08813fe0b84ee93e0e78b087efdd6eb2809ae2672902437 F ext/wasm/api/sqlite3-api-worker1.js e94ba98e44afccfa482874cd9acb325883ade50ed1f9f9526beb9de1711f182f F ext/wasm/api/sqlite3-license-version-header.js a661182fc93fc2cf212dfd0b987f8e138a3ac98f850b1112e29b5fbdaecc87c3 -F ext/wasm/api/sqlite3-opfs-async-proxy.js 24d1c1982a012d998907105a4ff1ff6881bf462395e90c06326817701e69f093 +F ext/wasm/api/sqlite3-opfs-async-proxy.js 97cf1909670575eced940d36f1b5ea35c51a431d1035dc2f7ea6982faee97c1b F ext/wasm/api/sqlite3-wasi.h 25356084cfe0d40458a902afb465df8c21fc4152c1d0a59b563a3fba59a068f9 F ext/wasm/api/sqlite3-wasm.c 8fc8f47680df0e9a6c0f2f03cb004148645ecc983aa216daba09cb21f7e092a2 F ext/wasm/api/sqlite3-worker1-promiser.js 0c7a9826dbf82a5ed4e4f7bf7816e825a52aff253afbf3350431f5773faf0e4b @@ -534,7 +534,7 @@ F ext/wasm/fiddle/fiddle-worker.js b4a0c8ab6c0983218543ca771c45f6075449f63a1dcf2 F ext/wasm/fiddle/fiddle.js 974b995119ac443685d7d94d3b3c58c6a36540e9eb3fed7069d5653284071715 F ext/wasm/fiddle/index.html 5daf54e8f3d7777cbb1ca4f93affe28858dbfff25841cb4ab81d694efed28ec2 F ext/wasm/index-dist.html c4337617c4d6d4d0796827cec28ac81d128c6f911dcf888a290a32ad50890408 -F ext/wasm/index.html 5393ced912ee9af18cc8cefbda96fac922839d192d7c3d4ec4f4b42dd7f1cf8b +F ext/wasm/index.html 5be176de5be8ae96889798f803fef4f6a2ef31cee305a0430ca4629f6ae04c27 F ext/wasm/jaccwabyt/jaccwabyt.js 95f573de1826474c9605dda620ee622fcb1673ae74f191eb324c0853aa4dcb66 F ext/wasm/jaccwabyt/jaccwabyt.md 9aa6951b529a8b29f578ec8f0355713c39584c92cf1708f63ba0cf917cb5b68e F ext/wasm/module-symbols.html b8eebafef8e536624bbe5f7a3da40c07a9062b843dfd3161a0bb72cbb6763dc5 @@ 
-552,6 +552,9 @@ F ext/wasm/test-opfs-vfs.js 44363db07b2a20e73b0eb1808de4400ca71b703af718d0fa6d96 F ext/wasm/tester1-worker.html 5ef353348c37cf2e4fd0b23da562d3275523e036260b510734e9a3239ba8c987 F ext/wasm/tester1.c-pp.html 74aa9b31c75f12490653f814b53c3dd39f40cd3f70d6a53a716f4e8587107399 F ext/wasm/tester1.c-pp.js 0c129495d057c77788b59715152d51f9bf9002ebbcce759ef8b028272ce3519d +F ext/wasm/tests/opfs/concurrency/index.html c7cf329e5b206dd8226d94ab9fec02f5f350d8ed69a57c96d84e876afd3d3d1b +F ext/wasm/tests/opfs/concurrency/test.js 44cfcc04503593256abe2dd663349718f80ee7ab25e19eb066de220101bd604a +F ext/wasm/tests/opfs/concurrency/worker.js f8f3e4f9b21726bef354a74ec9c90f6736df5b16b4f655bfd16a3b9c6ee063ff F ext/wasm/version-info.c 3b36468a90faf1bbd59c65fd0eb66522d9f941eedd364fabccd72273503ae7d5 F ext/wasm/wasmfs.make 8fea9b4f3cde06141de1fc4c586ab405bd32c3f401554f4ebb18c797401a678d F install-sh 9d4de14ab9fb0facae2f48780b874848cbf2f895 x @@ -2056,8 +2059,8 @@ F vsixtest/vsixtest.tcl 6a9a6ab600c25a91a7acc6293828957a386a8a93 F vsixtest/vsixtest.vcxproj.data 2ed517e100c66dc455b492e1a33350c1b20fbcdc F vsixtest/vsixtest.vcxproj.filters 37e51ffedcdb064aad6ff33b6148725226cd608e F vsixtest/vsixtest_TemporaryKey.pfx e5b1b036facdb453873e7084e1cae9102ccc67a0 -P 100a596800eca61477d9880092465d594c22be3707f2a11aaf6eb9e234fc6f2d -R fd97f4afc2e7676e23b759586618b38f +P 469f9011a885e19b99210c5e3e582afa140b8b5f0aa7a720334848df5ab6ae98 +R c87fca3e6d0a9c36a2598013e36db2a5 U stephan -Z 42f144497d0f844aac38afbea462a28b +Z b0030359261e278f67d2690556943dbd # Remove this line to create a well-formed Fossil manifest. diff --git a/manifest.uuid b/manifest.uuid index 6d344e429d..2e8d22d1e3 100644 --- a/manifest.uuid +++ b/manifest.uuid @@ -1 +1 @@ -469f9011a885e19b99210c5e3e582afa140b8b5f0aa7a720334848df5ab6ae98 \ No newline at end of file +96f76e7616f8157a342b9e1c42f7b1feab200d182268871a2b25f67d4ee2564c \ No newline at end of file