// include: shell.js
|
|
// The Module object: Our interface to the outside world. We import
|
|
// and export values on it. There are various ways Module can be used:
|
|
// 1. Not defined. We create it here
|
|
// 2. A function parameter, function(moduleArg) => Promise<Module>
|
|
// 3. pre-run appended it, var Module = {}; ..generated code..
|
|
// 4. External script tag defines var Module.
|
|
// We need to check if Module already exists (e.g. case 3 above).
|
|
// This substitution will be replaced with actual code at a later stage of the
// build; that way Closure Compiler will not mangle it (e.g. case 4 above).
|
|
// Note that if you want to run closure, and also to use Module
|
|
// after the generated code, you will need to define var Module = {};
|
|
// before the code. Then that object will be used in the code, and you
|
|
// can continue to use Module afterwards as well.
|
|
var Module = typeof Module != 'undefined' ? Module : {};
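// Usage sketch (not part of the generated runtime): a hosting page can
// predefine Module before this script runs to override defaults such as
// print/printErr or to queue work, as in cases 3 and 4 above. The script
// filename below is assumed to match the .wasm name used later in this file.
//
//   <script>
//     var Module = {
//       print: (text) => console.log('[stdout] ' + text),
//       printErr: (text) => console.error('[stderr] ' + text),
//     };
//   </script>
//   <script src="minigzip.js"></script>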
|
|
|
|
// Determine the runtime environment we are in. You can customize this by
|
|
// setting the ENVIRONMENT setting at compile time (see settings.js).
|
|
|
|
// Attempt to auto-detect the environment
|
|
var ENVIRONMENT_IS_WEB = typeof window == 'object';
|
|
var ENVIRONMENT_IS_WORKER = typeof WorkerGlobalScope != 'undefined';
|
|
// N.B. the Electron.js environment is simultaneously a Node.js environment and
// also a web environment.
|
|
var ENVIRONMENT_IS_NODE = typeof process == 'object' && typeof process.versions == 'object' && typeof process.versions.node == 'string' && process.type != 'renderer';
|
|
var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
|
|
|
|
if (ENVIRONMENT_IS_NODE) {
|
|
|
|
}
|
|
|
|
// --pre-jses are emitted after the Module integration code, so that they can
|
|
// refer to Module (if they choose; they can also define Module)
|
|
|
|
|
|
// Sometimes an existing Module object exists with properties
|
|
// meant to overwrite the default module functionality. Here
|
|
// we collect those properties and reapply _after_ we configure
|
|
// the current environment's defaults to avoid having to be so
|
|
// defensive during initialization.
|
|
var moduleOverrides = {...Module};
|
|
|
|
var arguments_ = [];
|
|
var thisProgram = './this.program';
|
|
var quit_ = (status, toThrow) => {
|
|
throw toThrow;
|
|
};
|
|
|
|
// `/` should be present at the end if `scriptDirectory` is not empty
|
|
var scriptDirectory = '';
|
|
function locateFile(path) {
|
|
if (Module['locateFile']) {
|
|
return Module['locateFile'](path, scriptDirectory);
|
|
}
|
|
return scriptDirectory + path;
|
|
}
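// Usage sketch: the hosting page can redirect where companion files such as
// the .wasm binary are loaded from by supplying Module.locateFile. The CDN
// prefix here is hypothetical.
//
//   var Module = {
//     locateFile: (path, prefix) => {
//       if (path.endsWith('.wasm')) return 'https://cdn.example.com/wasm/' + path;
//       return prefix + path;
//     },
//   };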
|
|
|
|
// Hooks that are implemented differently in different runtime environments.
|
|
var readAsync, readBinary;
|
|
|
|
if (ENVIRONMENT_IS_NODE) {
|
|
if (typeof process == 'undefined' || !process.release || process.release.name !== 'node') throw new Error('not compiled for this environment (did you build to HTML and try to run it not on the web, or set ENVIRONMENT to something - like node - and run it someplace else - like on the web?)');
|
|
|
|
var nodeVersion = process.versions.node;
|
|
var numericVersion = nodeVersion.split('.').slice(0, 3);
|
|
numericVersion = (numericVersion[0] * 10000) + (numericVersion[1] * 100) + (numericVersion[2].split('-')[0] * 1);
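// e.g. node v16.13.1 packs to 16*10000 + 13*100 + 1 = 161301; any pre-release
// suffix (such as "-pre") is stripped from the patch component first.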
|
|
var minVersion = 160000;
|
|
if (numericVersion < 160000) {
|
|
throw new Error('This emscripten-generated code requires node v16.0.0 (detected v' + nodeVersion + ')');
|
|
}
|
|
|
|
// These modules will usually be used on Node.js. Load them eagerly to avoid
|
|
// the complexity of lazy-loading.
|
|
var fs = require('fs');
|
|
var nodePath = require('path');
|
|
|
|
scriptDirectory = __dirname + '/';
|
|
|
|
// include: node_shell_read.js
|
|
readBinary = (filename) => {
|
|
// We need to re-wrap `file://` strings to URLs.
|
|
filename = isFileURI(filename) ? new URL(filename) : filename;
|
|
var ret = fs.readFileSync(filename);
|
|
assert(Buffer.isBuffer(ret));
|
|
return ret;
|
|
};
|
|
|
|
readAsync = async (filename, binary = true) => {
|
|
// See the comment in the `readBinary` function.
|
|
filename = isFileURI(filename) ? new URL(filename) : filename;
|
|
var ret = fs.readFileSync(filename, binary ? undefined : 'utf8');
|
|
assert(binary ? Buffer.isBuffer(ret) : typeof ret == 'string');
|
|
return ret;
|
|
};
|
|
// end include: node_shell_read.js
|
|
if (!Module['thisProgram'] && process.argv.length > 1) {
|
|
thisProgram = process.argv[1].replace(/\\/g, '/');
|
|
}
|
|
|
|
arguments_ = process.argv.slice(2);
|
|
|
|
if (typeof module != 'undefined') {
|
|
module['exports'] = Module;
|
|
}
|
|
|
|
quit_ = (status, toThrow) => {
|
|
process.exitCode = status;
|
|
throw toThrow;
|
|
};
|
|
|
|
} else
|
|
if (ENVIRONMENT_IS_SHELL) {
|
|
|
|
if ((typeof process == 'object' && typeof require === 'function') || typeof window == 'object' || typeof WorkerGlobalScope != 'undefined') throw new Error('not compiled for this environment (did you build to HTML and try to run it not on the web, or set ENVIRONMENT to something - like node - and run it someplace else - like on the web?)');
|
|
|
|
} else
|
|
|
|
// Note that this includes Node.js workers when relevant (pthreads is enabled).
|
|
// Node.js workers are detected as a combination of ENVIRONMENT_IS_WORKER and
|
|
// ENVIRONMENT_IS_NODE.
|
|
if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
|
|
if (ENVIRONMENT_IS_WORKER) { // Check worker, not web, since window could be polyfilled
|
|
scriptDirectory = self.location.href;
|
|
} else if (typeof document != 'undefined' && document.currentScript) { // web
|
|
scriptDirectory = document.currentScript.src;
|
|
}
|
|
// blob urls look like blob:http://site.com/etc/etc and we cannot infer anything from them.
|
|
// otherwise, slice off the final part of the url to find the script directory.
|
|
// if scriptDirectory does not contain a slash, lastIndexOf will return -1,
|
|
// and scriptDirectory will correctly be replaced with an empty string.
|
|
// If scriptDirectory contains a query (starting with ?) or a fragment (starting with #),
|
|
// they are removed because they could contain a slash.
|
|
if (scriptDirectory.startsWith('blob:')) {
|
|
scriptDirectory = '';
|
|
} else {
|
|
scriptDirectory = scriptDirectory.slice(0, scriptDirectory.replace(/[?#].*/, '').lastIndexOf('/')+1);
|
|
}
|
|
|
|
if (!(typeof window == 'object' || typeof WorkerGlobalScope != 'undefined')) throw new Error('not compiled for this environment (did you build to HTML and try to run it not on the web, or set ENVIRONMENT to something - like node - and run it someplace else - like on the web?)');
|
|
|
|
{
|
|
// include: web_or_worker_shell_read.js
|
|
if (ENVIRONMENT_IS_WORKER) {
|
|
readBinary = (url) => {
|
|
var xhr = new XMLHttpRequest();
|
|
xhr.open('GET', url, false);
|
|
xhr.responseType = 'arraybuffer';
|
|
xhr.send(null);
|
|
return new Uint8Array(/** @type{!ArrayBuffer} */(xhr.response));
|
|
};
|
|
}
|
|
|
|
readAsync = async (url) => {
|
|
// Fetch has some additional restrictions over XHR, like it can't be used on a file:// url.
|
|
// See https://github.com/github/fetch/pull/92#issuecomment-140665932
|
|
// Cordova or Electron apps are typically loaded from a file:// url.
|
|
// So use XHR on webview if URL is a file URL.
|
|
if (isFileURI(url)) {
|
|
return new Promise((resolve, reject) => {
|
|
var xhr = new XMLHttpRequest();
|
|
xhr.open('GET', url, true);
|
|
xhr.responseType = 'arraybuffer';
|
|
xhr.onload = () => {
|
|
if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) { // file URLs can return 0
|
|
resolve(xhr.response);
|
|
return;
|
|
}
|
|
reject(xhr.status);
|
|
};
|
|
xhr.onerror = reject;
|
|
xhr.send(null);
|
|
});
|
|
}
|
|
var response = await fetch(url, { credentials: 'same-origin' });
|
|
if (response.ok) {
|
|
return response.arrayBuffer();
|
|
}
|
|
throw new Error(response.status + ' : ' + response.url);
|
|
};
|
|
// end include: web_or_worker_shell_read.js
|
|
}
|
|
} else
|
|
{
|
|
throw new Error('environment detection error');
|
|
}
|
|
|
|
var out = Module['print'] || console.log.bind(console);
|
|
var err = Module['printErr'] || console.error.bind(console);
|
|
|
|
// Merge back in the overrides
|
|
Object.assign(Module, moduleOverrides);
|
|
// Free the object hierarchy contained in the overrides, this lets the GC
|
|
// reclaim data used.
|
|
moduleOverrides = null;
|
|
checkIncomingModuleAPI();
|
|
|
|
// Emit code to handle expected values on the Module object. This applies Module.x
|
|
// to the proper local x. This has two benefits: first, we only emit it if it is
|
|
// expected to arrive, and second, by using a local everywhere else that can be
|
|
// minified.
|
|
|
|
if (Module['arguments']) arguments_ = Module['arguments'];
legacyModuleProp('arguments', 'arguments_');
|
|
|
|
if (Module['thisProgram']) thisProgram = Module['thisProgram'];
legacyModuleProp('thisProgram', 'thisProgram');
|
|
|
|
// perform assertions in shell.js after we set up out() and err(), as otherwise if an assertion fails it cannot print the message
|
|
// Assertions on removed incoming Module JS APIs.
|
|
assert(typeof Module['memoryInitializerPrefixURL'] == 'undefined', 'Module.memoryInitializerPrefixURL option was removed, use Module.locateFile instead');
|
|
assert(typeof Module['pthreadMainPrefixURL'] == 'undefined', 'Module.pthreadMainPrefixURL option was removed, use Module.locateFile instead');
|
|
assert(typeof Module['cdInitializerPrefixURL'] == 'undefined', 'Module.cdInitializerPrefixURL option was removed, use Module.locateFile instead');
|
|
assert(typeof Module['filePackagePrefixURL'] == 'undefined', 'Module.filePackagePrefixURL option was removed, use Module.locateFile instead');
|
|
assert(typeof Module['read'] == 'undefined', 'Module.read option was removed');
|
|
assert(typeof Module['readAsync'] == 'undefined', 'Module.readAsync option was removed (modify readAsync in JS)');
|
|
assert(typeof Module['readBinary'] == 'undefined', 'Module.readBinary option was removed (modify readBinary in JS)');
|
|
assert(typeof Module['setWindowTitle'] == 'undefined', 'Module.setWindowTitle option was removed (modify emscripten_set_window_title in JS)');
|
|
assert(typeof Module['TOTAL_MEMORY'] == 'undefined', 'Module.TOTAL_MEMORY has been renamed Module.INITIAL_MEMORY');
|
|
legacyModuleProp('asm', 'wasmExports');
|
|
legacyModuleProp('readAsync', 'readAsync');
|
|
legacyModuleProp('readBinary', 'readBinary');
|
|
legacyModuleProp('setWindowTitle', 'setWindowTitle');
|
|
var IDBFS = 'IDBFS is no longer included by default; build with -lidbfs.js';
|
|
var PROXYFS = 'PROXYFS is no longer included by default; build with -lproxyfs.js';
|
|
var WORKERFS = 'WORKERFS is no longer included by default; build with -lworkerfs.js';
|
|
var FETCHFS = 'FETCHFS is no longer included by default; build with -lfetchfs.js';
|
|
var ICASEFS = 'ICASEFS is no longer included by default; build with -licasefs.js';
|
|
var JSFILEFS = 'JSFILEFS is no longer included by default; build with -ljsfilefs.js';
|
|
var OPFS = 'OPFS is no longer included by default; build with -lopfs.js';
|
|
|
|
var NODEFS = 'NODEFS is no longer included by default; build with -lnodefs.js';
|
|
|
|
assert(!ENVIRONMENT_IS_SHELL, 'shell environment detected but not enabled at build time. Add `shell` to `-sENVIRONMENT` to enable.');
|
|
|
|
// end include: shell.js
|
|
|
|
// include: preamble.js
|
|
// === Preamble library stuff ===
|
|
|
|
// Documentation for the public APIs defined in this file must be updated in:
|
|
// site/source/docs/api_reference/preamble.js.rst
|
|
// A prebuilt local version of the documentation is available at:
|
|
// site/build/text/docs/api_reference/preamble.js.txt
|
|
// You can also build docs locally as HTML or other formats in site/
|
|
// An online HTML version (which may be of a different version of Emscripten)
|
|
// is up at http://kripken.github.io/emscripten-site/docs/api_reference/preamble.js.html
|
|
|
|
var wasmBinary = Module['wasmBinary'];
legacyModuleProp('wasmBinary', 'wasmBinary');
|
|
|
|
if (typeof WebAssembly != 'object') {
|
|
err('no native wasm support detected');
|
|
}
|
|
|
|
// Wasm globals
|
|
|
|
var wasmMemory;
|
|
|
|
//========================================
|
|
// Runtime essentials
|
|
//========================================
|
|
|
|
// whether we are quitting the application. no code should run after this.
|
|
// set in exit() and abort()
|
|
var ABORT = false;
|
|
|
|
// set by exit() and abort(). Passed to 'onExit' handler.
|
|
// NOTE: This is also used as the process return code in shell environments
|
|
// but only when noExitRuntime is false.
|
|
var EXITSTATUS;
|
|
|
|
// In STRICT mode, we only define assert() when ASSERTIONS is set. i.e. we
|
|
// don't define it at all in release modes. This matches the behaviour of
|
|
// MINIMAL_RUNTIME.
|
|
// TODO(sbc): Make this the default even without STRICT enabled.
|
|
/** @type {function(*, string=)} */
|
|
function assert(condition, text) {
|
|
if (!condition) {
|
|
abort('Assertion failed' + (text ? ': ' + text : ''));
|
|
}
|
|
}
|
|
|
|
// We used to include malloc/free by default in the past. Show a helpful error in
|
|
// builds with assertions.
|
|
function _malloc() {
|
|
abort('malloc() called but not included in the build - add `_malloc` to EXPORTED_FUNCTIONS');
|
|
}
|
|
function _free() {
|
|
// Show a helpful error since we used to include free by default in the past.
|
|
abort('free() called but not included in the build - add `_free` to EXPORTED_FUNCTIONS');
|
|
}
|
|
|
|
// Memory management
|
|
|
|
var HEAP,
|
|
/** @type {!Int8Array} */
|
|
HEAP8,
|
|
/** @type {!Uint8Array} */
|
|
HEAPU8,
|
|
/** @type {!Int16Array} */
|
|
HEAP16,
|
|
/** @type {!Uint16Array} */
|
|
HEAPU16,
|
|
/** @type {!Int32Array} */
|
|
HEAP32,
|
|
/** @type {!Uint32Array} */
|
|
HEAPU32,
|
|
/** @type {!Float32Array} */
|
|
HEAPF32,
|
|
/* BigInt64Array type is not correctly defined in closure
|
|
/** not-@type {!BigInt64Array} */
|
|
HEAP64,
|
|
/* BigUint64Array type is not correctly defined in closure
|
|
/** not-t@type {!BigUint64Array} */
|
|
HEAPU64,
|
|
/** @type {!Float64Array} */
|
|
HEAPF64;
|
|
|
|
var runtimeInitialized = false;
|
|
|
|
/**
|
|
* Indicates whether filename is delivered via file protocol (as opposed to http/https)
|
|
* @noinline
|
|
*/
|
|
var isFileURI = (filename) => filename.startsWith('file://');
|
|
|
|
// include: runtime_shared.js
|
|
// include: runtime_stack_check.js
|
|
// Initializes the stack cookie. Called at the startup of main and at the startup of each thread in pthreads mode.
|
|
function writeStackCookie() {
|
|
var max = _emscripten_stack_get_end();
|
|
assert((max & 3) == 0);
|
|
// If the stack ends at address zero we write our cookies 4 bytes into the
|
|
// stack. This prevents interference with SAFE_HEAP and ASAN which also
|
|
// monitor writes to address zero.
|
|
if (max == 0) {
|
|
max += 4;
|
|
}
|
|
// The stack grows downwards towards _emscripten_stack_get_end.
|
|
// We write cookies to the final two words in the stack and detect if they are
|
|
// ever overwritten.
|
|
HEAPU32[((max)>>2)] = 0x02135467;
|
|
HEAPU32[(((max)+(4))>>2)] = 0x89BACDFE;
|
|
// Also test the global address 0 for integrity.
|
|
HEAPU32[((0)>>2)] = 1668509029;
|
|
}
|
|
|
|
function checkStackCookie() {
|
|
if (ABORT) return;
|
|
var max = _emscripten_stack_get_end();
|
|
// See writeStackCookie().
|
|
if (max == 0) {
|
|
max += 4;
|
|
}
|
|
var cookie1 = HEAPU32[((max)>>2)];
|
|
var cookie2 = HEAPU32[(((max)+(4))>>2)];
|
|
if (cookie1 != 0x02135467 || cookie2 != 0x89BACDFE) {
|
|
abort(`Stack overflow! Stack cookie has been overwritten at ${ptrToString(max)}, expected hex dwords 0x89BACDFE and 0x2135467, but received ${ptrToString(cookie2)} ${ptrToString(cookie1)}`);
|
|
}
|
|
// Also test the global address 0 for integrity.
|
|
if (HEAPU32[((0)>>2)] != 0x63736d65 /* 'emsc' */) {
|
|
abort('Runtime error: The application has corrupted its heap memory area (address zero)!');
|
|
}
|
|
}
|
|
// end include: runtime_stack_check.js
|
|
// include: runtime_exceptions.js
|
|
// end include: runtime_exceptions.js
|
|
// include: runtime_debug.js
|
|
// Endianness check
|
|
(() => {
|
|
var h16 = new Int16Array(1);
|
|
var h8 = new Int8Array(h16.buffer);
|
|
h16[0] = 0x6373;
|
|
if (h8[0] !== 0x73 || h8[1] !== 0x63) throw 'Runtime error: expected the system to be little-endian! (Run with -sSUPPORT_BIG_ENDIAN to bypass)';
|
|
})();
|
|
|
|
if (Module['ENVIRONMENT']) {
|
|
throw new Error('Module.ENVIRONMENT has been deprecated. To force the environment, use the ENVIRONMENT compile-time option (for example, -sENVIRONMENT=web or -sENVIRONMENT=node)');
|
|
}
|
|
|
|
function legacyModuleProp(prop, newName, incoming=true) {
|
|
if (!Object.getOwnPropertyDescriptor(Module, prop)) {
|
|
Object.defineProperty(Module, prop, {
|
|
configurable: true,
|
|
get() {
|
|
let extra = incoming ? ' (the initial value can be provided on Module, but after startup the value is only looked for on a local variable of that name)' : '';
|
|
abort(`\`Module.${prop}\` has been replaced by \`${newName}\`` + extra);
|
|
|
|
}
|
|
});
|
|
}
|
|
}
|
|
|
|
function consumedModuleProp(prop) {
|
|
if (!Object.getOwnPropertyDescriptor(Module, prop)) {
|
|
Object.defineProperty(Module, prop, {
|
|
configurable: true,
|
|
set() {
|
|
abort(`Attempt to set \`Module.${prop}\` after it has already been processed. This can happen, for example, when code is injected via '--post-js' rather than '--pre-js'`);
|
|
|
|
}
|
|
});
|
|
}
|
|
}
|
|
|
|
function ignoredModuleProp(prop) {
|
|
if (Object.getOwnPropertyDescriptor(Module, prop)) {
|
|
abort(`\`Module.${prop}\` was supplied but \`${prop}\` not included in INCOMING_MODULE_JS_API`);
|
|
}
|
|
}
|
|
|
|
// forcing the filesystem exports a few things by default
|
|
function isExportedByForceFilesystem(name) {
|
|
return name === 'FS_createPath' ||
|
|
name === 'FS_createDataFile' ||
|
|
name === 'FS_createPreloadedFile' ||
|
|
name === 'FS_unlink' ||
|
|
name === 'addRunDependency' ||
|
|
// The old FS has some functionality that WasmFS lacks.
|
|
name === 'FS_createLazyFile' ||
|
|
name === 'FS_createDevice' ||
|
|
name === 'removeRunDependency';
|
|
}
|
|
|
|
/**
|
|
* Intercept access to a global symbol. This enables us to give informative
|
|
* warnings/errors when folks attempt to use symbols they did not include in
|
|
* their build, or symbols that no longer exist.
|
|
*/
|
|
function hookGlobalSymbolAccess(sym, func) {
|
|
if (typeof globalThis != 'undefined' && !Object.getOwnPropertyDescriptor(globalThis, sym)) {
|
|
Object.defineProperty(globalThis, sym, {
|
|
configurable: true,
|
|
get() {
|
|
func();
|
|
return undefined;
|
|
}
|
|
});
|
|
}
|
|
}
|
|
|
|
function missingGlobal(sym, msg) {
|
|
hookGlobalSymbolAccess(sym, () => {
|
|
warnOnce(`\`${sym}\` is no longer defined by emscripten. ${msg}`);
|
|
});
|
|
}
|
|
|
|
missingGlobal('buffer', 'Please use HEAP8.buffer or wasmMemory.buffer');
|
|
missingGlobal('asm', 'Please use wasmExports instead');
|
|
|
|
function missingLibrarySymbol(sym) {
|
|
hookGlobalSymbolAccess(sym, () => {
|
|
// Can't `abort()` here because it would break code that does runtime
|
|
// checks. e.g. `if (typeof SDL === 'undefined')`.
|
|
var msg = `\`${sym}\` is a library symbol and not included by default; add it to your library.js __deps or to DEFAULT_LIBRARY_FUNCS_TO_INCLUDE on the command line`;
|
|
// DEFAULT_LIBRARY_FUNCS_TO_INCLUDE requires the name as it appears in
|
|
// library.js, which means $name for a JS name with no prefix, or name
|
|
// for a JS name like _name.
|
|
var librarySymbol = sym;
|
|
if (!librarySymbol.startsWith('_')) {
|
|
librarySymbol = '$' + sym;
|
|
}
|
|
msg += ` (e.g. -sDEFAULT_LIBRARY_FUNCS_TO_INCLUDE='${librarySymbol}')`;
|
|
if (isExportedByForceFilesystem(sym)) {
|
|
msg += '. Alternatively, forcing filesystem support (-sFORCE_FILESYSTEM) can export this for you';
|
|
}
|
|
warnOnce(msg);
|
|
});
|
|
|
|
// Any symbol that is not included from the JS library is also (by definition)
|
|
// not exported on the Module object.
|
|
unexportedRuntimeSymbol(sym);
|
|
}
|
|
|
|
function unexportedRuntimeSymbol(sym) {
|
|
if (!Object.getOwnPropertyDescriptor(Module, sym)) {
|
|
Object.defineProperty(Module, sym, {
|
|
configurable: true,
|
|
get() {
|
|
var msg = `'${sym}' was not exported. add it to EXPORTED_RUNTIME_METHODS (see the Emscripten FAQ)`;
|
|
if (isExportedByForceFilesystem(sym)) {
|
|
msg += '. Alternatively, forcing filesystem support (-sFORCE_FILESYSTEM) can export this for you';
|
|
}
|
|
abort(msg);
|
|
}
|
|
});
|
|
}
|
|
}
|
|
|
|
var runtimeDebug = true; // Switch to false at runtime to disable logging at the right times
|
|
|
|
// Used by XXXXX_DEBUG settings to output debug messages.
|
|
function dbg(...args) {
|
|
if (!runtimeDebug && typeof runtimeDebug != 'undefined') return;
|
|
// TODO(sbc): Make this configurable somehow. It's not always convenient for
|
|
// logging to show up as warnings.
|
|
console.warn(...args);
|
|
}
|
|
// end include: runtime_debug.js
|
|
// include: memoryprofiler.js
|
|
// end include: memoryprofiler.js
|
|
|
|
|
|
function updateMemoryViews() {
|
|
var b = wasmMemory.buffer;
|
|
Module['HEAP8'] = HEAP8 = new Int8Array(b);
|
|
Module['HEAP16'] = HEAP16 = new Int16Array(b);
|
|
Module['HEAPU8'] = HEAPU8 = new Uint8Array(b);
|
|
Module['HEAPU16'] = HEAPU16 = new Uint16Array(b);
|
|
Module['HEAP32'] = HEAP32 = new Int32Array(b);
|
|
Module['HEAPU32'] = HEAPU32 = new Uint32Array(b);
|
|
Module['HEAPF32'] = HEAPF32 = new Float32Array(b);
|
|
Module['HEAPF64'] = HEAPF64 = new Float64Array(b);
|
|
Module['HEAP64'] = HEAP64 = new BigInt64Array(b);
|
|
Module['HEAPU64'] = HEAPU64 = new BigUint64Array(b);
|
|
}
|
|
|
|
// end include: runtime_shared.js
|
|
assert(!Module['STACK_SIZE'], 'STACK_SIZE can no longer be set at runtime. Use -sSTACK_SIZE at link time')
|
|
|
|
assert(typeof Int32Array != 'undefined' && typeof Float64Array !== 'undefined' && Int32Array.prototype.subarray != undefined && Int32Array.prototype.set != undefined,
|
|
'JS engine does not provide full typed array support');
|
|
|
|
// If memory is defined in wasm, the user can't provide it, or set INITIAL_MEMORY
|
|
assert(!Module['wasmMemory'], 'Use of `wasmMemory` detected. Use -sIMPORTED_MEMORY to define wasmMemory externally');
|
|
assert(!Module['INITIAL_MEMORY'], 'Detected runtime INITIAL_MEMORY setting. Use -sIMPORTED_MEMORY to define wasmMemory dynamically');
|
|
|
|
function preRun() {
|
|
if (Module['preRun']) {
|
|
if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']];
|
|
while (Module['preRun'].length) {
|
|
addOnPreRun(Module['preRun'].shift());
|
|
}
|
|
}
|
|
consumedModuleProp('preRun');
|
|
callRuntimeCallbacks(onPreRuns);
|
|
}
|
|
|
|
function initRuntime() {
|
|
assert(!runtimeInitialized);
|
|
runtimeInitialized = true;
|
|
|
|
checkStackCookie();
|
|
|
|
if (!Module['noFSInit'] && !FS.initialized) FS.init();
|
|
TTY.init();
|
|
|
|
wasmExports['__wasm_call_ctors']();
|
|
|
|
FS.ignorePermissions = false;
|
|
}
|
|
|
|
function preMain() {
|
|
checkStackCookie();
|
|
|
|
}
|
|
|
|
function postRun() {
|
|
checkStackCookie();
|
|
|
|
if (Module['postRun']) {
|
|
if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']];
|
|
while (Module['postRun'].length) {
|
|
addOnPostRun(Module['postRun'].shift());
|
|
}
|
|
}
|
|
consumedModuleProp('postRun');
|
|
|
|
callRuntimeCallbacks(onPostRuns);
|
|
}
|
|
|
|
// A counter of dependencies for calling run(). If we need to
|
|
// do asynchronous work before running, increment this and
|
|
// decrement it. Incrementing must happen in a place like
|
|
// Module.preRun (used by emcc to add file preloading).
|
|
// Note that you can add dependencies in preRun, even though
|
|
// it happens right before run - run will be postponed until
|
|
// the dependencies are met.
|
|
var runDependencies = 0;
|
|
var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled
|
|
var runDependencyTracking = {};
|
|
var runDependencyWatcher = null;
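// Usage sketch (hypothetical --pre-js code): custom asynchronous setup can
// hold back run() by pairing addRunDependency/removeRunDependency from
// Module.preRun, as described above. The fetched resource is hypothetical.
//
//   Module.preRun = [() => {
//     addRunDependency('fetch-config');
//     fetch('config.json')
//       .then((r) => r.json())
//       .then((cfg) => { Module.appConfig = cfg; })
//       .finally(() => removeRunDependency('fetch-config'));
//   }];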
|
|
|
|
function getUniqueRunDependency(id) {
|
|
var orig = id;
|
|
while (1) {
|
|
if (!runDependencyTracking[id]) return id;
|
|
id = orig + Math.random();
|
|
}
|
|
}
|
|
|
|
function addRunDependency(id) {
|
|
runDependencies++;
|
|
|
|
Module['monitorRunDependencies']?.(runDependencies);
|
|
|
|
if (id) {
|
|
assert(!runDependencyTracking[id]);
|
|
runDependencyTracking[id] = 1;
|
|
if (runDependencyWatcher === null && typeof setInterval != 'undefined') {
|
|
// Check for missing dependencies every few seconds
|
|
runDependencyWatcher = setInterval(() => {
|
|
if (ABORT) {
|
|
clearInterval(runDependencyWatcher);
|
|
runDependencyWatcher = null;
|
|
return;
|
|
}
|
|
var shown = false;
|
|
for (var dep in runDependencyTracking) {
|
|
if (!shown) {
|
|
shown = true;
|
|
err('still waiting on run dependencies:');
|
|
}
|
|
err(`dependency: ${dep}`);
|
|
}
|
|
if (shown) {
|
|
err('(end of list)');
|
|
}
|
|
}, 10000);
|
|
}
|
|
} else {
|
|
err('warning: run dependency added without ID');
|
|
}
|
|
}
|
|
|
|
function removeRunDependency(id) {
|
|
runDependencies--;
|
|
|
|
Module['monitorRunDependencies']?.(runDependencies);
|
|
|
|
if (id) {
|
|
assert(runDependencyTracking[id]);
|
|
delete runDependencyTracking[id];
|
|
} else {
|
|
err('warning: run dependency removed without ID');
|
|
}
|
|
if (runDependencies == 0) {
|
|
if (runDependencyWatcher !== null) {
|
|
clearInterval(runDependencyWatcher);
|
|
runDependencyWatcher = null;
|
|
}
|
|
if (dependenciesFulfilled) {
|
|
var callback = dependenciesFulfilled;
|
|
dependenciesFulfilled = null;
|
|
callback(); // can add another dependenciesFulfilled
|
|
}
|
|
}
|
|
}
|
|
|
|
/** @param {string|number=} what */
|
|
function abort(what) {
|
|
Module['onAbort']?.(what);
|
|
|
|
what = 'Aborted(' + what + ')';
|
|
// TODO(sbc): Should we remove printing and leave it up to whoever
|
|
// catches the exception?
|
|
err(what);
|
|
|
|
ABORT = true;
|
|
|
|
// Use a wasm runtime error, because a JS error might be seen as a foreign
|
|
// exception, which means we'd run destructors on it. We need the error to
|
|
// simply make the program stop.
|
|
// FIXME This approach does not work in Wasm EH because it currently does not assume
|
|
// all RuntimeErrors are from traps; it decides whether a RuntimeError is from
|
|
// a trap or not based on a hidden field within the object. So at the moment
|
|
// we don't have a way of throwing a wasm trap from JS. TODO Make a JS API that
|
|
// allows this in the wasm spec.
|
|
|
|
// Suppress closure compiler warning here. Closure compiler's builtin extern
|
|
// definition for WebAssembly.RuntimeError claims it takes no arguments even
|
|
// though it can.
|
|
// TODO(https://github.com/google/closure-compiler/pull/3913): Remove if/when upstream closure gets fixed.
|
|
/** @suppress {checkTypes} */
|
|
var e = new WebAssembly.RuntimeError(what);
|
|
|
|
// Throw the error whether or not MODULARIZE is set because abort is used
|
|
// in code paths apart from instantiation where an exception is expected
|
|
// to be thrown when abort is called.
|
|
throw e;
|
|
}
|
|
|
|
function createExportWrapper(name, nargs) {
|
|
return (...args) => {
|
|
assert(runtimeInitialized, `native function \`${name}\` called before runtime initialization`);
|
|
var f = wasmExports[name];
|
|
assert(f, `exported native function \`${name}\` not found`);
|
|
// Only assert for too many arguments. Too few can be valid since the missing arguments will be zero filled.
|
|
assert(args.length <= nargs, `native function \`${name}\` called with ${args.length} args but expects ${nargs}`);
|
|
return f(...args);
|
|
};
|
|
}
|
|
|
|
var wasmBinaryFile;
|
|
|
|
function findWasmBinary() {
|
|
return locateFile('minigzip.wasm');
|
|
}
|
|
|
|
function getBinarySync(file) {
|
|
if (file == wasmBinaryFile && wasmBinary) {
|
|
return new Uint8Array(wasmBinary);
|
|
}
|
|
if (readBinary) {
|
|
return readBinary(file);
|
|
}
|
|
throw 'both async and sync fetching of the wasm failed';
|
|
}
|
|
|
|
async function getWasmBinary(binaryFile) {
|
|
// If we don't have the binary yet, load it asynchronously using readAsync.
|
|
if (!wasmBinary) {
|
|
// Fetch the binary using readAsync
|
|
try {
|
|
var response = await readAsync(binaryFile);
|
|
return new Uint8Array(response);
|
|
} catch {
|
|
// Fall back to getBinarySync below.
|
|
}
|
|
}
|
|
|
|
// Otherwise, getBinarySync should be able to get it synchronously
|
|
return getBinarySync(binaryFile);
|
|
}
|
|
|
|
async function instantiateArrayBuffer(binaryFile, imports) {
|
|
try {
|
|
var binary = await getWasmBinary(binaryFile);
|
|
var instance = await WebAssembly.instantiate(binary, imports);
|
|
return instance;
|
|
} catch (reason) {
|
|
err(`failed to asynchronously prepare wasm: ${reason}`);
|
|
|
|
// Warn on some common problems.
|
|
if (isFileURI(wasmBinaryFile)) {
|
|
err(`warning: Loading from a file URI (${wasmBinaryFile}) is not supported in most browsers. See https://emscripten.org/docs/getting_started/FAQ.html#how-do-i-run-a-local-webserver-for-testing-why-does-my-program-stall-in-downloading-or-preparing`);
|
|
}
|
|
abort(reason);
|
|
}
|
|
}
|
|
|
|
async function instantiateAsync(binary, binaryFile, imports) {
|
|
if (!binary && typeof WebAssembly.instantiateStreaming == 'function'
|
|
// Don't use streaming for file:// delivered objects in a webview, fetch them synchronously.
|
|
&& !isFileURI(binaryFile)
|
|
// Avoid instantiateStreaming() on Node.js environment for now, as while
|
|
// Node.js v18.1.0 implements it, it does not have a full fetch()
|
|
// implementation yet.
|
|
//
|
|
// Reference:
|
|
// https://github.com/emscripten-core/emscripten/pull/16917
|
|
&& !ENVIRONMENT_IS_NODE
|
|
) {
|
|
try {
|
|
var response = fetch(binaryFile, { credentials: 'same-origin' });
|
|
var instantiationResult = await WebAssembly.instantiateStreaming(response, imports);
|
|
return instantiationResult;
|
|
} catch (reason) {
|
|
// We expect the most common failure cause to be a bad MIME type for the binary,
|
|
// in which case falling back to ArrayBuffer instantiation should work.
|
|
err(`wasm streaming compile failed: ${reason}`);
|
|
err('falling back to ArrayBuffer instantiation');
|
|
// fall back to instantiateArrayBuffer below
|
|
};
|
|
}
|
|
return instantiateArrayBuffer(binaryFile, imports);
|
|
}
|
|
|
|
function getWasmImports() {
|
|
// prepare imports
|
|
return {
|
|
'env': wasmImports,
|
|
'wasi_snapshot_preview1': wasmImports,
|
|
}
|
|
}
|
|
|
|
// Create the wasm instance.
|
|
// Receives the wasm imports, returns the exports.
|
|
async function createWasm() {
|
|
// Load the wasm module and create an instance of it using native support in the JS engine.
|
|
// handle a generated wasm instance, receiving its exports and
|
|
// performing other necessary setup
|
|
/** @param {WebAssembly.Module=} module*/
|
|
function receiveInstance(instance, module) {
|
|
wasmExports = instance.exports;
|
|
|
|
|
|
|
|
wasmMemory = wasmExports['memory'];
|
|
|
|
assert(wasmMemory, 'memory not found in wasm exports');
|
|
updateMemoryViews();
|
|
|
|
removeRunDependency('wasm-instantiate');
|
|
return wasmExports;
|
|
}
|
|
// wait for the pthread pool (if any)
|
|
addRunDependency('wasm-instantiate');
|
|
|
|
// Prefer streaming instantiation if available.
|
|
// Async compilation can be confusing when an error on the page overwrites Module
|
|
// (for example, if the order of elements is wrong, and the one defining Module is
|
|
// later), so we save Module and check it later.
|
|
var trueModule = Module;
|
|
function receiveInstantiationResult(result) {
|
|
// 'result' is a ResultObject object which has both the module and instance.
|
|
// receiveInstance() will swap in the exports (to Module.asm) so they can be called
|
|
assert(Module === trueModule, 'the Module object should not be replaced during async compilation - perhaps the order of HTML elements is wrong?');
|
|
trueModule = null;
|
|
// TODO: Due to Closure regression https://github.com/google/closure-compiler/issues/3193, the above line no longer optimizes out down to the following line.
|
|
// When the regression is fixed, can restore the above PTHREADS-enabled path.
|
|
return receiveInstance(result['instance']);
|
|
}
|
|
|
|
var info = getWasmImports();
|
|
|
|
// User shell pages can write their own Module.instantiateWasm = function(imports, successCallback) callback
|
|
// to manually instantiate the Wasm module themselves. This allows pages to
|
|
// run the instantiation parallel to any other async startup actions they are
|
|
// performing.
|
|
// Also pthreads and wasm workers initialize the wasm instance through this
|
|
// path.
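// Usage sketch of that callback (hypothetical page code): the page drives the
// instantiation itself and hands the resulting instance back through
// successCallback.
//
//   Module.instantiateWasm = (imports, successCallback) => {
//     WebAssembly.instantiateStreaming(fetch('minigzip.wasm'), imports)
//       .then((result) => successCallback(result.instance, result.module));
//     return {}; // the return value is not used by the code path below
//   };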
|
|
if (Module['instantiateWasm']) {
|
|
return new Promise((resolve, reject) => {
|
|
try {
|
|
Module['instantiateWasm'](info, (mod, inst) => {
|
|
receiveInstance(mod, inst);
|
|
resolve(mod.exports);
|
|
});
|
|
} catch(e) {
|
|
err(`Module.instantiateWasm callback failed with error: ${e}`);
|
|
reject(e);
|
|
}
|
|
});
|
|
}
|
|
|
|
wasmBinaryFile ??= findWasmBinary();
|
|
var result = await instantiateAsync(wasmBinary, wasmBinaryFile, info);
|
|
var exports = receiveInstantiationResult(result);
|
|
return exports;
|
|
}
|
|
|
|
// end include: preamble.js
|
|
|
|
// Begin JS library code
|
|
|
|
|
|
class ExitStatus {
|
|
name = 'ExitStatus';
|
|
constructor(status) {
|
|
this.message = `Program terminated with exit(${status})`;
|
|
this.status = status;
|
|
}
|
|
}
|
|
|
|
var callRuntimeCallbacks = (callbacks) => {
|
|
while (callbacks.length > 0) {
|
|
// Pass the module as the first argument.
|
|
callbacks.shift()(Module);
|
|
}
|
|
};
|
|
var onPostRuns = [];
|
|
var addOnPostRun = (cb) => onPostRuns.unshift(cb);
|
|
|
|
var onPreRuns = [];
|
|
var addOnPreRun = (cb) => onPreRuns.unshift(cb);
|
|
|
|
|
|
|
|
/**
|
|
* @param {number} ptr
|
|
* @param {string} type
|
|
*/
|
|
function getValue(ptr, type = 'i8') {
|
|
if (type.endsWith('*')) type = '*';
|
|
switch (type) {
|
|
case 'i1': return HEAP8[ptr];
|
|
case 'i8': return HEAP8[ptr];
|
|
case 'i16': return HEAP16[((ptr)>>1)];
|
|
case 'i32': return HEAP32[((ptr)>>2)];
|
|
case 'i64': return HEAP64[((ptr)>>3)];
|
|
case 'float': return HEAPF32[((ptr)>>2)];
|
|
case 'double': return HEAPF64[((ptr)>>3)];
|
|
case '*': return HEAPU32[((ptr)>>2)];
|
|
default: abort(`invalid type for getValue: ${type}`);
|
|
}
|
|
}
|
|
|
|
var noExitRuntime = Module['noExitRuntime'] || true;
|
|
|
|
var ptrToString = (ptr) => {
|
|
assert(typeof ptr === 'number');
|
|
// With CAN_ADDRESS_2GB or MEMORY64, pointers are already unsigned.
|
|
ptr >>>= 0;
|
|
return '0x' + ptr.toString(16).padStart(8, '0');
|
|
};
|
|
|
|
|
|
/**
|
|
* @param {number} ptr
|
|
* @param {number} value
|
|
* @param {string} type
|
|
*/
|
|
function setValue(ptr, value, type = 'i8') {
|
|
if (type.endsWith('*')) type = '*';
|
|
switch (type) {
|
|
case 'i1': HEAP8[ptr] = value; break;
|
|
case 'i8': HEAP8[ptr] = value; break;
|
|
case 'i16': HEAP16[((ptr)>>1)] = value; break;
|
|
case 'i32': HEAP32[((ptr)>>2)] = value; break;
|
|
case 'i64': HEAP64[((ptr)>>3)] = BigInt(value); break;
|
|
case 'float': HEAPF32[((ptr)>>2)] = value; break;
|
|
case 'double': HEAPF64[((ptr)>>3)] = value; break;
|
|
case '*': HEAPU32[((ptr)>>2)] = value; break;
|
|
default: abort(`invalid type for setValue: ${type}`);
|
|
}
|
|
}
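// Usage sketch: given a valid address obtained from the wasm side (for example
// the return value of an exported C function), values can be read and written
// by naming the C type as a string. `ptr` here is assumed to be such an address.
//
//   setValue(ptr, 42, 'i32');       // store a 32-bit int at ptr
//   var n = getValue(ptr, 'i32');   // read it back (42)
//   var p = getValue(ptr + 8, '*'); // read a pointer-sized field at ptr+8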
|
|
|
|
var stackRestore = (val) => __emscripten_stack_restore(val);
|
|
|
|
var stackSave = () => _emscripten_stack_get_current();
|
|
|
|
var warnOnce = (text) => {
|
|
warnOnce.shown ||= {};
|
|
if (!warnOnce.shown[text]) {
|
|
warnOnce.shown[text] = 1;
|
|
if (ENVIRONMENT_IS_NODE) text = 'warning: ' + text;
|
|
err(text);
|
|
}
|
|
};
|
|
|
|
/** @suppress {duplicate } */
|
|
var syscallGetVarargI = () => {
|
|
assert(SYSCALLS.varargs != undefined);
|
|
// the `+` prepended here is necessary to convince the JSCompiler that varargs is indeed a number.
|
|
var ret = HEAP32[((+SYSCALLS.varargs)>>2)];
|
|
SYSCALLS.varargs += 4;
|
|
return ret;
|
|
};
|
|
var syscallGetVarargP = syscallGetVarargI;
|
|
|
|
|
|
var PATH = {
|
|
isAbs:(path) => path.charAt(0) === '/',
|
|
splitPath:(filename) => {
|
|
var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
|
|
return splitPathRe.exec(filename).slice(1);
|
|
},
|
|
normalizeArray:(parts, allowAboveRoot) => {
|
|
// if the path tries to go above the root, `up` ends up > 0
|
|
var up = 0;
|
|
for (var i = parts.length - 1; i >= 0; i--) {
|
|
var last = parts[i];
|
|
if (last === '.') {
|
|
parts.splice(i, 1);
|
|
} else if (last === '..') {
|
|
parts.splice(i, 1);
|
|
up++;
|
|
} else if (up) {
|
|
parts.splice(i, 1);
|
|
up--;
|
|
}
|
|
}
|
|
// if the path is allowed to go above the root, restore leading ..s
|
|
if (allowAboveRoot) {
|
|
for (; up; up--) {
|
|
parts.unshift('..');
|
|
}
|
|
}
|
|
return parts;
|
|
},
|
|
normalize:(path) => {
|
|
var isAbsolute = PATH.isAbs(path),
|
|
trailingSlash = path.slice(-1) === '/';
|
|
// Normalize the path
|
|
path = PATH.normalizeArray(path.split('/').filter((p) => !!p), !isAbsolute).join('/');
|
|
if (!path && !isAbsolute) {
|
|
path = '.';
|
|
}
|
|
if (path && trailingSlash) {
|
|
path += '/';
|
|
}
|
|
return (isAbsolute ? '/' : '') + path;
|
|
},
|
|
dirname:(path) => {
|
|
var result = PATH.splitPath(path),
|
|
root = result[0],
|
|
dir = result[1];
|
|
if (!root && !dir) {
|
|
// No dirname whatsoever
|
|
return '.';
|
|
}
|
|
if (dir) {
|
|
// It has a dirname, strip trailing slash
|
|
dir = dir.slice(0, -1);
|
|
}
|
|
return root + dir;
|
|
},
|
|
basename:(path) => path && path.match(/([^\/]+|\/)\/*$/)[1],
|
|
join:(...paths) => PATH.normalize(paths.join('/')),
|
|
join2:(l, r) => PATH.normalize(l + '/' + r),
|
|
};
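// Examples of the helpers above (pure string manipulation, no filesystem access):
//
//   PATH.normalize('/a/b/../c/');   // => '/a/c/'
//   PATH.join('a', 'b', '..', 'c'); // => 'a/c'
//   PATH.dirname('/a/b/c.txt');     // => '/a/b'
//   PATH.basename('/a/b/c.txt');    // => 'c.txt'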
|
|
|
|
var initRandomFill = () => {
|
|
// This block is not needed on v19+ since crypto.getRandomValues is builtin
|
|
if (ENVIRONMENT_IS_NODE) {
|
|
var nodeCrypto = require('crypto');
|
|
return (view) => nodeCrypto.randomFillSync(view);
|
|
}
|
|
|
|
return (view) => crypto.getRandomValues(view);
|
|
};
|
|
var randomFill = (view) => {
|
|
// Lazily init on the first invocation.
|
|
(randomFill = initRandomFill())(view);
|
|
};
|
|
|
|
|
|
|
|
var PATH_FS = {
|
|
resolve:(...args) => {
|
|
var resolvedPath = '',
|
|
resolvedAbsolute = false;
|
|
for (var i = args.length - 1; i >= -1 && !resolvedAbsolute; i--) {
|
|
var path = (i >= 0) ? args[i] : FS.cwd();
|
|
// Skip empty and invalid entries
|
|
if (typeof path != 'string') {
|
|
throw new TypeError('Arguments to path.resolve must be strings');
|
|
} else if (!path) {
|
|
return ''; // an invalid portion invalidates the whole thing
|
|
}
|
|
resolvedPath = path + '/' + resolvedPath;
|
|
resolvedAbsolute = PATH.isAbs(path);
|
|
}
|
|
// At this point the path should be resolved to a full absolute path, but
|
|
// handle relative paths to be safe (might happen when process.cwd() fails)
|
|
resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter((p) => !!p), !resolvedAbsolute).join('/');
|
|
return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.';
|
|
},
|
|
relative:(from, to) => {
|
|
from = PATH_FS.resolve(from).slice(1);
|
|
to = PATH_FS.resolve(to).slice(1);
|
|
function trim(arr) {
|
|
var start = 0;
|
|
for (; start < arr.length; start++) {
|
|
if (arr[start] !== '') break;
|
|
}
|
|
var end = arr.length - 1;
|
|
for (; end >= 0; end--) {
|
|
if (arr[end] !== '') break;
|
|
}
|
|
if (start > end) return [];
|
|
return arr.slice(start, end - start + 1);
|
|
}
|
|
var fromParts = trim(from.split('/'));
|
|
var toParts = trim(to.split('/'));
|
|
var length = Math.min(fromParts.length, toParts.length);
|
|
var samePartsLength = length;
|
|
for (var i = 0; i < length; i++) {
|
|
if (fromParts[i] !== toParts[i]) {
|
|
samePartsLength = i;
|
|
break;
|
|
}
|
|
}
|
|
var outputParts = [];
|
|
for (var i = samePartsLength; i < fromParts.length; i++) {
|
|
outputParts.push('..');
|
|
}
|
|
outputParts = outputParts.concat(toParts.slice(samePartsLength));
|
|
return outputParts.join('/');
|
|
},
|
|
};
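// Examples, assuming FS.cwd() returns '/home/web_user' (the usual default):
//
//   PATH_FS.resolve('data', 'file.txt');       // => '/home/web_user/data/file.txt'
//   PATH_FS.resolve('/tmp', 'a', '/usr', 'b'); // => '/usr/b' (rightmost absolute path wins)
//   PATH_FS.relative('/data/a', '/data/b/c');  // => '../b/c'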
|
|
|
|
|
|
var UTF8Decoder = typeof TextDecoder != 'undefined' ? new TextDecoder() : undefined;
|
|
|
|
/**
|
|
* Given a pointer 'idx' to a null-terminated UTF8-encoded string in the given
|
|
* array that contains uint8 values, returns a copy of that string as a
|
|
* Javascript String object.
|
|
* heapOrArray is either a regular array, or a JavaScript typed array view.
|
|
* @param {number=} idx
|
|
* @param {number=} maxBytesToRead
|
|
* @return {string}
|
|
*/
|
|
var UTF8ArrayToString = (heapOrArray, idx = 0, maxBytesToRead = NaN) => {
|
|
var endIdx = idx + maxBytesToRead;
|
|
var endPtr = idx;
|
|
// TextDecoder needs to know the byte length in advance, it doesn't stop on
|
|
// null terminator by itself. Also, use the length info to avoid running tiny
|
|
// strings through TextDecoder, since .subarray() allocates garbage.
|
|
// (As a tiny code-size-saving trick, compare endPtr against endIdx using a
// negation, so that undefined/NaN means Infinity.)
|
|
while (heapOrArray[endPtr] && !(endPtr >= endIdx)) ++endPtr;
|
|
|
|
if (endPtr - idx > 16 && heapOrArray.buffer && UTF8Decoder) {
|
|
return UTF8Decoder.decode(heapOrArray.subarray(idx, endPtr));
|
|
}
|
|
var str = '';
|
|
// If building with TextDecoder, we have already computed the string length
|
|
// above, so test loop end condition against that
|
|
while (idx < endPtr) {
|
|
// For UTF8 byte structure, see:
|
|
// http://en.wikipedia.org/wiki/UTF-8#Description
|
|
// https://www.ietf.org/rfc/rfc2279.txt
|
|
// https://tools.ietf.org/html/rfc3629
|
|
var u0 = heapOrArray[idx++];
|
|
if (!(u0 & 0x80)) { str += String.fromCharCode(u0); continue; }
|
|
var u1 = heapOrArray[idx++] & 63;
|
|
if ((u0 & 0xE0) == 0xC0) { str += String.fromCharCode(((u0 & 31) << 6) | u1); continue; }
|
|
var u2 = heapOrArray[idx++] & 63;
|
|
if ((u0 & 0xF0) == 0xE0) {
|
|
u0 = ((u0 & 15) << 12) | (u1 << 6) | u2;
|
|
} else {
|
|
if ((u0 & 0xF8) != 0xF0) warnOnce('Invalid UTF-8 leading byte ' + ptrToString(u0) + ' encountered when deserializing a UTF-8 string in wasm memory to a JS string!');
|
|
u0 = ((u0 & 7) << 18) | (u1 << 12) | (u2 << 6) | (heapOrArray[idx++] & 63);
|
|
}
|
|
|
|
if (u0 < 0x10000) {
|
|
str += String.fromCharCode(u0);
|
|
} else {
|
|
var ch = u0 - 0x10000;
|
|
str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF));
|
|
}
|
|
}
|
|
return str;
|
|
};
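// Example: decoding directly from a plain byte array (no heap involved); the
// NUL byte terminates the string:
//
//   UTF8ArrayToString(new Uint8Array([0x68, 0x69, 0xc3, 0xa9, 0x00])); // => 'hié'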
|
|
|
|
var FS_stdin_getChar_buffer = [];
|
|
|
|
var lengthBytesUTF8 = (str) => {
|
|
var len = 0;
|
|
for (var i = 0; i < str.length; ++i) {
|
|
// Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code
|
|
// unit, not a Unicode code point of the character! So decode
|
|
// UTF16->UTF32->UTF8.
|
|
// See http://unicode.org/faq/utf_bom.html#utf16-3
|
|
var c = str.charCodeAt(i); // possibly a lead surrogate
|
|
if (c <= 0x7F) {
|
|
len++;
|
|
} else if (c <= 0x7FF) {
|
|
len += 2;
|
|
} else if (c >= 0xD800 && c <= 0xDFFF) {
|
|
len += 4; ++i;
|
|
} else {
|
|
len += 3;
|
|
}
|
|
}
|
|
return len;
|
|
};
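// Example byte counts (the null terminator is not included):
//
//   lengthBytesUTF8('abc'); // => 3 (ASCII, 1 byte each)
//   lengthBytesUTF8('é');   // => 2
//   lengthBytesUTF8('€');   // => 3
//   lengthBytesUTF8('😀');  // => 4 (a surrogate pair counts as one 4-byte code point)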
|
|
|
|
var stringToUTF8Array = (str, heap, outIdx, maxBytesToWrite) => {
|
|
assert(typeof str === 'string', `stringToUTF8Array expects a string (got ${typeof str})`);
|
|
// Parameter maxBytesToWrite is not optional. Negative values, 0, null,
|
|
// undefined and false each don't write out any bytes.
|
|
if (!(maxBytesToWrite > 0))
|
|
return 0;
|
|
|
|
var startIdx = outIdx;
|
|
var endIdx = outIdx + maxBytesToWrite - 1; // -1 for string null terminator.
|
|
for (var i = 0; i < str.length; ++i) {
|
|
// Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code
|
|
// unit, not a Unicode code point of the character! So decode
|
|
// UTF16->UTF32->UTF8.
|
|
// See http://unicode.org/faq/utf_bom.html#utf16-3
|
|
// For UTF8 byte structure, see http://en.wikipedia.org/wiki/UTF-8#Description
|
|
// and https://www.ietf.org/rfc/rfc2279.txt
|
|
// and https://tools.ietf.org/html/rfc3629
|
|
var u = str.charCodeAt(i); // possibly a lead surrogate
|
|
if (u >= 0xD800 && u <= 0xDFFF) {
|
|
var u1 = str.charCodeAt(++i);
|
|
u = 0x10000 + ((u & 0x3FF) << 10) | (u1 & 0x3FF);
|
|
}
|
|
if (u <= 0x7F) {
|
|
if (outIdx >= endIdx) break;
|
|
heap[outIdx++] = u;
|
|
} else if (u <= 0x7FF) {
|
|
if (outIdx + 1 >= endIdx) break;
|
|
heap[outIdx++] = 0xC0 | (u >> 6);
|
|
heap[outIdx++] = 0x80 | (u & 63);
|
|
} else if (u <= 0xFFFF) {
|
|
if (outIdx + 2 >= endIdx) break;
|
|
heap[outIdx++] = 0xE0 | (u >> 12);
|
|
heap[outIdx++] = 0x80 | ((u >> 6) & 63);
|
|
heap[outIdx++] = 0x80 | (u & 63);
|
|
} else {
|
|
if (outIdx + 3 >= endIdx) break;
|
|
if (u > 0x10FFFF) warnOnce('Invalid Unicode code point ' + ptrToString(u) + ' encountered when serializing a JS string to a UTF-8 string in wasm memory! (Valid unicode code points should be in range 0-0x10FFFF).');
|
|
heap[outIdx++] = 0xF0 | (u >> 18);
|
|
heap[outIdx++] = 0x80 | ((u >> 12) & 63);
|
|
heap[outIdx++] = 0x80 | ((u >> 6) & 63);
|
|
heap[outIdx++] = 0x80 | (u & 63);
|
|
}
|
|
}
|
|
// Null-terminate the written data in the buffer.
|
|
heap[outIdx] = 0;
|
|
return outIdx - startIdx;
|
|
};
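// Example: encoding 'hé' into a scratch array; the return value is the number
// of bytes written, not counting the null terminator:
//
//   var buf = new Uint8Array(8);
//   stringToUTF8Array('hé', buf, 0, buf.length); // => 3, buf starts with [0x68, 0xc3, 0xa9, 0x00]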
|
|
/** @type {function(string, boolean=, number=)} */
|
|
var intArrayFromString = (stringy, dontAddNull, length) => {
|
|
var len = length > 0 ? length : lengthBytesUTF8(stringy)+1;
|
|
var u8array = new Array(len);
|
|
var numBytesWritten = stringToUTF8Array(stringy, u8array, 0, u8array.length);
|
|
if (dontAddNull) u8array.length = numBytesWritten;
|
|
return u8array;
|
|
};
|
|
var FS_stdin_getChar = () => {
|
|
if (!FS_stdin_getChar_buffer.length) {
|
|
var result = null;
|
|
if (ENVIRONMENT_IS_NODE) {
|
|
// we will read data in chunks of BUFSIZE
|
|
var BUFSIZE = 256;
|
|
var buf = Buffer.alloc(BUFSIZE);
|
|
var bytesRead = 0;
|
|
|
|
// For some reason we must suppress a closure warning here, even though
|
|
// fd definitely exists on process.stdin, and is even the proper way to
|
|
// get the fd of stdin,
|
|
// https://github.com/nodejs/help/issues/2136#issuecomment-523649904
|
|
// This started to happen after moving this logic out of library_tty.js,
|
|
// so it is related to the surrounding code in some unclear manner.
|
|
/** @suppress {missingProperties} */
|
|
var fd = process.stdin.fd;
|
|
|
|
try {
|
|
bytesRead = fs.readSync(fd, buf, 0, BUFSIZE);
|
|
} catch(e) {
|
|
// Cross-platform differences: on Windows, reading EOF throws an
|
|
// exception, but on other OSes, reading EOF returns 0. Unify the
// behavior by treating the EOF exception as returning 0.
|
|
if (e.toString().includes('EOF')) bytesRead = 0;
|
|
else throw e;
|
|
}
|
|
|
|
if (bytesRead > 0) {
|
|
result = buf.slice(0, bytesRead).toString('utf-8');
|
|
}
|
|
} else
|
|
if (typeof window != 'undefined' &&
|
|
typeof window.prompt == 'function') {
|
|
// Browser.
|
|
result = window.prompt('Input: '); // returns null on cancel
|
|
if (result !== null) {
|
|
result += '\n';
|
|
}
|
|
} else
|
|
{}
|
|
if (!result) {
|
|
return null;
|
|
}
|
|
FS_stdin_getChar_buffer = intArrayFromString(result, true);
|
|
}
|
|
return FS_stdin_getChar_buffer.shift();
|
|
};
|
|
var TTY = {
|
|
ttys:[],
|
|
init() {
|
|
// https://github.com/emscripten-core/emscripten/pull/1555
|
|
// if (ENVIRONMENT_IS_NODE) {
|
|
// // currently, FS.init does not distinguish if process.stdin is a file or TTY
|
|
// // device, it always assumes it's a TTY device. because of this, we're forcing
|
|
// // process.stdin to UTF8 encoding to at least make stdin reading compatible
|
|
// // with text files until FS.init can be refactored.
|
|
// process.stdin.setEncoding('utf8');
|
|
// }
|
|
},
|
|
shutdown() {
|
|
// https://github.com/emscripten-core/emscripten/pull/1555
|
|
// if (ENVIRONMENT_IS_NODE) {
|
|
// // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)?
|
|
// // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation
|
|
// // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists?
|
|
// // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle
|
|
// // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call
|
|
// process.stdin.pause();
|
|
// }
|
|
},
|
|
register(dev, ops) {
|
|
TTY.ttys[dev] = { input: [], output: [], ops: ops };
|
|
FS.registerDevice(dev, TTY.stream_ops);
|
|
},
|
|
stream_ops:{
|
|
open(stream) {
|
|
var tty = TTY.ttys[stream.node.rdev];
|
|
if (!tty) {
|
|
throw new FS.ErrnoError(43);
|
|
}
|
|
stream.tty = tty;
|
|
stream.seekable = false;
|
|
},
|
|
close(stream) {
|
|
// flush any pending line data
|
|
stream.tty.ops.fsync(stream.tty);
|
|
},
|
|
fsync(stream) {
|
|
stream.tty.ops.fsync(stream.tty);
|
|
},
|
|
read(stream, buffer, offset, length, pos /* ignored */) {
|
|
if (!stream.tty || !stream.tty.ops.get_char) {
|
|
throw new FS.ErrnoError(60);
|
|
}
|
|
var bytesRead = 0;
|
|
for (var i = 0; i < length; i++) {
|
|
var result;
|
|
try {
|
|
result = stream.tty.ops.get_char(stream.tty);
|
|
} catch (e) {
|
|
throw new FS.ErrnoError(29);
|
|
}
|
|
if (result === undefined && bytesRead === 0) {
|
|
throw new FS.ErrnoError(6);
|
|
}
|
|
if (result === null || result === undefined) break;
|
|
bytesRead++;
|
|
buffer[offset+i] = result;
|
|
}
|
|
if (bytesRead) {
|
|
stream.node.atime = Date.now();
|
|
}
|
|
return bytesRead;
|
|
},
|
|
write(stream, buffer, offset, length, pos) {
|
|
if (!stream.tty || !stream.tty.ops.put_char) {
|
|
throw new FS.ErrnoError(60);
|
|
}
|
|
try {
|
|
for (var i = 0; i < length; i++) {
|
|
stream.tty.ops.put_char(stream.tty, buffer[offset+i]);
|
|
}
|
|
} catch (e) {
|
|
throw new FS.ErrnoError(29);
|
|
}
|
|
if (length) {
|
|
stream.node.mtime = stream.node.ctime = Date.now();
|
|
}
|
|
return i;
|
|
},
|
|
},
|
|
default_tty_ops:{
|
|
get_char(tty) {
|
|
return FS_stdin_getChar();
|
|
},
|
|
put_char(tty, val) {
|
|
if (val === null || val === 10) {
|
|
out(UTF8ArrayToString(tty.output));
|
|
tty.output = [];
|
|
} else {
|
|
if (val != 0) tty.output.push(val); // val == 0 would cut text output off in the middle.
|
|
}
|
|
},
|
|
fsync(tty) {
|
|
if (tty.output?.length > 0) {
|
|
out(UTF8ArrayToString(tty.output));
|
|
tty.output = [];
|
|
}
|
|
},
|
|
ioctl_tcgets(tty) {
|
|
// typical setting
|
|
return {
|
|
c_iflag: 25856,
|
|
c_oflag: 5,
|
|
c_cflag: 191,
|
|
c_lflag: 35387,
|
|
c_cc: [
|
|
0x03, 0x1c, 0x7f, 0x15, 0x04, 0x00, 0x01, 0x00, 0x11, 0x13, 0x1a, 0x00,
|
|
0x12, 0x0f, 0x17, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
|
]
|
|
};
|
|
},
|
|
ioctl_tcsets(tty, optional_actions, data) {
|
|
// currently just ignore
|
|
return 0;
|
|
},
|
|
ioctl_tiocgwinsz(tty) {
|
|
return [24, 80];
|
|
},
|
|
},
|
|
default_tty1_ops:{
|
|
put_char(tty, val) {
|
|
if (val === null || val === 10) {
|
|
err(UTF8ArrayToString(tty.output));
|
|
tty.output = [];
|
|
} else {
|
|
if (val != 0) tty.output.push(val);
|
|
}
|
|
},
|
|
fsync(tty) {
|
|
if (tty.output?.length > 0) {
|
|
err(UTF8ArrayToString(tty.output));
|
|
tty.output = [];
|
|
}
|
|
},
|
|
},
|
|
};
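// Example of the buffering above: printf("ab\ncd") on the C side reaches
// put_char one byte at a time; 'a' and 'b' are collected in tty.output, the
// '\n' (10) flushes "ab" through out()/err(), and "cd" stays buffered until
// the next newline or an explicit fflush (which lands in fsync).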
|
|
|
|
|
|
var mmapAlloc = (size) => {
|
|
abort('internal error: mmapAlloc called but `emscripten_builtin_memalign` native symbol not exported');
|
|
};
|
|
var MEMFS = {
|
|
ops_table:null,
|
|
mount(mount) {
|
|
return MEMFS.createNode(null, '/', 16895, 0);
|
|
},
|
|
createNode(parent, name, mode, dev) {
|
|
if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
|
|
// not supported
|
|
throw new FS.ErrnoError(63);
|
|
}
|
|
MEMFS.ops_table ||= {
|
|
dir: {
|
|
node: {
|
|
getattr: MEMFS.node_ops.getattr,
|
|
setattr: MEMFS.node_ops.setattr,
|
|
lookup: MEMFS.node_ops.lookup,
|
|
mknod: MEMFS.node_ops.mknod,
|
|
rename: MEMFS.node_ops.rename,
|
|
unlink: MEMFS.node_ops.unlink,
|
|
rmdir: MEMFS.node_ops.rmdir,
|
|
readdir: MEMFS.node_ops.readdir,
|
|
symlink: MEMFS.node_ops.symlink
|
|
},
|
|
stream: {
|
|
llseek: MEMFS.stream_ops.llseek
|
|
}
|
|
},
|
|
file: {
|
|
node: {
|
|
getattr: MEMFS.node_ops.getattr,
|
|
setattr: MEMFS.node_ops.setattr
|
|
},
|
|
stream: {
|
|
llseek: MEMFS.stream_ops.llseek,
|
|
read: MEMFS.stream_ops.read,
|
|
write: MEMFS.stream_ops.write,
|
|
mmap: MEMFS.stream_ops.mmap,
|
|
msync: MEMFS.stream_ops.msync
|
|
}
|
|
},
|
|
link: {
|
|
node: {
|
|
getattr: MEMFS.node_ops.getattr,
|
|
setattr: MEMFS.node_ops.setattr,
|
|
readlink: MEMFS.node_ops.readlink
|
|
},
|
|
stream: {}
|
|
},
|
|
chrdev: {
|
|
node: {
|
|
getattr: MEMFS.node_ops.getattr,
|
|
setattr: MEMFS.node_ops.setattr
|
|
},
|
|
stream: FS.chrdev_stream_ops
|
|
}
|
|
};
|
|
var node = FS.createNode(parent, name, mode, dev);
|
|
if (FS.isDir(node.mode)) {
|
|
node.node_ops = MEMFS.ops_table.dir.node;
|
|
node.stream_ops = MEMFS.ops_table.dir.stream;
|
|
node.contents = {};
|
|
} else if (FS.isFile(node.mode)) {
|
|
node.node_ops = MEMFS.ops_table.file.node;
|
|
node.stream_ops = MEMFS.ops_table.file.stream;
|
|
node.usedBytes = 0; // The actual number of bytes used in the typed array, as opposed to contents.length which gives the whole capacity.
|
|
// When the byte data of the file is populated, this will point to either a typed array, or a normal JS array. Typed arrays are preferred
|
|
// for performance, and used by default. However, typed arrays are not resizable like normal JS arrays are, so there is a small disk size
|
|
// penalty involved for appending file writes that continuously grow a file, similar to the std::vector capacity-vs-used scheme.
|
|
node.contents = null;
|
|
} else if (FS.isLink(node.mode)) {
|
|
node.node_ops = MEMFS.ops_table.link.node;
|
|
node.stream_ops = MEMFS.ops_table.link.stream;
|
|
} else if (FS.isChrdev(node.mode)) {
|
|
node.node_ops = MEMFS.ops_table.chrdev.node;
|
|
node.stream_ops = MEMFS.ops_table.chrdev.stream;
|
|
}
|
|
node.atime = node.mtime = node.ctime = Date.now();
|
|
// add the new node to the parent
|
|
if (parent) {
|
|
parent.contents[name] = node;
|
|
parent.atime = parent.mtime = parent.ctime = node.atime;
|
|
}
|
|
return node;
|
|
},
|
|
getFileDataAsTypedArray(node) {
|
|
if (!node.contents) return new Uint8Array(0);
|
|
if (node.contents.subarray) return node.contents.subarray(0, node.usedBytes); // Make sure to not return excess unused bytes.
|
|
return new Uint8Array(node.contents);
|
|
},
|
|
expandFileStorage(node, newCapacity) {
|
|
var prevCapacity = node.contents ? node.contents.length : 0;
|
|
if (prevCapacity >= newCapacity) return; // No need to expand, the storage was already large enough.
|
|
// Don't expand strictly to the given requested limit if it's only a very small increase, but instead geometrically grow capacity.
|
|
// For small filesizes (<1MB), perform size*2 geometric increase, but for large sizes, do a much more conservative size*1.125 increase to
|
|
// avoid overshooting the allocation cap by a very large margin.
|
|
var CAPACITY_DOUBLING_MAX = 1024 * 1024;
|
|
newCapacity = Math.max(newCapacity, (prevCapacity * (prevCapacity < CAPACITY_DOUBLING_MAX ? 2.0 : 1.125)) >>> 0);
|
|
if (prevCapacity != 0) newCapacity = Math.max(newCapacity, 256); // At minimum allocate 256b for each file when expanding.
|
|
var oldContents = node.contents;
|
|
node.contents = new Uint8Array(newCapacity); // Allocate new storage.
|
|
if (node.usedBytes > 0) node.contents.set(oldContents.subarray(0, node.usedBytes), 0); // Copy old data over to the new storage.
|
|
},
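// Example: while capacity is below 1 MiB, small appends roughly double it
// (100 KiB -> 200 KiB -> 400 KiB -> ...); above 1 MiB each expansion adds
// only 12.5% (e.g. 2 MiB -> 2.25 MiB), trading extra copies for less overshoot.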
|
|
resizeFileStorage(node, newSize) {
|
|
if (node.usedBytes == newSize) return;
|
|
if (newSize == 0) {
|
|
node.contents = null; // Fully decommit when requesting a resize to zero.
|
|
node.usedBytes = 0;
|
|
} else {
|
|
var oldContents = node.contents;
|
|
node.contents = new Uint8Array(newSize); // Allocate new storage.
|
|
if (oldContents) {
|
|
node.contents.set(oldContents.subarray(0, Math.min(newSize, node.usedBytes))); // Copy old data over to the new storage.
|
|
}
|
|
node.usedBytes = newSize;
|
|
}
|
|
},
|
|
node_ops:{
|
|
getattr(node) {
|
|
var attr = {};
|
|
// device numbers reuse inode numbers.
|
|
attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
|
|
attr.ino = node.id;
|
|
attr.mode = node.mode;
|
|
attr.nlink = 1;
|
|
attr.uid = 0;
|
|
attr.gid = 0;
|
|
attr.rdev = node.rdev;
|
|
if (FS.isDir(node.mode)) {
|
|
attr.size = 4096;
|
|
} else if (FS.isFile(node.mode)) {
|
|
attr.size = node.usedBytes;
|
|
} else if (FS.isLink(node.mode)) {
|
|
attr.size = node.link.length;
|
|
} else {
|
|
attr.size = 0;
|
|
}
|
|
attr.atime = new Date(node.atime);
|
|
attr.mtime = new Date(node.mtime);
|
|
attr.ctime = new Date(node.ctime);
|
|
// NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize),
|
|
// but this is not required by the standard.
|
|
attr.blksize = 4096;
|
|
attr.blocks = Math.ceil(attr.size / attr.blksize);
|
|
return attr;
|
|
},
|
|
setattr(node, attr) {
|
|
for (const key of ["mode", "atime", "mtime", "ctime"]) {
|
|
if (attr[key] != null) {
|
|
node[key] = attr[key];
|
|
}
|
|
}
|
|
if (attr.size !== undefined) {
|
|
MEMFS.resizeFileStorage(node, attr.size);
|
|
}
|
|
},
|
|
lookup(parent, name) {
|
|
throw new FS.ErrnoError(44);
|
|
},
|
|
mknod(parent, name, mode, dev) {
|
|
return MEMFS.createNode(parent, name, mode, dev);
|
|
},
|
|
rename(old_node, new_dir, new_name) {
|
|
var new_node;
|
|
try {
|
|
new_node = FS.lookupNode(new_dir, new_name);
|
|
} catch (e) {}
|
|
if (new_node) {
|
|
if (FS.isDir(old_node.mode)) {
|
|
// if we're overwriting a directory at new_name, make sure it's empty.
|
|
for (var i in new_node.contents) {
|
|
throw new FS.ErrnoError(55);
|
|
}
|
|
}
|
|
FS.hashRemoveNode(new_node);
|
|
}
|
|
// do the internal rewiring
|
|
delete old_node.parent.contents[old_node.name];
|
|
new_dir.contents[new_name] = old_node;
|
|
old_node.name = new_name;
|
|
new_dir.ctime = new_dir.mtime = old_node.parent.ctime = old_node.parent.mtime = Date.now();
|
|
},
|
|
unlink(parent, name) {
|
|
delete parent.contents[name];
|
|
parent.ctime = parent.mtime = Date.now();
|
|
},
|
|
rmdir(parent, name) {
|
|
var node = FS.lookupNode(parent, name);
|
|
for (var i in node.contents) {
|
|
throw new FS.ErrnoError(55);
|
|
}
|
|
delete parent.contents[name];
|
|
parent.ctime = parent.mtime = Date.now();
|
|
},
|
|
readdir(node) {
|
|
return ['.', '..', ...Object.keys(node.contents)];
|
|
},
|
|
symlink(parent, newname, oldpath) {
|
|
var node = MEMFS.createNode(parent, newname, 0o777 | 40960, 0);
|
|
node.link = oldpath;
|
|
return node;
|
|
},
|
|
readlink(node) {
|
|
if (!FS.isLink(node.mode)) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
return node.link;
|
|
},
|
|
},
|
|
stream_ops:{
|
|
read(stream, buffer, offset, length, position) {
|
|
var contents = stream.node.contents;
|
|
if (position >= stream.node.usedBytes) return 0;
|
|
var size = Math.min(stream.node.usedBytes - position, length);
|
|
assert(size >= 0);
|
|
if (size > 8 && contents.subarray) { // non-trivial, and typed array
|
|
buffer.set(contents.subarray(position, position + size), offset);
|
|
} else {
|
|
for (var i = 0; i < size; i++) buffer[offset + i] = contents[position + i];
|
|
}
|
|
return size;
|
|
},
|
|
write(stream, buffer, offset, length, position, canOwn) {
|
|
// The data buffer should be a typed array view
|
|
assert(!(buffer instanceof ArrayBuffer));
|
|
|
|
if (!length) return 0;
|
|
var node = stream.node;
|
|
node.mtime = node.ctime = Date.now();
|
|
|
|
if (buffer.subarray && (!node.contents || node.contents.subarray)) { // This write is from a typed array to a typed array?
|
|
if (canOwn) {
|
|
assert(position === 0, 'canOwn must imply no weird position inside the file');
|
|
node.contents = buffer.subarray(offset, offset + length);
|
|
node.usedBytes = length;
|
|
return length;
|
|
} else if (node.usedBytes === 0 && position === 0) { // If this is a simple first write to an empty file, do a fast set since we don't need to care about old data.
|
|
node.contents = buffer.slice(offset, offset + length);
|
|
node.usedBytes = length;
|
|
return length;
|
|
} else if (position + length <= node.usedBytes) { // Writing to an already allocated and used subrange of the file?
|
|
node.contents.set(buffer.subarray(offset, offset + length), position);
|
|
return length;
|
|
}
|
|
}
|
|
|
|
// Appending to an existing file and we need to reallocate, or source data did not come as a typed array.
|
|
MEMFS.expandFileStorage(node, position+length);
|
|
if (node.contents.subarray && buffer.subarray) {
|
|
// Use typed array write which is available.
|
|
node.contents.set(buffer.subarray(offset, offset + length), position);
|
|
} else {
|
|
for (var i = 0; i < length; i++) {
|
|
node.contents[position + i] = buffer[offset + i]; // Or fall back to manual write if not.
|
|
}
|
|
}
|
|
node.usedBytes = Math.max(node.usedBytes, position + length);
|
|
return length;
|
|
},
|
|
llseek(stream, offset, whence) {
|
|
var position = offset;
|
|
if (whence === 1) {
|
|
position += stream.position;
|
|
} else if (whence === 2) {
|
|
if (FS.isFile(stream.node.mode)) {
|
|
position += stream.node.usedBytes;
|
|
}
|
|
}
|
|
if (position < 0) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
return position;
|
|
},
|
|
mmap(stream, length, position, prot, flags) {
|
|
if (!FS.isFile(stream.node.mode)) {
|
|
throw new FS.ErrnoError(43);
|
|
}
|
|
var ptr;
|
|
var allocated;
|
|
var contents = stream.node.contents;
|
|
// Only make a new copy when MAP_PRIVATE is specified.
|
|
if (!(flags & 2) && contents && contents.buffer === HEAP8.buffer) {
|
|
// We can't emulate MAP_SHARED when the file is not backed by the
|
|
// buffer we're mapping to (e.g. the HEAP buffer).
|
|
allocated = false;
|
|
ptr = contents.byteOffset;
|
|
} else {
|
|
allocated = true;
|
|
ptr = mmapAlloc(length);
|
|
if (!ptr) {
|
|
throw new FS.ErrnoError(48);
|
|
}
|
|
if (contents) {
|
|
// Try to avoid unnecessary slices.
|
|
if (position > 0 || position + length < contents.length) {
|
|
if (contents.subarray) {
|
|
contents = contents.subarray(position, position + length);
|
|
} else {
|
|
contents = Array.prototype.slice.call(contents, position, position + length);
|
|
}
|
|
}
|
|
HEAP8.set(contents, ptr);
|
|
}
|
|
}
|
|
return { ptr, allocated };
|
|
},
|
|
msync(stream, buffer, offset, length, mmapFlags) {
|
|
MEMFS.stream_ops.write(stream, buffer, 0, length, offset, false);
|
|
// should we check if bytesWritten and length are the same?
|
|
return 0;
|
|
},
|
|
},
|
|
};
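// Illustrative sketch (not part of the generated runtime): the capacity that
// MEMFS.expandFileStorage above would allocate for a given request, assuming
// the same growth policy (double below 1MB, grow by 1.125x above it, and a
// 256-byte floor once a file already has storage).
function exampleMemfsGrowthSketch(prevCapacity, requestedCapacity) {
  var CAPACITY_DOUBLING_MAX = 1024 * 1024;
  if (prevCapacity >= requestedCapacity) return prevCapacity; // no reallocation needed
  var newCapacity = Math.max(requestedCapacity,
      (prevCapacity * (prevCapacity < CAPACITY_DOUBLING_MAX ? 2.0 : 1.125)) >>> 0);
  if (prevCapacity != 0) newCapacity = Math.max(newCapacity, 256);
  return newCapacity;
}
// e.g. exampleMemfsGrowthSketch(1000, 1001) === 2000, while
// exampleMemfsGrowthSketch(2 * 1024 * 1024, 2 * 1024 * 1024 + 1) === 2359296.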
var asyncLoad = async (url) => {
|
|
var arrayBuffer = await readAsync(url);
|
|
assert(arrayBuffer, `Loading data file "${url}" failed (no arrayBuffer).`);
|
|
return new Uint8Array(arrayBuffer);
|
|
};
|
|
|
|
|
|
var FS_createDataFile = (parent, name, fileData, canRead, canWrite, canOwn) => {
|
|
FS.createDataFile(parent, name, fileData, canRead, canWrite, canOwn);
|
|
};
|
|
|
|
var preloadPlugins = Module['preloadPlugins'] || [];
|
|
var FS_handledByPreloadPlugin = (byteArray, fullname, finish, onerror) => {
|
|
// Ensure plugins are ready.
|
|
if (typeof Browser != 'undefined') Browser.init();
|
|
|
|
var handled = false;
|
|
preloadPlugins.forEach((plugin) => {
|
|
if (handled) return;
|
|
if (plugin['canHandle'](fullname)) {
|
|
plugin['handle'](byteArray, fullname, finish, onerror);
|
|
handled = true;
|
|
}
|
|
});
|
|
return handled;
|
|
};
|
|
var FS_createPreloadedFile = (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn, preFinish) => {
|
|
// TODO: we should allow people to just pass in a complete filename instead
// of parent and name, since we just join them anyway.
|
|
var fullname = name ? PATH_FS.resolve(PATH.join2(parent, name)) : parent;
|
|
var dep = getUniqueRunDependency(`cp ${fullname}`); // might have several active requests for the same fullname
|
|
function processData(byteArray) {
|
|
function finish(byteArray) {
|
|
preFinish?.();
|
|
if (!dontCreateFile) {
|
|
FS_createDataFile(parent, name, byteArray, canRead, canWrite, canOwn);
|
|
}
|
|
onload?.();
|
|
removeRunDependency(dep);
|
|
}
|
|
if (FS_handledByPreloadPlugin(byteArray, fullname, finish, () => {
|
|
onerror?.();
|
|
removeRunDependency(dep);
|
|
})) {
|
|
return;
|
|
}
|
|
finish(byteArray);
|
|
}
|
|
addRunDependency(dep);
|
|
if (typeof url == 'string') {
|
|
asyncLoad(url).then(processData, onerror);
|
|
} else {
|
|
processData(url);
|
|
}
|
|
};
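// Usage sketch (illustrative only): preload a remote asset into MEMFS before
// main() runs. The URL and path below are hypothetical; onload/onerror are the
// optional callbacks taken by FS_createPreloadedFile above.
//
//   FS_createPreloadedFile('/assets', 'logo.png', 'assets/logo.png',
//       /*canRead=*/true, /*canWrite=*/false,
//       () => out('logo.png preloaded'),
//       () => err('failed to preload logo.png'));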
var FS_modeStringToFlags = (str) => {
|
|
var flagModes = {
|
|
'r': 0,
|
|
'r+': 2,
|
|
'w': 512 | 64 | 1,
|
|
'w+': 512 | 64 | 2,
|
|
'a': 1024 | 64 | 1,
|
|
'a+': 1024 | 64 | 2,
|
|
};
|
|
var flags = flagModes[str];
|
|
if (typeof flags == 'undefined') {
|
|
throw new Error(`Unknown file open mode: ${str}`);
|
|
}
|
|
return flags;
|
|
};
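// The numeric constants above correspond to the usual Linux/musl open(2)
// flags: 0 = O_RDONLY, 1 = O_WRONLY, 2 = O_RDWR, 64 = O_CREAT,
// 512 = O_TRUNC, 1024 = O_APPEND. For example (illustrative only):
//
//   FS_modeStringToFlags('w')   // => 577  (O_TRUNC | O_CREAT | O_WRONLY)
//   FS_modeStringToFlags('a+')  // => 1090 (O_APPEND | O_CREAT | O_RDWR)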
var FS_getMode = (canRead, canWrite) => {
|
|
var mode = 0;
|
|
if (canRead) mode |= 292 | 73;
|
|
if (canWrite) mode |= 146;
|
|
return mode;
|
|
};
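// The bit constants above are standard POSIX permission masks in decimal:
// 292 = 0o444 (read for user/group/other), 73 = 0o111 (execute),
// 146 = 0o222 (write). For example (illustrative only):
//
//   FS_getMode(true, false)  // => 365 (0o555, read + execute)
//   FS_getMode(true, true)   // => 511 (0o777)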
/**
 * Given a pointer 'ptr' to a null-terminated UTF8-encoded string in the
 * emscripten HEAP, returns a copy of that string as a Javascript String object.
 *
 * @param {number} ptr
 * @param {number=} maxBytesToRead - An optional length that specifies the
 * maximum number of bytes to read. You can omit this parameter to scan the
 * string until the first 0 byte. If maxBytesToRead is passed, and the string
 * at [ptr, ptr+maxBytesToRead) contains a null byte in the middle, then the
 * string will be cut short at that byte index (i.e. maxBytesToRead will not
 * produce a string of exact length [ptr, ptr+maxBytesToRead)). N.B. mixing
 * frequent uses of UTF8ToString() with and without maxBytesToRead may throw
 * JS JIT optimizations off, so it is worth considering using one style
 * consistently.
 * @return {string}
 */
var UTF8ToString = (ptr, maxBytesToRead) => {
  assert(typeof ptr == 'number', `UTF8ToString expects a number (got ${typeof ptr})`);
  return ptr ? UTF8ArrayToString(HEAPU8, ptr, maxBytesToRead) : '';
};
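// Usage sketch (illustrative only): read a C string returned by a compiled
// function. `_get_version` is a hypothetical exported C function returning a
// `const char*`; the pointer is converted to a JS string via UTF8ToString.
//
//   var versionPtr = _get_version();        // hypothetical export
//   var version = UTF8ToString(versionPtr); // e.g. "1.2.3"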
var strError = (errno) => UTF8ToString(_strerror(errno));
|
|
|
|
var ERRNO_CODES = {
|
|
'EPERM': 63,
|
|
'ENOENT': 44,
|
|
'ESRCH': 71,
|
|
'EINTR': 27,
|
|
'EIO': 29,
|
|
'ENXIO': 60,
|
|
'E2BIG': 1,
|
|
'ENOEXEC': 45,
|
|
'EBADF': 8,
|
|
'ECHILD': 12,
|
|
'EAGAIN': 6,
|
|
'EWOULDBLOCK': 6,
|
|
'ENOMEM': 48,
|
|
'EACCES': 2,
|
|
'EFAULT': 21,
|
|
'ENOTBLK': 105,
|
|
'EBUSY': 10,
|
|
'EEXIST': 20,
|
|
'EXDEV': 75,
|
|
'ENODEV': 43,
|
|
'ENOTDIR': 54,
|
|
'EISDIR': 31,
|
|
'EINVAL': 28,
|
|
'ENFILE': 41,
|
|
'EMFILE': 33,
|
|
'ENOTTY': 59,
|
|
'ETXTBSY': 74,
|
|
'EFBIG': 22,
|
|
'ENOSPC': 51,
|
|
'ESPIPE': 70,
|
|
'EROFS': 69,
|
|
'EMLINK': 34,
|
|
'EPIPE': 64,
|
|
'EDOM': 18,
|
|
'ERANGE': 68,
|
|
'ENOMSG': 49,
|
|
'EIDRM': 24,
|
|
'ECHRNG': 106,
|
|
'EL2NSYNC': 156,
|
|
'EL3HLT': 107,
|
|
'EL3RST': 108,
|
|
'ELNRNG': 109,
|
|
'EUNATCH': 110,
|
|
'ENOCSI': 111,
|
|
'EL2HLT': 112,
|
|
'EDEADLK': 16,
|
|
'ENOLCK': 46,
|
|
'EBADE': 113,
|
|
'EBADR': 114,
|
|
'EXFULL': 115,
|
|
'ENOANO': 104,
|
|
'EBADRQC': 103,
|
|
'EBADSLT': 102,
|
|
'EDEADLOCK': 16,
|
|
'EBFONT': 101,
|
|
'ENOSTR': 100,
|
|
'ENODATA': 116,
|
|
'ETIME': 117,
|
|
'ENOSR': 118,
|
|
'ENONET': 119,
|
|
'ENOPKG': 120,
|
|
'EREMOTE': 121,
|
|
'ENOLINK': 47,
|
|
'EADV': 122,
|
|
'ESRMNT': 123,
|
|
'ECOMM': 124,
|
|
'EPROTO': 65,
|
|
'EMULTIHOP': 36,
|
|
'EDOTDOT': 125,
|
|
'EBADMSG': 9,
|
|
'ENOTUNIQ': 126,
|
|
'EBADFD': 127,
|
|
'EREMCHG': 128,
|
|
'ELIBACC': 129,
|
|
'ELIBBAD': 130,
|
|
'ELIBSCN': 131,
|
|
'ELIBMAX': 132,
|
|
'ELIBEXEC': 133,
|
|
'ENOSYS': 52,
|
|
'ENOTEMPTY': 55,
|
|
'ENAMETOOLONG': 37,
|
|
'ELOOP': 32,
|
|
'EOPNOTSUPP': 138,
|
|
'EPFNOSUPPORT': 139,
|
|
'ECONNRESET': 15,
|
|
'ENOBUFS': 42,
|
|
'EAFNOSUPPORT': 5,
|
|
'EPROTOTYPE': 67,
|
|
'ENOTSOCK': 57,
|
|
'ENOPROTOOPT': 50,
|
|
'ESHUTDOWN': 140,
|
|
'ECONNREFUSED': 14,
|
|
'EADDRINUSE': 3,
|
|
'ECONNABORTED': 13,
|
|
'ENETUNREACH': 40,
|
|
'ENETDOWN': 38,
|
|
'ETIMEDOUT': 73,
|
|
'EHOSTDOWN': 142,
|
|
'EHOSTUNREACH': 23,
|
|
'EINPROGRESS': 26,
|
|
'EALREADY': 7,
|
|
'EDESTADDRREQ': 17,
|
|
'EMSGSIZE': 35,
|
|
'EPROTONOSUPPORT': 66,
|
|
'ESOCKTNOSUPPORT': 137,
|
|
'EADDRNOTAVAIL': 4,
|
|
'ENETRESET': 39,
|
|
'EISCONN': 30,
|
|
'ENOTCONN': 53,
|
|
'ETOOMANYREFS': 141,
|
|
'EUSERS': 136,
|
|
'EDQUOT': 19,
|
|
'ESTALE': 72,
|
|
'ENOTSUP': 138,
|
|
'ENOMEDIUM': 148,
|
|
'EILSEQ': 25,
|
|
'EOVERFLOW': 61,
|
|
'ECANCELED': 11,
|
|
'ENOTRECOVERABLE': 56,
|
|
'EOWNERDEAD': 62,
|
|
'ESTRPIPE': 135,
|
|
};
|
|
var FS = {
|
|
root:null,
|
|
mounts:[],
|
|
devices:{
|
|
},
|
|
streams:[],
|
|
nextInode:1,
|
|
nameTable:null,
|
|
currentPath:"/",
|
|
initialized:false,
|
|
ignorePermissions:true,
|
|
filesystems:null,
|
|
syncFSRequests:0,
|
|
readFiles:{
|
|
},
|
|
ErrnoError:class extends Error {
|
|
name = 'ErrnoError';
|
|
// We set the `name` property to be able to identify `FS.ErrnoError`:
// - `name` is a standard ECMA-262 property of error objects, so it is good to have anyway.
// - when using PROXYFS, an error can come from an underlying FS. Since different FS objects
//   each have their own FS.ErrnoError, the test `err instanceof FS.ErrnoError` won't detect
//   an error coming from another filesystem, causing bugs. We use the reliable test
//   `err.name == "ErrnoError"` instead.
|
|
constructor(errno) {
|
|
super(runtimeInitialized ? strError(errno) : '');
|
|
this.errno = errno;
|
|
for (var key in ERRNO_CODES) {
|
|
if (ERRNO_CODES[key] === errno) {
|
|
this.code = key;
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
},
|
|
FSStream:class {
|
|
shared = {};
|
|
get object() {
|
|
return this.node;
|
|
}
|
|
set object(val) {
|
|
this.node = val;
|
|
}
|
|
get isRead() {
|
|
return (this.flags & 2097155) !== 1;
|
|
}
|
|
get isWrite() {
|
|
return (this.flags & 2097155) !== 0;
|
|
}
|
|
get isAppend() {
|
|
return (this.flags & 1024);
|
|
}
|
|
get flags() {
|
|
return this.shared.flags;
|
|
}
|
|
set flags(val) {
|
|
this.shared.flags = val;
|
|
}
|
|
get position() {
|
|
return this.shared.position;
|
|
}
|
|
set position(val) {
|
|
this.shared.position = val;
|
|
}
|
|
},
|
|
FSNode:class {
|
|
node_ops = {};
|
|
stream_ops = {};
|
|
readMode = 292 | 73;
|
|
writeMode = 146;
|
|
mounted = null;
|
|
constructor(parent, name, mode, rdev) {
|
|
if (!parent) {
|
|
parent = this; // root node sets parent to itself
|
|
}
|
|
this.parent = parent;
|
|
this.mount = parent.mount;
|
|
this.id = FS.nextInode++;
|
|
this.name = name;
|
|
this.mode = mode;
|
|
this.rdev = rdev;
|
|
this.atime = this.mtime = this.ctime = Date.now();
|
|
}
|
|
get read() {
|
|
return (this.mode & this.readMode) === this.readMode;
|
|
}
|
|
set read(val) {
|
|
val ? this.mode |= this.readMode : this.mode &= ~this.readMode;
|
|
}
|
|
get write() {
|
|
return (this.mode & this.writeMode) === this.writeMode;
|
|
}
|
|
set write(val) {
|
|
val ? this.mode |= this.writeMode : this.mode &= ~this.writeMode;
|
|
}
|
|
get isFolder() {
|
|
return FS.isDir(this.mode);
|
|
}
|
|
get isDevice() {
|
|
return FS.isChrdev(this.mode);
|
|
}
|
|
},
|
|
lookupPath(path, opts = {}) {
|
|
if (!path) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
opts.follow_mount ??= true;
|
|
|
|
if (!PATH.isAbs(path)) {
|
|
path = FS.cwd() + '/' + path;
|
|
}
|
|
|
|
// limit max consecutive symlinks to 40 (SYMLOOP_MAX).
|
|
linkloop: for (var nlinks = 0; nlinks < 40; nlinks++) {
|
|
// split the absolute path
|
|
var parts = path.split('/').filter((p) => !!p);
|
|
|
|
// start at the root
|
|
var current = FS.root;
|
|
var current_path = '/';
|
|
|
|
for (var i = 0; i < parts.length; i++) {
|
|
var islast = (i === parts.length-1);
|
|
if (islast && opts.parent) {
|
|
// stop resolving
|
|
break;
|
|
}
|
|
|
|
if (parts[i] === '.') {
|
|
continue;
|
|
}
|
|
|
|
if (parts[i] === '..') {
|
|
current_path = PATH.dirname(current_path);
|
|
current = current.parent;
|
|
continue;
|
|
}
|
|
|
|
current_path = PATH.join2(current_path, parts[i]);
|
|
try {
|
|
current = FS.lookupNode(current, parts[i]);
|
|
} catch (e) {
|
|
// if noent_okay is true, suppress a ENOENT in the last component
|
|
// and return an object with an undefined node. This is needed for
|
|
// resolving symlinks in the path when creating a file.
|
|
if ((e?.errno === 44) && islast && opts.noent_okay) {
|
|
return { path: current_path };
|
|
}
|
|
throw e;
|
|
}
|
|
|
|
// jump to the mount's root node if this is a mountpoint
|
|
if (FS.isMountpoint(current) && (!islast || opts.follow_mount)) {
|
|
current = current.mounted.root;
|
|
}
|
|
|
|
// by default, lookupPath will not follow a symlink if it is the final path component.
|
|
// setting opts.follow = true will override this behavior.
|
|
if (FS.isLink(current.mode) && (!islast || opts.follow)) {
|
|
if (!current.node_ops.readlink) {
|
|
throw new FS.ErrnoError(52);
|
|
}
|
|
var link = current.node_ops.readlink(current);
|
|
if (!PATH.isAbs(link)) {
|
|
link = PATH.dirname(current_path) + '/' + link;
|
|
}
|
|
path = link + '/' + parts.slice(i + 1).join('/');
|
|
continue linkloop;
|
|
}
|
|
}
|
|
return { path: current_path, node: current };
|
|
}
|
|
throw new FS.ErrnoError(32);
|
|
},
|
|
getPath(node) {
|
|
var path;
|
|
while (true) {
|
|
if (FS.isRoot(node)) {
|
|
var mount = node.mount.mountpoint;
|
|
if (!path) return mount;
|
|
return mount[mount.length-1] !== '/' ? `${mount}/${path}` : mount + path;
|
|
}
|
|
path = path ? `${node.name}/${path}` : node.name;
|
|
node = node.parent;
|
|
}
|
|
},
|
|
hashName(parentid, name) {
|
|
var hash = 0;
|
|
|
|
for (var i = 0; i < name.length; i++) {
|
|
hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
|
|
}
|
|
return ((parentid + hash) >>> 0) % FS.nameTable.length;
|
|
},
|
|
hashAddNode(node) {
|
|
var hash = FS.hashName(node.parent.id, node.name);
|
|
node.name_next = FS.nameTable[hash];
|
|
FS.nameTable[hash] = node;
|
|
},
|
|
hashRemoveNode(node) {
|
|
var hash = FS.hashName(node.parent.id, node.name);
|
|
if (FS.nameTable[hash] === node) {
|
|
FS.nameTable[hash] = node.name_next;
|
|
} else {
|
|
var current = FS.nameTable[hash];
|
|
while (current) {
|
|
if (current.name_next === node) {
|
|
current.name_next = node.name_next;
|
|
break;
|
|
}
|
|
current = current.name_next;
|
|
}
|
|
}
|
|
},
|
|
lookupNode(parent, name) {
|
|
var errCode = FS.mayLookup(parent);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
var hash = FS.hashName(parent.id, name);
|
|
for (var node = FS.nameTable[hash]; node; node = node.name_next) {
|
|
var nodeName = node.name;
|
|
if (node.parent.id === parent.id && nodeName === name) {
|
|
return node;
|
|
}
|
|
}
|
|
// if we failed to find it in the cache, call into the VFS
|
|
return FS.lookup(parent, name);
|
|
},
|
|
createNode(parent, name, mode, rdev) {
|
|
assert(typeof parent == 'object');
|
|
var node = new FS.FSNode(parent, name, mode, rdev);
|
|
|
|
FS.hashAddNode(node);
|
|
|
|
return node;
|
|
},
|
|
destroyNode(node) {
|
|
FS.hashRemoveNode(node);
|
|
},
|
|
isRoot(node) {
|
|
return node === node.parent;
|
|
},
|
|
isMountpoint(node) {
|
|
return !!node.mounted;
|
|
},
|
|
isFile(mode) {
|
|
return (mode & 61440) === 32768;
|
|
},
|
|
isDir(mode) {
|
|
return (mode & 61440) === 16384;
|
|
},
|
|
isLink(mode) {
|
|
return (mode & 61440) === 40960;
|
|
},
|
|
isChrdev(mode) {
|
|
return (mode & 61440) === 8192;
|
|
},
|
|
isBlkdev(mode) {
|
|
return (mode & 61440) === 24576;
|
|
},
|
|
isFIFO(mode) {
|
|
return (mode & 61440) === 4096;
|
|
},
|
|
isSocket(mode) {
|
|
return (mode & 49152) === 49152;
|
|
},
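// The magic numbers in the is*() helpers above are the standard POSIX file
// type bits in decimal: 61440 = 0o170000 (S_IFMT mask), 32768 = S_IFREG,
// 16384 = S_IFDIR, 40960 = S_IFLNK, 8192 = S_IFCHR, 24576 = S_IFBLK,
// 4096 = S_IFIFO, 49152 = S_IFSOCK. For example, a regular file created with
// mode 0o100644 satisfies FS.isFile() because (0o100644 & 0o170000) === 0o100000.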
|
|
flagsToPermissionString(flag) {
|
|
var perms = ['r', 'w', 'rw'][flag & 3];
|
|
if ((flag & 512)) {
|
|
perms += 'w';
|
|
}
|
|
return perms;
|
|
},
|
|
nodePermissions(node, perms) {
|
|
if (FS.ignorePermissions) {
|
|
return 0;
|
|
}
|
|
// return 0 if any user, group or owner bits are set.
|
|
if (perms.includes('r') && !(node.mode & 292)) {
|
|
return 2;
|
|
} else if (perms.includes('w') && !(node.mode & 146)) {
|
|
return 2;
|
|
} else if (perms.includes('x') && !(node.mode & 73)) {
|
|
return 2;
|
|
}
|
|
return 0;
|
|
},
|
|
mayLookup(dir) {
|
|
if (!FS.isDir(dir.mode)) return 54;
|
|
var errCode = FS.nodePermissions(dir, 'x');
|
|
if (errCode) return errCode;
|
|
if (!dir.node_ops.lookup) return 2;
|
|
return 0;
|
|
},
|
|
mayCreate(dir, name) {
|
|
if (!FS.isDir(dir.mode)) {
|
|
return 54;
|
|
}
|
|
try {
|
|
var node = FS.lookupNode(dir, name);
|
|
return 20;
|
|
} catch (e) {
|
|
}
|
|
return FS.nodePermissions(dir, 'wx');
|
|
},
|
|
mayDelete(dir, name, isdir) {
|
|
var node;
|
|
try {
|
|
node = FS.lookupNode(dir, name);
|
|
} catch (e) {
|
|
return e.errno;
|
|
}
|
|
var errCode = FS.nodePermissions(dir, 'wx');
|
|
if (errCode) {
|
|
return errCode;
|
|
}
|
|
if (isdir) {
|
|
if (!FS.isDir(node.mode)) {
|
|
return 54;
|
|
}
|
|
if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
|
|
return 10;
|
|
}
|
|
} else {
|
|
if (FS.isDir(node.mode)) {
|
|
return 31;
|
|
}
|
|
}
|
|
return 0;
|
|
},
|
|
mayOpen(node, flags) {
|
|
if (!node) {
|
|
return 44;
|
|
}
|
|
if (FS.isLink(node.mode)) {
|
|
return 32;
|
|
} else if (FS.isDir(node.mode)) {
|
|
if (FS.flagsToPermissionString(flags) !== 'r' // opening for write
|
|
|| (flags & (512 | 64))) { // TODO: check for O_SEARCH? (== search for dir only)
|
|
return 31;
|
|
}
|
|
}
|
|
return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
|
|
},
|
|
checkOpExists(op, err) {
|
|
if (!op) {
|
|
throw new FS.ErrnoError(err);
|
|
}
|
|
return op;
|
|
},
|
|
MAX_OPEN_FDS:4096,
|
|
nextfd() {
|
|
for (var fd = 0; fd <= FS.MAX_OPEN_FDS; fd++) {
|
|
if (!FS.streams[fd]) {
|
|
return fd;
|
|
}
|
|
}
|
|
throw new FS.ErrnoError(33);
|
|
},
|
|
getStreamChecked(fd) {
|
|
var stream = FS.getStream(fd);
|
|
if (!stream) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
return stream;
|
|
},
|
|
getStream:(fd) => FS.streams[fd],
|
|
createStream(stream, fd = -1) {
|
|
assert(fd >= -1);
|
|
|
|
// clone it, so we can return an instance of FSStream
|
|
stream = Object.assign(new FS.FSStream(), stream);
|
|
if (fd == -1) {
|
|
fd = FS.nextfd();
|
|
}
|
|
stream.fd = fd;
|
|
FS.streams[fd] = stream;
|
|
return stream;
|
|
},
|
|
closeStream(fd) {
|
|
FS.streams[fd] = null;
|
|
},
|
|
dupStream(origStream, fd = -1) {
|
|
var stream = FS.createStream(origStream, fd);
|
|
stream.stream_ops?.dup?.(stream);
|
|
return stream;
|
|
},
|
|
doSetAttr(stream, node, attr) {
|
|
var setattr = stream?.stream_ops.setattr;
|
|
var arg = setattr ? stream : node;
|
|
setattr ??= node.node_ops.setattr;
|
|
FS.checkOpExists(setattr, 63);
|
|
setattr(arg, attr);
|
|
},
|
|
chrdev_stream_ops:{
|
|
open(stream) {
|
|
var device = FS.getDevice(stream.node.rdev);
|
|
// override node's stream ops with the device's
|
|
stream.stream_ops = device.stream_ops;
|
|
// forward the open call
|
|
stream.stream_ops.open?.(stream);
|
|
},
|
|
llseek() {
|
|
throw new FS.ErrnoError(70);
|
|
},
|
|
},
|
|
major:(dev) => ((dev) >> 8),
|
|
minor:(dev) => ((dev) & 0xff),
|
|
makedev:(ma, mi) => ((ma) << 8 | (mi)),
|
|
registerDevice(dev, ops) {
|
|
FS.devices[dev] = { stream_ops: ops };
|
|
},
|
|
getDevice:(dev) => FS.devices[dev],
|
|
getMounts(mount) {
|
|
var mounts = [];
|
|
var check = [mount];
|
|
|
|
while (check.length) {
|
|
var m = check.pop();
|
|
|
|
mounts.push(m);
|
|
|
|
check.push(...m.mounts);
|
|
}
|
|
|
|
return mounts;
|
|
},
|
|
syncfs(populate, callback) {
|
|
if (typeof populate == 'function') {
|
|
callback = populate;
|
|
populate = false;
|
|
}
|
|
|
|
FS.syncFSRequests++;
|
|
|
|
if (FS.syncFSRequests > 1) {
|
|
err(`warning: ${FS.syncFSRequests} FS.syncfs operations in flight at once, probably just doing extra work`);
|
|
}
|
|
|
|
var mounts = FS.getMounts(FS.root.mount);
|
|
var completed = 0;
|
|
|
|
function doCallback(errCode) {
|
|
assert(FS.syncFSRequests > 0);
|
|
FS.syncFSRequests--;
|
|
return callback(errCode);
|
|
}
|
|
|
|
function done(errCode) {
|
|
if (errCode) {
|
|
if (!done.errored) {
|
|
done.errored = true;
|
|
return doCallback(errCode);
|
|
}
|
|
return;
|
|
}
|
|
if (++completed >= mounts.length) {
|
|
doCallback(null);
|
|
}
|
|
};
|
|
|
|
// sync all mounts
|
|
mounts.forEach((mount) => {
|
|
if (!mount.type.syncfs) {
|
|
return done(null);
|
|
}
|
|
mount.type.syncfs(mount, populate, done);
|
|
});
|
|
},
|
|
mount(type, opts, mountpoint) {
|
|
if (typeof type == 'string') {
|
|
// The filesystem was not included, and instead we have an error
|
|
// message stored in the variable.
|
|
throw type;
|
|
}
|
|
var root = mountpoint === '/';
|
|
var pseudo = !mountpoint;
|
|
var node;
|
|
|
|
if (root && FS.root) {
|
|
throw new FS.ErrnoError(10);
|
|
} else if (!root && !pseudo) {
|
|
var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
|
|
|
|
mountpoint = lookup.path; // use the absolute path
|
|
node = lookup.node;
|
|
|
|
if (FS.isMountpoint(node)) {
|
|
throw new FS.ErrnoError(10);
|
|
}
|
|
|
|
if (!FS.isDir(node.mode)) {
|
|
throw new FS.ErrnoError(54);
|
|
}
|
|
}
|
|
|
|
var mount = {
|
|
type,
|
|
opts,
|
|
mountpoint,
|
|
mounts: []
|
|
};
|
|
|
|
// create a root node for the fs
|
|
var mountRoot = type.mount(mount);
|
|
mountRoot.mount = mount;
|
|
mount.root = mountRoot;
|
|
|
|
if (root) {
|
|
FS.root = mountRoot;
|
|
} else if (node) {
|
|
// set as a mountpoint
|
|
node.mounted = mount;
|
|
|
|
// add the new mount to the current mount's children
|
|
if (node.mount) {
|
|
node.mount.mounts.push(mount);
|
|
}
|
|
}
|
|
|
|
return mountRoot;
|
|
},
|
|
unmount(mountpoint) {
|
|
var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
|
|
|
|
if (!FS.isMountpoint(lookup.node)) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
|
|
// destroy the nodes for this mount, and all its child mounts
|
|
var node = lookup.node;
|
|
var mount = node.mounted;
|
|
var mounts = FS.getMounts(mount);
|
|
|
|
Object.keys(FS.nameTable).forEach((hash) => {
|
|
var current = FS.nameTable[hash];
|
|
|
|
while (current) {
|
|
var next = current.name_next;
|
|
|
|
if (mounts.includes(current.mount)) {
|
|
FS.destroyNode(current);
|
|
}
|
|
|
|
current = next;
|
|
}
|
|
});
|
|
|
|
// no longer a mountpoint
|
|
node.mounted = null;
|
|
|
|
// remove this mount from the child mounts
|
|
var idx = node.mount.mounts.indexOf(mount);
|
|
assert(idx !== -1);
|
|
node.mount.mounts.splice(idx, 1);
|
|
},
|
|
lookup(parent, name) {
|
|
return parent.node_ops.lookup(parent, name);
|
|
},
|
|
mknod(path, mode, dev) {
|
|
var lookup = FS.lookupPath(path, { parent: true });
|
|
var parent = lookup.node;
|
|
var name = PATH.basename(path);
|
|
if (!name) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
if (name === '.' || name === '..') {
|
|
throw new FS.ErrnoError(20);
|
|
}
|
|
var errCode = FS.mayCreate(parent, name);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
if (!parent.node_ops.mknod) {
|
|
throw new FS.ErrnoError(63);
|
|
}
|
|
return parent.node_ops.mknod(parent, name, mode, dev);
|
|
},
|
|
statfs(path) {
|
|
return FS.statfsNode(FS.lookupPath(path, {follow: true}).node);
|
|
},
|
|
statfsStream(stream) {
|
|
// We keep a separate statfsStream function because noderawfs overrides
|
|
// it. In noderawfs, stream.node is sometimes null. Instead, we need to
|
|
// look at stream.path.
|
|
return FS.statfsNode(stream.node);
|
|
},
|
|
statfsNode(node) {
|
|
// NOTE: None of the defaults here reflect real values. We're just returning
// safe and sane defaults. Currently nodefs and rawfs replace these defaults;
// other file systems leave them alone.
|
|
var rtn = {
|
|
bsize: 4096,
|
|
frsize: 4096,
|
|
blocks: 1e6,
|
|
bfree: 5e5,
|
|
bavail: 5e5,
|
|
files: FS.nextInode,
|
|
ffree: FS.nextInode - 1,
|
|
fsid: 42,
|
|
flags: 2,
|
|
namelen: 255,
|
|
};
|
|
|
|
if (node.node_ops.statfs) {
|
|
Object.assign(rtn, node.node_ops.statfs(node.mount.opts.root));
|
|
}
|
|
return rtn;
|
|
},
|
|
create(path, mode = 0o666) {
|
|
mode &= 4095;
|
|
mode |= 32768;
|
|
return FS.mknod(path, mode, 0);
|
|
},
|
|
mkdir(path, mode = 0o777) {
|
|
mode &= 511 | 512;
|
|
mode |= 16384;
|
|
return FS.mknod(path, mode, 0);
|
|
},
|
|
mkdirTree(path, mode) {
|
|
var dirs = path.split('/');
|
|
var d = '';
|
|
for (var dir of dirs) {
|
|
if (!dir) continue;
|
|
if (d || PATH.isAbs(path)) d += '/';
|
|
d += dir;
|
|
try {
|
|
FS.mkdir(d, mode);
|
|
} catch(e) {
|
|
if (e.errno != 20) throw e;
|
|
}
|
|
}
|
|
},
|
|
mkdev(path, mode, dev) {
|
|
if (typeof dev == 'undefined') {
|
|
dev = mode;
|
|
mode = 0o666;
|
|
}
|
|
mode |= 8192;
|
|
return FS.mknod(path, mode, dev);
|
|
},
|
|
symlink(oldpath, newpath) {
|
|
if (!PATH_FS.resolve(oldpath)) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
var lookup = FS.lookupPath(newpath, { parent: true });
|
|
var parent = lookup.node;
|
|
if (!parent) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
var newname = PATH.basename(newpath);
|
|
var errCode = FS.mayCreate(parent, newname);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
if (!parent.node_ops.symlink) {
|
|
throw new FS.ErrnoError(63);
|
|
}
|
|
return parent.node_ops.symlink(parent, newname, oldpath);
|
|
},
|
|
rename(old_path, new_path) {
|
|
var old_dirname = PATH.dirname(old_path);
|
|
var new_dirname = PATH.dirname(new_path);
|
|
var old_name = PATH.basename(old_path);
|
|
var new_name = PATH.basename(new_path);
|
|
// parents must exist
|
|
var lookup, old_dir, new_dir;
|
|
|
|
// let the errors from non existent directories percolate up
|
|
lookup = FS.lookupPath(old_path, { parent: true });
|
|
old_dir = lookup.node;
|
|
lookup = FS.lookupPath(new_path, { parent: true });
|
|
new_dir = lookup.node;
|
|
|
|
if (!old_dir || !new_dir) throw new FS.ErrnoError(44);
|
|
// need to be part of the same mount
|
|
if (old_dir.mount !== new_dir.mount) {
|
|
throw new FS.ErrnoError(75);
|
|
}
|
|
// source must exist
|
|
var old_node = FS.lookupNode(old_dir, old_name);
|
|
// old path should not be an ancestor of the new path
|
|
var relative = PATH_FS.relative(old_path, new_dirname);
|
|
if (relative.charAt(0) !== '.') {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
// new path should not be an ancestor of the old path
|
|
relative = PATH_FS.relative(new_path, old_dirname);
|
|
if (relative.charAt(0) !== '.') {
|
|
throw new FS.ErrnoError(55);
|
|
}
|
|
// see if the new path already exists
|
|
var new_node;
|
|
try {
|
|
new_node = FS.lookupNode(new_dir, new_name);
|
|
} catch (e) {
|
|
// not fatal
|
|
}
|
|
// early out if nothing needs to change
|
|
if (old_node === new_node) {
|
|
return;
|
|
}
|
|
// we'll need to delete the old entry
|
|
var isdir = FS.isDir(old_node.mode);
|
|
var errCode = FS.mayDelete(old_dir, old_name, isdir);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
// need delete permissions if we'll be overwriting.
|
|
// need create permissions if new doesn't already exist.
|
|
errCode = new_node ?
|
|
FS.mayDelete(new_dir, new_name, isdir) :
|
|
FS.mayCreate(new_dir, new_name);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
if (!old_dir.node_ops.rename) {
|
|
throw new FS.ErrnoError(63);
|
|
}
|
|
if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) {
|
|
throw new FS.ErrnoError(10);
|
|
}
|
|
// if we are going to change the parent, check write permissions
|
|
if (new_dir !== old_dir) {
|
|
errCode = FS.nodePermissions(old_dir, 'w');
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
}
|
|
// remove the node from the lookup hash
|
|
FS.hashRemoveNode(old_node);
|
|
// do the underlying fs rename
|
|
try {
|
|
old_dir.node_ops.rename(old_node, new_dir, new_name);
|
|
// update old node (we do this here to avoid each backend
|
|
// needing to)
|
|
old_node.parent = new_dir;
|
|
} catch (e) {
|
|
throw e;
|
|
} finally {
|
|
// add the node back to the hash (in case node_ops.rename
|
|
// changed its name)
|
|
FS.hashAddNode(old_node);
|
|
}
|
|
},
|
|
rmdir(path) {
|
|
var lookup = FS.lookupPath(path, { parent: true });
|
|
var parent = lookup.node;
|
|
var name = PATH.basename(path);
|
|
var node = FS.lookupNode(parent, name);
|
|
var errCode = FS.mayDelete(parent, name, true);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
if (!parent.node_ops.rmdir) {
|
|
throw new FS.ErrnoError(63);
|
|
}
|
|
if (FS.isMountpoint(node)) {
|
|
throw new FS.ErrnoError(10);
|
|
}
|
|
parent.node_ops.rmdir(parent, name);
|
|
FS.destroyNode(node);
|
|
},
|
|
readdir(path) {
|
|
var lookup = FS.lookupPath(path, { follow: true });
|
|
var node = lookup.node;
|
|
var readdir = FS.checkOpExists(node.node_ops.readdir, 54);
|
|
return readdir(node);
|
|
},
|
|
unlink(path) {
|
|
var lookup = FS.lookupPath(path, { parent: true });
|
|
var parent = lookup.node;
|
|
if (!parent) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
var name = PATH.basename(path);
|
|
var node = FS.lookupNode(parent, name);
|
|
var errCode = FS.mayDelete(parent, name, false);
|
|
if (errCode) {
|
|
// According to POSIX, we should map EISDIR to EPERM, but
|
|
// we instead do what Linux does (and we must, as we use
|
|
// the musl linux libc).
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
if (!parent.node_ops.unlink) {
|
|
throw new FS.ErrnoError(63);
|
|
}
|
|
if (FS.isMountpoint(node)) {
|
|
throw new FS.ErrnoError(10);
|
|
}
|
|
parent.node_ops.unlink(parent, name);
|
|
FS.destroyNode(node);
|
|
},
|
|
readlink(path) {
|
|
var lookup = FS.lookupPath(path);
|
|
var link = lookup.node;
|
|
if (!link) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
if (!link.node_ops.readlink) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
return link.node_ops.readlink(link);
|
|
},
|
|
stat(path, dontFollow) {
|
|
var lookup = FS.lookupPath(path, { follow: !dontFollow });
|
|
var node = lookup.node;
|
|
var getattr = FS.checkOpExists(node.node_ops.getattr, 63);
|
|
return getattr(node);
|
|
},
|
|
fstat(fd) {
|
|
var stream = FS.getStreamChecked(fd);
|
|
var node = stream.node;
|
|
var getattr = stream.stream_ops.getattr;
|
|
var arg = getattr ? stream : node;
|
|
getattr ??= node.node_ops.getattr;
|
|
FS.checkOpExists(getattr, 63);
|
|
return getattr(arg);
|
|
},
|
|
lstat(path) {
|
|
return FS.stat(path, true);
|
|
},
|
|
doChmod(stream, node, mode, dontFollow) {
|
|
FS.doSetAttr(stream, node, {
|
|
mode: (mode & 4095) | (node.mode & ~4095),
|
|
ctime: Date.now(),
|
|
dontFollow
|
|
});
|
|
},
|
|
chmod(path, mode, dontFollow) {
|
|
var node;
|
|
if (typeof path == 'string') {
|
|
var lookup = FS.lookupPath(path, { follow: !dontFollow });
|
|
node = lookup.node;
|
|
} else {
|
|
node = path;
|
|
}
|
|
FS.doChmod(null, node, mode, dontFollow);
|
|
},
|
|
lchmod(path, mode) {
|
|
FS.chmod(path, mode, true);
|
|
},
|
|
fchmod(fd, mode) {
|
|
var stream = FS.getStreamChecked(fd);
|
|
FS.doChmod(stream, stream.node, mode, false);
|
|
},
|
|
doChown(stream, node, dontFollow) {
|
|
FS.doSetAttr(stream, node, {
|
|
timestamp: Date.now(),
|
|
dontFollow
|
|
// we ignore the uid / gid for now
|
|
});
|
|
},
|
|
chown(path, uid, gid, dontFollow) {
|
|
var node;
|
|
if (typeof path == 'string') {
|
|
var lookup = FS.lookupPath(path, { follow: !dontFollow });
|
|
node = lookup.node;
|
|
} else {
|
|
node = path;
|
|
}
|
|
FS.doChown(null, node, dontFollow);
|
|
},
|
|
lchown(path, uid, gid) {
|
|
FS.chown(path, uid, gid, true);
|
|
},
|
|
fchown(fd, uid, gid) {
|
|
var stream = FS.getStreamChecked(fd);
|
|
FS.doChown(stream, stream.node, false);
|
|
},
|
|
doTruncate(stream, node, len) {
|
|
if (FS.isDir(node.mode)) {
|
|
throw new FS.ErrnoError(31);
|
|
}
|
|
if (!FS.isFile(node.mode)) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
var errCode = FS.nodePermissions(node, 'w');
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
FS.doSetAttr(stream, node, {
|
|
size: len,
|
|
timestamp: Date.now()
|
|
});
|
|
},
|
|
truncate(path, len) {
|
|
if (len < 0) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
var node;
|
|
if (typeof path == 'string') {
|
|
var lookup = FS.lookupPath(path, { follow: true });
|
|
node = lookup.node;
|
|
} else {
|
|
node = path;
|
|
}
|
|
FS.doTruncate(null, node, len);
|
|
},
|
|
ftruncate(fd, len) {
|
|
var stream = FS.getStreamChecked(fd);
|
|
if (len < 0 || (stream.flags & 2097155) === 0) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
FS.doTruncate(stream, stream.node, len);
|
|
},
|
|
utime(path, atime, mtime) {
|
|
var lookup = FS.lookupPath(path, { follow: true });
|
|
var node = lookup.node;
|
|
var setattr = FS.checkOpExists(node.node_ops.setattr, 63);
|
|
setattr(node, {
|
|
atime: atime,
|
|
mtime: mtime
|
|
});
|
|
},
|
|
open(path, flags, mode = 0o666) {
|
|
if (path === "") {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
flags = typeof flags == 'string' ? FS_modeStringToFlags(flags) : flags;
|
|
if ((flags & 64)) {
|
|
mode = (mode & 4095) | 32768;
|
|
} else {
|
|
mode = 0;
|
|
}
|
|
var node;
|
|
var isDirPath;
|
|
if (typeof path == 'object') {
|
|
node = path;
|
|
} else {
|
|
isDirPath = path.endsWith("/");
|
|
// noent_okay makes it so that if the final component of the path
|
|
// doesn't exist, lookupPath returns `node: undefined`. `path` will be
|
|
// updated to point to the target of all symlinks.
|
|
var lookup = FS.lookupPath(path, {
|
|
follow: !(flags & 131072),
|
|
noent_okay: true
|
|
});
|
|
node = lookup.node;
|
|
path = lookup.path;
|
|
}
|
|
// perhaps we need to create the node
|
|
var created = false;
|
|
if ((flags & 64)) {
|
|
if (node) {
|
|
// if O_CREAT and O_EXCL are set, error out if the node already exists
|
|
if ((flags & 128)) {
|
|
throw new FS.ErrnoError(20);
|
|
}
|
|
} else if (isDirPath) {
|
|
throw new FS.ErrnoError(31);
|
|
} else {
|
|
// node doesn't exist, try to create it.
// Ignore the permission bits here to ensure we can `open` this new
// file below. We use chmod below to apply the permissions once the
// file is open.
|
|
node = FS.mknod(path, mode | 0o777, 0);
|
|
created = true;
|
|
}
|
|
}
|
|
if (!node) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
// can't truncate a device
|
|
if (FS.isChrdev(node.mode)) {
|
|
flags &= ~512;
|
|
}
|
|
// if asked only for a directory, then this must be one
|
|
if ((flags & 65536) && !FS.isDir(node.mode)) {
|
|
throw new FS.ErrnoError(54);
|
|
}
|
|
// check permissions, if this is not a file we just created now (it is ok to
|
|
// create and write to a file with read-only permissions; it is read-only
|
|
// for later use)
|
|
if (!created) {
|
|
var errCode = FS.mayOpen(node, flags);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
}
|
|
// do truncation if necessary
|
|
if ((flags & 512) && !created) {
|
|
FS.truncate(node, 0);
|
|
}
|
|
// we've already handled these, don't pass down to the underlying vfs
|
|
flags &= ~(128 | 512 | 131072);
|
|
|
|
// register the stream with the filesystem
|
|
var stream = FS.createStream({
|
|
node,
|
|
path: FS.getPath(node), // we want the absolute path to the node
|
|
flags,
|
|
seekable: true,
|
|
position: 0,
|
|
stream_ops: node.stream_ops,
|
|
// used by the file family libc calls (fopen, fwrite, ferror, etc.)
|
|
ungotten: [],
|
|
error: false
|
|
});
|
|
// call the new stream's open function
|
|
if (stream.stream_ops.open) {
|
|
stream.stream_ops.open(stream);
|
|
}
|
|
if (created) {
|
|
FS.chmod(node, mode & 0o777);
|
|
}
|
|
if (Module['logReadFiles'] && !(flags & 1)) {
|
|
if (!(path in FS.readFiles)) {
|
|
FS.readFiles[path] = 1;
|
|
}
|
|
}
|
|
return stream;
|
|
},
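// Usage sketch (illustrative only): the numeric `flags` accepted here follow
// the Linux open(2) encoding used throughout this file, so a create/write
// sequence could look like:
//
//   var stream = FS.open('/tmp/example.txt', 577);   // O_TRUNC|O_CREAT|O_WRONLY
//   FS.write(stream, data, 0, data.length);
//   FS.close(stream);
//
// ('/tmp/example.txt' and `data` are hypothetical; FS.writeFile below wraps
// this same pattern.)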
|
|
close(stream) {
|
|
if (FS.isClosed(stream)) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
if (stream.getdents) stream.getdents = null; // free readdir state
|
|
try {
|
|
if (stream.stream_ops.close) {
|
|
stream.stream_ops.close(stream);
|
|
}
|
|
} catch (e) {
|
|
throw e;
|
|
} finally {
|
|
FS.closeStream(stream.fd);
|
|
}
|
|
stream.fd = null;
|
|
},
|
|
isClosed(stream) {
|
|
return stream.fd === null;
|
|
},
|
|
llseek(stream, offset, whence) {
|
|
if (FS.isClosed(stream)) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
if (!stream.seekable || !stream.stream_ops.llseek) {
|
|
throw new FS.ErrnoError(70);
|
|
}
|
|
if (whence != 0 && whence != 1 && whence != 2) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
stream.position = stream.stream_ops.llseek(stream, offset, whence);
|
|
stream.ungotten = [];
|
|
return stream.position;
|
|
},
|
|
read(stream, buffer, offset, length, position) {
|
|
assert(offset >= 0);
|
|
if (length < 0 || position < 0) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
if (FS.isClosed(stream)) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
if ((stream.flags & 2097155) === 1) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
if (FS.isDir(stream.node.mode)) {
|
|
throw new FS.ErrnoError(31);
|
|
}
|
|
if (!stream.stream_ops.read) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
var seeking = typeof position != 'undefined';
|
|
if (!seeking) {
|
|
position = stream.position;
|
|
} else if (!stream.seekable) {
|
|
throw new FS.ErrnoError(70);
|
|
}
|
|
var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position);
|
|
if (!seeking) stream.position += bytesRead;
|
|
return bytesRead;
|
|
},
|
|
write(stream, buffer, offset, length, position, canOwn) {
|
|
assert(offset >= 0);
|
|
if (length < 0 || position < 0) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
if (FS.isClosed(stream)) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
if ((stream.flags & 2097155) === 0) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
if (FS.isDir(stream.node.mode)) {
|
|
throw new FS.ErrnoError(31);
|
|
}
|
|
if (!stream.stream_ops.write) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
if (stream.seekable && stream.flags & 1024) {
|
|
// seek to the end before writing in append mode
|
|
FS.llseek(stream, 0, 2);
|
|
}
|
|
var seeking = typeof position != 'undefined';
|
|
if (!seeking) {
|
|
position = stream.position;
|
|
} else if (!stream.seekable) {
|
|
throw new FS.ErrnoError(70);
|
|
}
|
|
var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn);
|
|
if (!seeking) stream.position += bytesWritten;
|
|
return bytesWritten;
|
|
},
|
|
mmap(stream, length, position, prot, flags) {
|
|
// User requests writing to file (prot & PROT_WRITE != 0).
|
|
// Checking if we have permissions to write to the file unless
|
|
// MAP_PRIVATE flag is set. According to POSIX spec it is possible
|
|
// to write to file opened in read-only mode with MAP_PRIVATE flag,
|
|
// as all modifications will be visible only in the memory of
|
|
// the current process.
|
|
if ((prot & 2) !== 0
|
|
&& (flags & 2) === 0
|
|
&& (stream.flags & 2097155) !== 2) {
|
|
throw new FS.ErrnoError(2);
|
|
}
|
|
if ((stream.flags & 2097155) === 1) {
|
|
throw new FS.ErrnoError(2);
|
|
}
|
|
if (!stream.stream_ops.mmap) {
|
|
throw new FS.ErrnoError(43);
|
|
}
|
|
if (!length) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
return stream.stream_ops.mmap(stream, length, position, prot, flags);
|
|
},
|
|
msync(stream, buffer, offset, length, mmapFlags) {
|
|
assert(offset >= 0);
|
|
if (!stream.stream_ops.msync) {
|
|
return 0;
|
|
}
|
|
return stream.stream_ops.msync(stream, buffer, offset, length, mmapFlags);
|
|
},
|
|
ioctl(stream, cmd, arg) {
|
|
if (!stream.stream_ops.ioctl) {
|
|
throw new FS.ErrnoError(59);
|
|
}
|
|
return stream.stream_ops.ioctl(stream, cmd, arg);
|
|
},
|
|
readFile(path, opts = {}) {
|
|
opts.flags = opts.flags || 0;
|
|
opts.encoding = opts.encoding || 'binary';
|
|
if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
|
|
throw new Error(`Invalid encoding type "${opts.encoding}"`);
|
|
}
|
|
var ret;
|
|
var stream = FS.open(path, opts.flags);
|
|
var stat = FS.stat(path);
|
|
var length = stat.size;
|
|
var buf = new Uint8Array(length);
|
|
FS.read(stream, buf, 0, length, 0);
|
|
if (opts.encoding === 'utf8') {
|
|
ret = UTF8ArrayToString(buf);
|
|
} else if (opts.encoding === 'binary') {
|
|
ret = buf;
|
|
}
|
|
FS.close(stream);
|
|
return ret;
|
|
},
|
|
writeFile(path, data, opts = {}) {
|
|
opts.flags = opts.flags || 577;
|
|
var stream = FS.open(path, opts.flags, opts.mode);
|
|
if (typeof data == 'string') {
|
|
var buf = new Uint8Array(lengthBytesUTF8(data)+1);
|
|
var actualNumBytes = stringToUTF8Array(data, buf, 0, buf.length);
|
|
FS.write(stream, buf, 0, actualNumBytes, undefined, opts.canOwn);
|
|
} else if (ArrayBuffer.isView(data)) {
|
|
FS.write(stream, data, 0, data.byteLength, undefined, opts.canOwn);
|
|
} else {
|
|
throw new Error('Unsupported data type');
|
|
}
|
|
FS.close(stream);
|
|
},
|
|
cwd:() => FS.currentPath,
|
|
chdir(path) {
|
|
var lookup = FS.lookupPath(path, { follow: true });
|
|
if (lookup.node === null) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
if (!FS.isDir(lookup.node.mode)) {
|
|
throw new FS.ErrnoError(54);
|
|
}
|
|
var errCode = FS.nodePermissions(lookup.node, 'x');
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
FS.currentPath = lookup.path;
|
|
},
|
|
createDefaultDirectories() {
|
|
FS.mkdir('/tmp');
|
|
FS.mkdir('/home');
|
|
FS.mkdir('/home/web_user');
|
|
},
|
|
createDefaultDevices() {
|
|
// create /dev
|
|
FS.mkdir('/dev');
|
|
// setup /dev/null
|
|
FS.registerDevice(FS.makedev(1, 3), {
|
|
read: () => 0,
|
|
write: (stream, buffer, offset, length, pos) => length,
|
|
llseek: () => 0,
|
|
});
|
|
FS.mkdev('/dev/null', FS.makedev(1, 3));
|
|
// setup /dev/tty and /dev/tty1
|
|
// stderr needs to print output using err() rather than out()
|
|
// so we register a second tty just for it.
|
|
TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
|
|
TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
|
|
FS.mkdev('/dev/tty', FS.makedev(5, 0));
|
|
FS.mkdev('/dev/tty1', FS.makedev(6, 0));
|
|
// setup /dev/[u]random
|
|
// use a buffer to avoid overhead of individual crypto calls per byte
|
|
var randomBuffer = new Uint8Array(1024), randomLeft = 0;
|
|
var randomByte = () => {
|
|
if (randomLeft === 0) {
|
|
randomFill(randomBuffer);
|
|
randomLeft = randomBuffer.byteLength;
|
|
}
|
|
return randomBuffer[--randomLeft];
|
|
};
|
|
FS.createDevice('/dev', 'random', randomByte);
|
|
FS.createDevice('/dev', 'urandom', randomByte);
|
|
// we're not going to emulate the actual shm device,
|
|
// just create the tmp dirs that reside in it commonly
|
|
FS.mkdir('/dev/shm');
|
|
FS.mkdir('/dev/shm/tmp');
|
|
},
|
|
createSpecialDirectories() {
|
|
// create /proc/self/fd which allows /proc/self/fd/6 => readlink gives the
|
|
// name of the stream for fd 6 (see test_unistd_ttyname)
|
|
FS.mkdir('/proc');
|
|
var proc_self = FS.mkdir('/proc/self');
|
|
FS.mkdir('/proc/self/fd');
|
|
FS.mount({
|
|
mount() {
|
|
var node = FS.createNode(proc_self, 'fd', 16895, 73);
|
|
node.stream_ops = {
|
|
llseek: MEMFS.stream_ops.llseek,
|
|
};
|
|
node.node_ops = {
|
|
lookup(parent, name) {
|
|
var fd = +name;
|
|
var stream = FS.getStreamChecked(fd);
|
|
var ret = {
|
|
parent: null,
|
|
mount: { mountpoint: 'fake' },
|
|
node_ops: { readlink: () => stream.path },
|
|
id: fd + 1,
|
|
};
|
|
ret.parent = ret; // make it look like a simple root node
|
|
return ret;
|
|
},
|
|
readdir() {
|
|
return Array.from(FS.streams.entries())
|
|
.filter(([k, v]) => v)
|
|
.map(([k, v]) => k.toString());
|
|
}
|
|
};
|
|
return node;
|
|
}
|
|
}, {}, '/proc/self/fd');
|
|
},
|
|
createStandardStreams(input, output, error) {
|
|
// TODO: deprecate the old functionality of a single
// input / output callback that utilizes FS.createDevice,
// and instead require a unique set of stream ops
|
|
|
|
// by default, we symlink the standard streams to the
|
|
// default tty devices. however, if the standard streams
|
|
// have been overwritten we create a unique device for
|
|
// them instead.
|
|
if (input) {
|
|
FS.createDevice('/dev', 'stdin', input);
|
|
} else {
|
|
FS.symlink('/dev/tty', '/dev/stdin');
|
|
}
|
|
if (output) {
|
|
FS.createDevice('/dev', 'stdout', null, output);
|
|
} else {
|
|
FS.symlink('/dev/tty', '/dev/stdout');
|
|
}
|
|
if (error) {
|
|
FS.createDevice('/dev', 'stderr', null, error);
|
|
} else {
|
|
FS.symlink('/dev/tty1', '/dev/stderr');
|
|
}
|
|
|
|
// open default streams for the stdin, stdout and stderr devices
|
|
var stdin = FS.open('/dev/stdin', 0);
|
|
var stdout = FS.open('/dev/stdout', 1);
|
|
var stderr = FS.open('/dev/stderr', 1);
|
|
assert(stdin.fd === 0, `invalid handle for stdin (${stdin.fd})`);
|
|
assert(stdout.fd === 1, `invalid handle for stdout (${stdout.fd})`);
|
|
assert(stderr.fd === 2, `invalid handle for stderr (${stderr.fd})`);
|
|
},
|
|
staticInit() {
|
|
FS.nameTable = new Array(4096);
|
|
|
|
FS.mount(MEMFS, {}, '/');
|
|
|
|
FS.createDefaultDirectories();
|
|
FS.createDefaultDevices();
|
|
FS.createSpecialDirectories();
|
|
|
|
FS.filesystems = {
|
|
'MEMFS': MEMFS,
|
|
};
|
|
},
|
|
init(input, output, error) {
|
|
assert(!FS.initialized, 'FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)');
|
|
FS.initialized = true;
|
|
|
|
// Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here
|
|
input ??= Module['stdin'];
|
|
output ??= Module['stdout'];
|
|
error ??= Module['stderr'];
|
|
|
|
FS.createStandardStreams(input, output, error);
|
|
},
|
|
quit() {
|
|
FS.initialized = false;
|
|
// force-flush all streams, so we get musl std streams printed out
|
|
_fflush(0);
|
|
// close all of our streams
|
|
for (var stream of FS.streams) {
|
|
if (stream) {
|
|
FS.close(stream);
|
|
}
|
|
}
|
|
},
|
|
findObject(path, dontResolveLastLink) {
|
|
var ret = FS.analyzePath(path, dontResolveLastLink);
|
|
if (!ret.exists) {
|
|
return null;
|
|
}
|
|
return ret.object;
|
|
},
|
|
analyzePath(path, dontResolveLastLink) {
|
|
// operate from within the context of the symlink's target
|
|
try {
|
|
var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
|
|
path = lookup.path;
|
|
} catch (e) {
|
|
}
|
|
var ret = {
|
|
isRoot: false, exists: false, error: 0, name: null, path: null, object: null,
|
|
parentExists: false, parentPath: null, parentObject: null
|
|
};
|
|
try {
|
|
var lookup = FS.lookupPath(path, { parent: true });
|
|
ret.parentExists = true;
|
|
ret.parentPath = lookup.path;
|
|
ret.parentObject = lookup.node;
|
|
ret.name = PATH.basename(path);
|
|
lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
|
|
ret.exists = true;
|
|
ret.path = lookup.path;
|
|
ret.object = lookup.node;
|
|
ret.name = lookup.node.name;
|
|
ret.isRoot = lookup.path === '/';
|
|
} catch (e) {
|
|
ret.error = e.errno;
|
|
};
|
|
return ret;
|
|
},
|
|
createPath(parent, path, canRead, canWrite) {
|
|
parent = typeof parent == 'string' ? parent : FS.getPath(parent);
|
|
var parts = path.split('/').reverse();
|
|
while (parts.length) {
|
|
var part = parts.pop();
|
|
if (!part) continue;
|
|
var current = PATH.join2(parent, part);
|
|
try {
|
|
FS.mkdir(current);
|
|
} catch (e) {
|
|
if (e.errno != 20) throw e;
|
|
}
|
|
parent = current;
|
|
}
|
|
return current;
|
|
},
|
|
createFile(parent, name, properties, canRead, canWrite) {
|
|
var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name);
|
|
var mode = FS_getMode(canRead, canWrite);
|
|
return FS.create(path, mode);
|
|
},
|
|
createDataFile(parent, name, data, canRead, canWrite, canOwn) {
|
|
var path = name;
|
|
if (parent) {
|
|
parent = typeof parent == 'string' ? parent : FS.getPath(parent);
|
|
path = name ? PATH.join2(parent, name) : parent;
|
|
}
|
|
var mode = FS_getMode(canRead, canWrite);
|
|
var node = FS.create(path, mode);
|
|
if (data) {
|
|
if (typeof data == 'string') {
|
|
var arr = new Array(data.length);
|
|
for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i);
|
|
data = arr;
|
|
}
|
|
// make sure we can write to the file
|
|
FS.chmod(node, mode | 146);
|
|
var stream = FS.open(node, 577);
|
|
FS.write(stream, data, 0, data.length, 0, canOwn);
|
|
FS.close(stream);
|
|
FS.chmod(node, mode);
|
|
}
|
|
},
|
|
createDevice(parent, name, input, output) {
|
|
var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name);
|
|
var mode = FS_getMode(!!input, !!output);
|
|
FS.createDevice.major ??= 64;
|
|
var dev = FS.makedev(FS.createDevice.major++, 0);
|
|
// Create a fake device with a set of stream ops to emulate
// the old behavior.
|
|
FS.registerDevice(dev, {
|
|
open(stream) {
|
|
stream.seekable = false;
|
|
},
|
|
close(stream) {
|
|
// flush any pending line data
|
|
if (output?.buffer?.length) {
|
|
output(10);
|
|
}
|
|
},
|
|
read(stream, buffer, offset, length, pos /* ignored */) {
|
|
var bytesRead = 0;
|
|
for (var i = 0; i < length; i++) {
|
|
var result;
|
|
try {
|
|
result = input();
|
|
} catch (e) {
|
|
throw new FS.ErrnoError(29);
|
|
}
|
|
if (result === undefined && bytesRead === 0) {
|
|
throw new FS.ErrnoError(6);
|
|
}
|
|
if (result === null || result === undefined) break;
|
|
bytesRead++;
|
|
buffer[offset+i] = result;
|
|
}
|
|
if (bytesRead) {
|
|
stream.node.atime = Date.now();
|
|
}
|
|
return bytesRead;
|
|
},
|
|
write(stream, buffer, offset, length, pos) {
|
|
for (var i = 0; i < length; i++) {
|
|
try {
|
|
output(buffer[offset+i]);
|
|
} catch (e) {
|
|
throw new FS.ErrnoError(29);
|
|
}
|
|
}
|
|
if (length) {
|
|
stream.node.mtime = stream.node.ctime = Date.now();
|
|
}
|
|
return i;
|
|
}
|
|
});
|
|
return FS.mkdev(path, mode, dev);
|
|
},
|
|
forceLoadFile(obj) {
|
|
if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true;
|
|
if (typeof XMLHttpRequest != 'undefined') {
|
|
throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.");
|
|
} else { // Command-line.
|
|
try {
|
|
obj.contents = readBinary(obj.url);
|
|
obj.usedBytes = obj.contents.length;
|
|
} catch (e) {
|
|
throw new FS.ErrnoError(29);
|
|
}
|
|
}
|
|
},
|
|
createLazyFile(parent, name, url, canRead, canWrite) {
|
|
// Lazy chunked Uint8Array (implements get and length from Uint8Array).
|
|
// Actual getting is abstracted away for eventual reuse.
|
|
class LazyUint8Array {
|
|
lengthKnown = false;
|
|
chunks = []; // Loaded chunks. Index is the chunk number
|
|
get(idx) {
|
|
if (idx > this.length-1 || idx < 0) {
|
|
return undefined;
|
|
}
|
|
var chunkOffset = idx % this.chunkSize;
|
|
var chunkNum = (idx / this.chunkSize)|0;
|
|
return this.getter(chunkNum)[chunkOffset];
|
|
}
|
|
setDataGetter(getter) {
|
|
this.getter = getter;
|
|
}
|
|
cacheLength() {
|
|
// Find length
|
|
var xhr = new XMLHttpRequest();
|
|
xhr.open('HEAD', url, false);
|
|
xhr.send(null);
|
|
if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
|
|
var datalength = Number(xhr.getResponseHeader("Content-length"));
|
|
var header;
|
|
var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes";
|
|
var usesGzip = (header = xhr.getResponseHeader("Content-Encoding")) && header === "gzip";
|
|
|
|
var chunkSize = 1024*1024; // Chunk size in bytes
|
|
|
|
if (!hasByteServing) chunkSize = datalength;
|
|
|
|
// Function to get a range from the remote URL.
|
|
var doXHR = (from, to) => {
|
|
if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!");
|
|
if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!");
|
|
|
|
// TODO: Use mozResponseArrayBuffer, responseStream, etc. if available.
|
|
var xhr = new XMLHttpRequest();
|
|
xhr.open('GET', url, false);
|
|
if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
|
|
|
|
// Some hints to the browser that we want binary data.
|
|
xhr.responseType = 'arraybuffer';
|
|
if (xhr.overrideMimeType) {
|
|
xhr.overrideMimeType('text/plain; charset=x-user-defined');
|
|
}
|
|
|
|
xhr.send(null);
|
|
if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
|
|
if (xhr.response !== undefined) {
|
|
return new Uint8Array(/** @type{Array<number>} */(xhr.response || []));
|
|
}
|
|
return intArrayFromString(xhr.responseText || '', true);
|
|
};
|
|
var lazyArray = this;
|
|
lazyArray.setDataGetter((chunkNum) => {
|
|
var start = chunkNum * chunkSize;
|
|
var end = (chunkNum+1) * chunkSize - 1; // including this byte
|
|
end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block
|
|
if (typeof lazyArray.chunks[chunkNum] == 'undefined') {
|
|
lazyArray.chunks[chunkNum] = doXHR(start, end);
|
|
}
|
|
if (typeof lazyArray.chunks[chunkNum] == 'undefined') throw new Error('doXHR failed!');
|
|
return lazyArray.chunks[chunkNum];
|
|
});
|
|
|
|
if (usesGzip || !datalength) {
|
|
// if the server uses gzip or doesn't supply the length, we have to download the whole file to get the (uncompressed) length
chunkSize = datalength = 1; // this will force getter(0)/doXHR to download the whole file
|
|
datalength = this.getter(0).length;
|
|
chunkSize = datalength;
|
|
out("LazyFiles on gzip forces download of the whole file when length is accessed");
|
|
}
|
|
|
|
this._length = datalength;
|
|
this._chunkSize = chunkSize;
|
|
this.lengthKnown = true;
|
|
}
|
|
get length() {
|
|
if (!this.lengthKnown) {
|
|
this.cacheLength();
|
|
}
|
|
return this._length;
|
|
}
|
|
get chunkSize() {
|
|
if (!this.lengthKnown) {
|
|
this.cacheLength();
|
|
}
|
|
return this._chunkSize;
|
|
}
|
|
}
|
|
|
|
if (typeof XMLHttpRequest != 'undefined') {
|
|
if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc';
|
|
var lazyArray = new LazyUint8Array();
|
|
var properties = { isDevice: false, contents: lazyArray };
|
|
} else {
|
|
var properties = { isDevice: false, url: url };
|
|
}
|
|
|
|
var node = FS.createFile(parent, name, properties, canRead, canWrite);
|
|
// This is a total hack, but I want to get this lazy file code out of the
|
|
// core of MEMFS. If we want to keep this lazy file concept I feel it should
|
|
// be its own thin LAZYFS proxying calls to MEMFS.
|
|
if (properties.contents) {
|
|
node.contents = properties.contents;
|
|
} else if (properties.url) {
|
|
node.contents = null;
|
|
node.url = properties.url;
|
|
}
|
|
// Add a function that defers querying the file size until it is asked the first time.
|
|
Object.defineProperties(node, {
|
|
usedBytes: {
|
|
get: function() { return this.contents.length; }
|
|
}
|
|
});
|
|
// override each stream op with one that tries to force load the lazy file first
|
|
var stream_ops = {};
|
|
var keys = Object.keys(node.stream_ops);
|
|
keys.forEach((key) => {
|
|
var fn = node.stream_ops[key];
|
|
stream_ops[key] = (...args) => {
|
|
FS.forceLoadFile(node);
|
|
return fn(...args);
|
|
};
|
|
});
|
|
function writeChunks(stream, buffer, offset, length, position) {
|
|
var contents = stream.node.contents;
|
|
if (position >= contents.length)
|
|
return 0;
|
|
var size = Math.min(contents.length - position, length);
|
|
assert(size >= 0);
|
|
if (contents.slice) { // normal array
|
|
for (var i = 0; i < size; i++) {
|
|
buffer[offset + i] = contents[position + i];
|
|
}
|
|
} else {
|
|
for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR
|
|
buffer[offset + i] = contents.get(position + i);
|
|
}
|
|
}
|
|
return size;
|
|
}
|
|
// use a custom read function
|
|
stream_ops.read = (stream, buffer, offset, length, position) => {
|
|
FS.forceLoadFile(node);
|
|
return writeChunks(stream, buffer, offset, length, position)
|
|
};
|
|
// use a custom mmap function
|
|
stream_ops.mmap = (stream, length, position, prot, flags) => {
|
|
FS.forceLoadFile(node);
|
|
var ptr = mmapAlloc(length);
|
|
if (!ptr) {
|
|
throw new FS.ErrnoError(48);
|
|
}
|
|
writeChunks(stream, HEAP8, ptr, length, position);
|
|
return { ptr, allocated: true };
|
|
};
|
|
node.stream_ops = stream_ops;
|
|
return node;
|
|
},
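  // NOTE (editor's sketch): illustrative, commented-out use of FS.createLazyFile based
  // on the signature above. The mount path and URL are hypothetical. Because the lazy
  // reads go through synchronous XHR, this is only expected to work inside a web worker
  // (see the ENVIRONMENT_IS_WORKER check above).
  //
  //   // FS.createLazyFile('/data', 'big.bin', 'https://example.com/big.bin',
  //   //                   /*canRead=*/true, /*canWrite=*/false);
  //   // Subsequent FS reads fetch 1 MiB chunks on demand via HTTP Range requests.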
  absolutePath() {
    abort('FS.absolutePath has been removed; use PATH_FS.resolve instead');
  },
  createFolder() {
    abort('FS.createFolder has been removed; use FS.mkdir instead');
  },
  createLink() {
    abort('FS.createLink has been removed; use FS.symlink instead');
  },
  joinPath() {
    abort('FS.joinPath has been removed; use PATH.join instead');
  },
  mmapAlloc() {
    abort('FS.mmapAlloc has been replaced by the top level function mmapAlloc');
  },
  standardizePath() {
    abort('FS.standardizePath has been removed; use PATH.normalize instead');
  },
};

var SYSCALLS = {
  DEFAULT_POLLMASK: 5,
  calculateAt(dirfd, path, allowEmpty) {
    if (PATH.isAbs(path)) {
      return path;
    }
    // relative path
    var dir;
    if (dirfd === -100) {
      dir = FS.cwd();
    } else {
      var dirstream = SYSCALLS.getStreamFromFD(dirfd);
      dir = dirstream.path;
    }
    if (path.length == 0) {
      if (!allowEmpty) {
        throw new FS.ErrnoError(44);
      }
      return dir;
    }
    return dir + '/' + path;
  },
  writeStat(buf, stat) {
    HEAP32[((buf)>>2)] = stat.dev;
    HEAP32[(((buf)+(4))>>2)] = stat.mode;
    HEAPU32[(((buf)+(8))>>2)] = stat.nlink;
    HEAP32[(((buf)+(12))>>2)] = stat.uid;
    HEAP32[(((buf)+(16))>>2)] = stat.gid;
    HEAP32[(((buf)+(20))>>2)] = stat.rdev;
    HEAP64[(((buf)+(24))>>3)] = BigInt(stat.size);
    HEAP32[(((buf)+(32))>>2)] = 4096;
    HEAP32[(((buf)+(36))>>2)] = stat.blocks;
    var atime = stat.atime.getTime();
    var mtime = stat.mtime.getTime();
    var ctime = stat.ctime.getTime();
    HEAP64[(((buf)+(40))>>3)] = BigInt(Math.floor(atime / 1000));
    HEAPU32[(((buf)+(48))>>2)] = (atime % 1000) * 1000 * 1000;
    HEAP64[(((buf)+(56))>>3)] = BigInt(Math.floor(mtime / 1000));
    HEAPU32[(((buf)+(64))>>2)] = (mtime % 1000) * 1000 * 1000;
    HEAP64[(((buf)+(72))>>3)] = BigInt(Math.floor(ctime / 1000));
    HEAPU32[(((buf)+(80))>>2)] = (ctime % 1000) * 1000 * 1000;
    HEAP64[(((buf)+(88))>>3)] = BigInt(stat.ino);
    return 0;
  },
  writeStatFs(buf, stats) {
    HEAP32[(((buf)+(4))>>2)] = stats.bsize;
    HEAP32[(((buf)+(40))>>2)] = stats.bsize;
    HEAP32[(((buf)+(8))>>2)] = stats.blocks;
    HEAP32[(((buf)+(12))>>2)] = stats.bfree;
    HEAP32[(((buf)+(16))>>2)] = stats.bavail;
    HEAP32[(((buf)+(20))>>2)] = stats.files;
    HEAP32[(((buf)+(24))>>2)] = stats.ffree;
    HEAP32[(((buf)+(28))>>2)] = stats.fsid;
    HEAP32[(((buf)+(44))>>2)] = stats.flags; // ST_NOSUID
    HEAP32[(((buf)+(36))>>2)] = stats.namelen;
  },
  doMsync(addr, stream, len, flags, offset) {
    if (!FS.isFile(stream.node.mode)) {
      throw new FS.ErrnoError(43);
    }
    if (flags & 2) {
      // MAP_PRIVATE mappings do not need to be synced back to the underlying fs
      return 0;
    }
    var buffer = HEAPU8.slice(addr, addr + len);
    FS.msync(stream, buffer, offset, len, flags);
  },
  getStreamFromFD(fd) {
    var stream = FS.getStreamChecked(fd);
    return stream;
  },
  varargs: undefined,
  getStr(ptr) {
    var ret = UTF8ToString(ptr);
    return ret;
  },
};
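
// NOTE (editor's sketch): in SYSCALLS.calculateAt above, dirfd === -100 corresponds to
// AT_FDCWD, so relative paths are resolved against FS.cwd(); otherwise they are resolved
// against the directory stream's path, and absolute paths are returned unchanged. For
// example (hypothetical values), calculateAt(-100, 'tmp/x') with a cwd of '/home/web_user'
// yields '/home/web_user/tmp/x'.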

function ___syscall_fcntl64(fd, cmd, varargs) {
  SYSCALLS.varargs = varargs;
  try {

    var stream = SYSCALLS.getStreamFromFD(fd);
    switch (cmd) {
      case 0: {
        var arg = syscallGetVarargI();
        if (arg < 0) {
          return -28;
        }
        while (FS.streams[arg]) {
          arg++;
        }
        var newStream;
        newStream = FS.dupStream(stream, arg);
        return newStream.fd;
      }
      case 1:
      case 2:
        return 0; // FD_CLOEXEC makes no sense for a single process.
      case 3:
        return stream.flags;
      case 4: {
        var arg = syscallGetVarargI();
        stream.flags |= arg;
        return 0;
      }
      case 12: {
        var arg = syscallGetVarargP();
        var offset = 0;
        // We're always unlocked.
        HEAP16[(((arg)+(offset))>>1)] = 2;
        return 0;
      }
      case 13:
      case 14:
        // Pretend that the locking is successful. These are process-level locks,
        // and Emscripten programs are a single process. If we supported linking a
        // filesystem between programs, we'd need to do more here.
        // See https://github.com/emscripten-core/emscripten/issues/23697
        return 0;
    }
    return -28;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
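
// NOTE (editor's sketch): the numeric `cmd` values above appear to be the musl/Linux
// fcntl constants used by this ABI: 0 = F_DUPFD, 1 = F_GETFD, 2 = F_SETFD, 3 = F_GETFL,
// 4 = F_SETFL, and 12/13/14 = the F_GETLK/F_SETLK/F_SETLKW (64-bit lock) variants.
// Unrecognized commands return -28, which is EINVAL in the errno numbering used here.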

function ___syscall_ioctl(fd, op, varargs) {
  SYSCALLS.varargs = varargs;
  try {

    var stream = SYSCALLS.getStreamFromFD(fd);
    switch (op) {
      case 21509: {
        if (!stream.tty) return -59;
        return 0;
      }
      case 21505: {
        if (!stream.tty) return -59;
        if (stream.tty.ops.ioctl_tcgets) {
          var termios = stream.tty.ops.ioctl_tcgets(stream);
          var argp = syscallGetVarargP();
          HEAP32[((argp)>>2)] = termios.c_iflag || 0;
          HEAP32[(((argp)+(4))>>2)] = termios.c_oflag || 0;
          HEAP32[(((argp)+(8))>>2)] = termios.c_cflag || 0;
          HEAP32[(((argp)+(12))>>2)] = termios.c_lflag || 0;
          for (var i = 0; i < 32; i++) {
            HEAP8[(argp + i)+(17)] = termios.c_cc[i] || 0;
          }
          return 0;
        }
        return 0;
      }
      case 21510:
      case 21511:
      case 21512: {
        if (!stream.tty) return -59;
        return 0; // no-op, not actually adjusting terminal settings
      }
      case 21506:
      case 21507:
      case 21508: {
        if (!stream.tty) return -59;
        if (stream.tty.ops.ioctl_tcsets) {
          var argp = syscallGetVarargP();
          var c_iflag = HEAP32[((argp)>>2)];
          var c_oflag = HEAP32[(((argp)+(4))>>2)];
          var c_cflag = HEAP32[(((argp)+(8))>>2)];
          var c_lflag = HEAP32[(((argp)+(12))>>2)];
          var c_cc = [];
          for (var i = 0; i < 32; i++) {
            c_cc.push(HEAP8[(argp + i)+(17)]);
          }
          return stream.tty.ops.ioctl_tcsets(stream.tty, op, { c_iflag, c_oflag, c_cflag, c_lflag, c_cc });
        }
        return 0; // no-op, not actually adjusting terminal settings
      }
      case 21519: {
        if (!stream.tty) return -59;
        var argp = syscallGetVarargP();
        HEAP32[((argp)>>2)] = 0;
        return 0;
      }
      case 21520: {
        if (!stream.tty) return -59;
        return -28; // not supported
      }
      case 21531: {
        var argp = syscallGetVarargP();
        return FS.ioctl(stream, op, argp);
      }
      case 21523: {
        // TODO: in theory we should write to the winsize struct that gets
        // passed in, but for now musl doesn't read anything on it
        if (!stream.tty) return -59;
        if (stream.tty.ops.ioctl_tiocgwinsz) {
          var winsize = stream.tty.ops.ioctl_tiocgwinsz(stream.tty);
          var argp = syscallGetVarargP();
          HEAP16[((argp)>>1)] = winsize[0];
          HEAP16[(((argp)+(2))>>1)] = winsize[1];
        }
        return 0;
      }
      case 21524: {
        // TODO: technically, this ioctl call should change the window size,
        // but since emscripten doesn't have any concept of a terminal window
        // yet, we just silently throw it away as we do for TIOCGWINSZ.
        if (!stream.tty) return -59;
        return 0;
      }
      case 21515: {
        if (!stream.tty) return -59;
        return 0;
      }
      default: return -28; // not supported
    }
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
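
// NOTE (editor's sketch): the numeric `op` values above look like the standard Linux
// termios/tty ioctl requests: 21505-21508 = TCGETS/TCSETS/TCSETSW/TCSETSF,
// 21509-21512 = TCGETA/TCSETA/TCSETAW/TCSETAF, 21515 = TCFLSH,
// 21519/21520 = TIOCGPGRP/TIOCSPGRP, 21523/21524 = TIOCGWINSZ/TIOCSWINSZ, and
// 21531 = FIONREAD. -59 is ENOTTY and -28 is EINVAL in the errno numbering used here.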

function ___syscall_openat(dirfd, path, flags, varargs) {
  SYSCALLS.varargs = varargs;
  try {

    path = SYSCALLS.getStr(path);
    path = SYSCALLS.calculateAt(dirfd, path);
    var mode = varargs ? syscallGetVarargI() : 0;
    return FS.open(path, flags, mode).fd;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}

function ___syscall_unlinkat(dirfd, path, flags) {
  try {

    path = SYSCALLS.getStr(path);
    path = SYSCALLS.calculateAt(dirfd, path);
    if (flags === 0) {
      FS.unlink(path);
    } else if (flags === 512) {
      FS.rmdir(path);
    } else {
      abort('Invalid flags passed to unlinkat');
    }
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}

var __abort_js = () =>
  abort('native code called abort()');

var abortOnCannotGrowMemory = (requestedSize) => {
  abort(`Cannot enlarge memory arrays to size ${requestedSize} bytes (OOM). Either (1) compile with -sINITIAL_MEMORY=X with X higher than the current value ${HEAP8.length}, (2) compile with -sALLOW_MEMORY_GROWTH which allows increasing the size at runtime, or (3) if you want malloc to return NULL (0) instead of this abort, compile with -sABORTING_MALLOC=0`);
};

var _emscripten_resize_heap = (requestedSize) => {
  var oldSize = HEAPU8.length;
  // With CAN_ADDRESS_2GB or MEMORY64, pointers are already unsigned.
  requestedSize >>>= 0;
  abortOnCannotGrowMemory(requestedSize);
};
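
// NOTE (editor's sketch): in this build _emscripten_resize_heap never grows the heap;
// it routes every request to abortOnCannotGrowMemory, which matches a build compiled
// without -sALLOW_MEMORY_GROWTH (as the abort message above suggests).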

var runtimeKeepaliveCounter = 0;

var keepRuntimeAlive = () => noExitRuntime || runtimeKeepaliveCounter > 0;

var _proc_exit = (code) => {
  EXITSTATUS = code;
  if (!keepRuntimeAlive()) {
    Module['onExit']?.(code);
    ABORT = true;
  }
  quit_(code, new ExitStatus(code));
};

/** @suppress {duplicate } */
/** @param {boolean|number=} implicit */
var exitJS = (status, implicit) => {
  EXITSTATUS = status;

  checkUnflushedContent();

  // if exit() was called explicitly, warn the user if the runtime isn't actually being shut down
  if (keepRuntimeAlive() && !implicit) {
    var msg = `program exited (with status: ${status}), but keepRuntimeAlive() is set (counter=${runtimeKeepaliveCounter}) due to an async operation, so halting execution but not exiting the runtime or preventing further async execution (you can use emscripten_force_exit, if you want to force a true shutdown)`;
    err(msg);
  }

  _proc_exit(status);
};
var _exit = exitJS;

function _fd_close(fd) {
  try {

    var stream = SYSCALLS.getStreamFromFD(fd);
    FS.close(stream);
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return e.errno;
  }
}

/** @param {number=} offset */
var doReadv = (stream, iov, iovcnt, offset) => {
  var ret = 0;
  for (var i = 0; i < iovcnt; i++) {
    var ptr = HEAPU32[((iov)>>2)];
    var len = HEAPU32[(((iov)+(4))>>2)];
    iov += 8;
    var curr = FS.read(stream, HEAP8, ptr, len, offset);
    if (curr < 0) return -1;
    ret += curr;
    if (curr < len) break; // nothing more to read
    if (typeof offset != 'undefined') {
      offset += curr;
    }
  }
  return ret;
};
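
// NOTE (editor's sketch): doReadv (and doWritev below) walk a WASI-style iovec array in
// linear memory. Each entry is 8 bytes: a u32 buffer pointer at offset 0 and a u32 byte
// length at offset 4, which is why the loops read HEAPU32 at `iov` and `iov + 4` and then
// advance `iov` by 8 per entry.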

function _fd_read(fd, iov, iovcnt, pnum) {
  try {

    var stream = SYSCALLS.getStreamFromFD(fd);
    var num = doReadv(stream, iov, iovcnt);
    HEAPU32[((pnum)>>2)] = num;
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return e.errno;
  }
}

var INT53_MAX = 9007199254740992;

var INT53_MIN = -9007199254740992;
var bigintToI53Checked = (num) => (num < INT53_MIN || num > INT53_MAX) ? NaN : Number(num);
function _fd_seek(fd, offset, whence, newOffset) {
  offset = bigintToI53Checked(offset);

  try {

    if (isNaN(offset)) return 61;
    var stream = SYSCALLS.getStreamFromFD(fd);
    FS.llseek(stream, offset, whence);
    HEAP64[((newOffset)>>3)] = BigInt(stream.position);
    if (stream.getdents && offset === 0 && whence === 0) stream.getdents = null; // reset readdir state
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return e.errno;
  }
}
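
// NOTE (editor's sketch): `whence` takes the usual values (0 = SEEK_SET, 1 = SEEK_CUR,
// 2 = SEEK_END). The 61 returned for out-of-range offsets is EOVERFLOW in the errno
// numbering used here, since bigintToI53Checked only accepts offsets within +-2^53; the
// resulting position is written back as a 64-bit integer at newOffset.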

/** @param {number=} offset */
var doWritev = (stream, iov, iovcnt, offset) => {
  var ret = 0;
  for (var i = 0; i < iovcnt; i++) {
    var ptr = HEAPU32[((iov)>>2)];
    var len = HEAPU32[(((iov)+(4))>>2)];
    iov += 8;
    var curr = FS.write(stream, HEAP8, ptr, len, offset);
    if (curr < 0) return -1;
    ret += curr;
    if (curr < len) {
      // No more space to write.
      break;
    }
    if (typeof offset != 'undefined') {
      offset += curr;
    }
  }
  return ret;
};

function _fd_write(fd, iov, iovcnt, pnum) {
  try {

    var stream = SYSCALLS.getStreamFromFD(fd);
    var num = doWritev(stream, iov, iovcnt);
    HEAPU32[((pnum)>>2)] = num;
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return e.errno;
  }
}

var handleException = (e) => {
  // Certain exception types we do not treat as errors since they are used for
  // internal control flow.
  // 1. ExitStatus, which is thrown by exit()
  // 2. "unwind", which is thrown by emscripten_unwind_to_js_event_loop() and others
  //    that wish to return to JS event loop.
  if (e instanceof ExitStatus || e == 'unwind') {
    return EXITSTATUS;
  }
  checkStackCookie();
  if (e instanceof WebAssembly.RuntimeError) {
    if (_emscripten_stack_get_current() <= 0) {
      err('Stack overflow detected. You can try increasing -sSTACK_SIZE (currently set to 65536)');
    }
  }
  quit_(1, e);
};

var stringToUTF8 = (str, outPtr, maxBytesToWrite) => {
  assert(typeof maxBytesToWrite == 'number', 'stringToUTF8(str, outPtr, maxBytesToWrite) is missing the third parameter that specifies the length of the output buffer!');
  return stringToUTF8Array(str, HEAPU8, outPtr, maxBytesToWrite);
};

var stackAlloc = (sz) => __emscripten_stack_alloc(sz);
var stringToUTF8OnStack = (str) => {
  var size = lengthBytesUTF8(str) + 1;
  var ret = stackAlloc(size);
  stringToUTF8(str, ret, size);
  return ret;
};
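
// NOTE (editor's sketch): illustrative, commented-out use of stringToUTF8OnStack. Stack
// allocations live until the stack pointer is restored, so callers typically bracket them
// with stackSave()/stackRestore() (both exist in this build; see unexportedSymbols below).
// The wasm export _my_c_function is hypothetical.
//
//   // var sp = stackSave();
//   // var cstr = stringToUTF8OnStack('hello');  // NUL-terminated UTF-8 on the wasm stack
//   // _my_c_function(cstr);
//   // stackRestore(sp);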

FS.createPreloadedFile = FS_createPreloadedFile;
FS.staticInit();
// Set module methods based on EXPORTED_RUNTIME_METHODS
;
// End JS library code

function checkIncomingModuleAPI() {
  ignoredModuleProp('fetchSettings');
}
var wasmImports = {
  /** @export */
  __syscall_fcntl64: ___syscall_fcntl64,
  /** @export */
  __syscall_ioctl: ___syscall_ioctl,
  /** @export */
  __syscall_openat: ___syscall_openat,
  /** @export */
  __syscall_unlinkat: ___syscall_unlinkat,
  /** @export */
  _abort_js: __abort_js,
  /** @export */
  emscripten_resize_heap: _emscripten_resize_heap,
  /** @export */
  exit: _exit,
  /** @export */
  fd_close: _fd_close,
  /** @export */
  fd_read: _fd_read,
  /** @export */
  fd_seek: _fd_seek,
  /** @export */
  fd_write: _fd_write
};
var wasmExports;
createWasm();
var ___wasm_call_ctors = createExportWrapper('__wasm_call_ctors', 0);
var _main = Module['_main'] = createExportWrapper('__main_argc_argv', 2);
var _strerror = createExportWrapper('strerror', 1);
var _fflush = createExportWrapper('fflush', 1);
var _emscripten_stack_init = () => (_emscripten_stack_init = wasmExports['emscripten_stack_init'])();
var _emscripten_stack_get_free = () => (_emscripten_stack_get_free = wasmExports['emscripten_stack_get_free'])();
var _emscripten_stack_get_base = () => (_emscripten_stack_get_base = wasmExports['emscripten_stack_get_base'])();
var _emscripten_stack_get_end = () => (_emscripten_stack_get_end = wasmExports['emscripten_stack_get_end'])();
var __emscripten_stack_restore = (a0) => (__emscripten_stack_restore = wasmExports['_emscripten_stack_restore'])(a0);
var __emscripten_stack_alloc = (a0) => (__emscripten_stack_alloc = wasmExports['_emscripten_stack_alloc'])(a0);
var _emscripten_stack_get_current = () => (_emscripten_stack_get_current = wasmExports['emscripten_stack_get_current'])();

// include: postamble.js
// === Auto-generated postamble setup entry stuff ===

var missingLibrarySymbols = [
  'writeI53ToI64', 'writeI53ToI64Clamped', 'writeI53ToI64Signaling', 'writeI53ToU64Clamped',
  'writeI53ToU64Signaling', 'readI53FromI64', 'readI53FromU64', 'convertI32PairToI53',
  'convertI32PairToI53Checked', 'convertU32PairToI53', 'getTempRet0', 'setTempRet0',
  'zeroMemory', 'getHeapMax', 'growMemory', 'inetPton4', 'inetNtop4', 'inetPton6',
  'inetNtop6', 'readSockaddr', 'writeSockaddr', 'emscriptenLog', 'readEmAsmArgs',
  'jstoi_q', 'getExecutableName', 'listenOnce', 'autoResumeAudioContext', 'getDynCaller',
  'dynCall', 'runtimeKeepalivePush', 'runtimeKeepalivePop', 'callUserCallback', 'maybeExit',
  'asmjsMangle', 'alignMemory', 'HandleAllocator', 'getNativeTypeSize', 'addOnInit',
  'addOnPostCtor', 'addOnPreMain', 'addOnExit', 'STACK_SIZE', 'STACK_ALIGN', 'POINTER_SIZE',
  'ASSERTIONS', 'getCFunc', 'ccall', 'cwrap', 'uleb128Encode', 'sigToWasmTypes',
  'generateFuncType', 'convertJsFunctionToWasm', 'getEmptyTableSlot', 'updateTableMap',
  'getFunctionAddress', 'addFunction', 'removeFunction', 'reallyNegative', 'unSign',
  'strLen', 'reSign', 'formatString', 'intArrayToString', 'AsciiToString', 'stringToAscii',
  'UTF16ToString', 'stringToUTF16', 'lengthBytesUTF16', 'UTF32ToString', 'stringToUTF32',
  'lengthBytesUTF32', 'stringToNewUTF8', 'writeArrayToMemory', 'registerKeyEventCallback',
  'maybeCStringToJsString', 'findEventTarget', 'getBoundingClientRect', 'fillMouseEventData',
  'registerMouseEventCallback', 'registerWheelEventCallback', 'registerUiEventCallback',
  'registerFocusEventCallback', 'fillDeviceOrientationEventData',
  'registerDeviceOrientationEventCallback', 'fillDeviceMotionEventData',
  'registerDeviceMotionEventCallback', 'screenOrientation', 'fillOrientationChangeEventData',
  'registerOrientationChangeEventCallback', 'fillFullscreenChangeEventData',
  'registerFullscreenChangeEventCallback', 'JSEvents_requestFullscreen',
  'JSEvents_resizeCanvasForFullscreen', 'registerRestoreOldStyle',
  'hideEverythingExceptGivenElement', 'restoreHiddenElements', 'setLetterbox',
  'softFullscreenResizeWebGLRenderTarget', 'doRequestFullscreen',
  'fillPointerlockChangeEventData', 'registerPointerlockChangeEventCallback',
  'registerPointerlockErrorEventCallback', 'requestPointerLock',
  'fillVisibilityChangeEventData', 'registerVisibilityChangeEventCallback',
  'registerTouchEventCallback', 'fillGamepadEventData', 'registerGamepadEventCallback',
  'registerBeforeUnloadEventCallback', 'fillBatteryEventData', 'battery',
  'registerBatteryEventCallback', 'setCanvasElementSize', 'getCanvasElementSize',
  'jsStackTrace', 'getCallstack', 'convertPCtoSourceLocation', 'getEnvStrings',
  'checkWasiClock', 'wasiRightsToMuslOFlags', 'wasiOFlagsToMuslOFlags', 'safeSetTimeout',
  'setImmediateWrapped', 'safeRequestAnimationFrame', 'clearImmediateWrapped',
  'registerPostMainLoop', 'registerPreMainLoop', 'getPromise', 'makePromise',
  'idsToPromises', 'makePromiseCallback', 'ExceptionInfo', 'findMatchingCatch',
  'Browser_asyncPrepareDataCounter', 'isLeapYear', 'ydayFromDate', 'arraySum', 'addDays',
  'getSocketFromFD', 'getSocketAddress', 'FS_unlink', 'FS_mkdirTree', '_setNetworkCallback',
  'heapObjectForWebGLType', 'toTypedArrayIndex', 'webgl_enable_ANGLE_instanced_arrays',
  'webgl_enable_OES_vertex_array_object', 'webgl_enable_WEBGL_draw_buffers',
  'webgl_enable_WEBGL_multi_draw', 'webgl_enable_EXT_polygon_offset_clamp',
  'webgl_enable_EXT_clip_control', 'webgl_enable_WEBGL_polygon_mode', 'emscriptenWebGLGet',
  'computeUnpackAlignedImageSize', 'colorChannelsInGlTextureFormat',
  'emscriptenWebGLGetTexPixelData', 'emscriptenWebGLGetUniform', 'webglGetUniformLocation',
  'webglPrepareUniformLocationsBeforeFirstUse', 'webglGetLeftBracePos',
  'emscriptenWebGLGetVertexAttrib', '__glGetActiveAttribOrUniform', 'writeGLArray',
  'registerWebGlEventCallback', 'runAndAbortIfError', 'ALLOC_NORMAL', 'ALLOC_STACK',
  'allocate', 'writeStringToMemory', 'writeAsciiToMemory', 'demangle', 'stackTrace',
];
missingLibrarySymbols.forEach(missingLibrarySymbol);

var unexportedSymbols = [
  'run', 'addRunDependency', 'removeRunDependency', 'out', 'err', 'callMain', 'abort',
  'wasmMemory', 'wasmExports', 'writeStackCookie', 'checkStackCookie', 'INT53_MAX',
  'INT53_MIN', 'bigintToI53Checked', 'stackSave', 'stackRestore', 'stackAlloc',
  'ptrToString', 'exitJS', 'abortOnCannotGrowMemory', 'ENV', 'ERRNO_CODES', 'strError',
  'DNS', 'Protocols', 'Sockets', 'timers', 'warnOnce', 'readEmAsmArgsArray', 'jstoi_s',
  'handleException', 'keepRuntimeAlive', 'asyncLoad', 'mmapAlloc', 'wasmTable',
  'noExitRuntime', 'addOnPreRun', 'addOnPostRun', 'freeTableIndexes', 'functionsInTableMap',
  'setValue', 'getValue', 'PATH', 'PATH_FS', 'UTF8Decoder', 'UTF8ArrayToString',
  'UTF8ToString', 'stringToUTF8Array', 'stringToUTF8', 'lengthBytesUTF8',
  'intArrayFromString', 'UTF16Decoder', 'stringToUTF8OnStack', 'JSEvents',
  'specialHTMLTargets', 'findCanvasEventTarget', 'currentFullscreenStrategy',
  'restoreOldWindowedStyle', 'UNWIND_CACHE', 'ExitStatus', 'doReadv', 'doWritev',
  'initRandomFill', 'randomFill', 'emSetImmediate', 'emClearImmediate_deps',
  'emClearImmediate', 'promiseMap', 'uncaughtExceptionCount', 'exceptionLast',
  'exceptionCaught', 'Browser', 'getPreloadedImageData__data', 'wget',
  'MONTH_DAYS_REGULAR', 'MONTH_DAYS_LEAP', 'MONTH_DAYS_REGULAR_CUMULATIVE',
  'MONTH_DAYS_LEAP_CUMULATIVE', 'SYSCALLS', 'preloadPlugins', 'FS_createPreloadedFile',
  'FS_modeStringToFlags', 'FS_getMode', 'FS_stdin_getChar_buffer', 'FS_stdin_getChar',
  'FS_createPath', 'FS_createDevice', 'FS_readFile', 'FS', 'FS_createDataFile',
  'FS_createLazyFile', 'MEMFS', 'TTY', 'PIPEFS', 'SOCKFS', 'tempFixedLengthArray',
  'miniTempWebGLFloatBuffers', 'miniTempWebGLIntBuffers', 'GL', 'AL', 'GLUT', 'EGL',
  'GLEW', 'IDBStore', 'SDL', 'SDL_gfx', 'allocateUTF8', 'allocateUTF8OnStack', 'print',
  'printErr',
];
unexportedSymbols.forEach(unexportedRuntimeSymbol);

var calledRun;

function callMain(args = []) {
  assert(runDependencies == 0, 'cannot call main when async dependencies remain! (listen on Module["onRuntimeInitialized"])');
  assert(typeof onPreRuns === 'undefined' || onPreRuns.length == 0, 'cannot call main when preRun functions remain to be called');

  var entryFunction = _main;

  args.unshift(thisProgram);

  var argc = args.length;
  var argv = stackAlloc((argc + 1) * 4);
  var argv_ptr = argv;
  args.forEach((arg) => {
    HEAPU32[((argv_ptr)>>2)] = stringToUTF8OnStack(arg);
    argv_ptr += 4;
  });
  HEAPU32[((argv_ptr)>>2)] = 0;

  try {

    var ret = entryFunction(argc, argv);

    // if we're not running an evented main loop, it's time to exit
    exitJS(ret, /* implicit = */ true);
    return ret;
  } catch (e) {
    return handleException(e);
  }
}

function stackCheckInit() {
  // This is normally called automatically during __wasm_call_ctors, but we need
  // these values before running any of the ctors, so we call it redundantly here.
  _emscripten_stack_init();
  // TODO(sbc): Move writeStackCookie to native to avoid this.
  writeStackCookie();
}

function run(args = arguments_) {

  if (runDependencies > 0) {
    dependenciesFulfilled = run;
    return;
  }

  stackCheckInit();

  preRun();

  // a preRun added a dependency, run will be called later
  if (runDependencies > 0) {
    dependenciesFulfilled = run;
    return;
  }

  function doRun() {
    // run may have just been called through dependencies being fulfilled just in this very frame,
    // or while the async setStatus time below was happening
    assert(!calledRun);
    calledRun = true;
    Module['calledRun'] = true;

    if (ABORT) return;

    initRuntime();

    preMain();

    Module['onRuntimeInitialized']?.();
    consumedModuleProp('onRuntimeInitialized');

    var noInitialRun = Module['noInitialRun'];
    legacyModuleProp('noInitialRun', 'noInitialRun');
    if (!noInitialRun) callMain(args);

    postRun();
  }

  if (Module['setStatus']) {
    Module['setStatus']('Running...');
    setTimeout(() => {
      setTimeout(() => Module['setStatus'](''), 1);
      doRun();
    }, 1);
  } else {
    doRun();
  }
  checkStackCookie();
}
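
// NOTE (editor's sketch): illustrative, commented-out Module hooks that run() above
// consumes. The keys are the ones read by this file; the handler bodies are placeholders.
//
//   // var Module = {
//   //   setStatus: (text) => console.log('[status]', text),       // shown around doRun()
//   //   onRuntimeInitialized: () => console.log('runtime ready'), // fired before callMain()
//   //   noInitialRun: false,  // set to true to skip the automatic callMain()
//   // };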

function checkUnflushedContent() {
  // Compiler settings do not allow exiting the runtime, so flushing
  // the streams is not possible. But in ASSERTIONS mode we check
  // if there was something to flush, and if so tell the user they
  // should request that the runtime be exitable.
  // Normally we would not even include flush() at all, but in ASSERTIONS
  // builds we do so just for this check, and here we see if there is any
  // content to flush, that is, we check if there would have been
  // something a non-ASSERTIONS build would not have seen.
  // How we flush the streams depends on whether we are in SYSCALLS_REQUIRE_FILESYSTEM=0
  // mode (which has its own special function for this; otherwise, all
  // the code is inside libc).
  var oldOut = out;
  var oldErr = err;
  var has = false;
  out = err = (x) => {
    has = true;
  };
  try { // it doesn't matter if it fails
    _fflush(0);
    // also flush in the JS FS layer
    ['stdout', 'stderr'].forEach((name) => {
      var info = FS.analyzePath('/dev/' + name);
      if (!info) return;
      var stream = info.object;
      var rdev = stream.rdev;
      var tty = TTY.ttys[rdev];
      if (tty?.output?.length) {
        has = true;
      }
    });
  } catch(e) {}
  out = oldOut;
  err = oldErr;
  if (has) {
    warnOnce('stdio streams had content in them that was not flushed. you should set EXIT_RUNTIME to 1 (see the Emscripten FAQ), or make sure to emit a newline when you printf etc.');
  }
}

if (Module['preInit']) {
  if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']];
  while (Module['preInit'].length > 0) {
    Module['preInit'].pop()();
  }
}
consumedModuleProp('preInit');

run();

// end include: postamble.js