diff --git a/.github/workflows/test-shared.yml b/.github/workflows/test-shared.yml
index 7f92b98929b8be..452cb1d6db8fa3 100644
--- a/.github/workflows/test-shared.yml
+++ b/.github/workflows/test-shared.yml
@@ -178,7 +178,7 @@ jobs:
         uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd  # v8.0.0
         with:
           script: |
-            core.exportVariable('SCCACHE_GHA_VERSION', 'on');
+            core.exportVariable('SCCACHE_GHA_ENABLED', 'on');
             core.exportVariable('ACTIONS_CACHE_SERVICE_V2', 'on');
             core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
             core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
@@ -195,7 +195,7 @@ jobs:
           nix-shell \
             -I nixpkgs=./tools/nix/pkgs.nix \
             --pure --keep TAR_DIR --keep FLAKY_TESTS \
-            --keep SCCACHE_GHA_VERSION --keep ACTIONS_CACHE_SERVICE_V2 --keep ACTIONS_RESULTS_URL --keep ACTIONS_RUNTIME_TOKEN \
+            --keep SCCACHE_GHA_ENABLED --keep ACTIONS_CACHE_SERVICE_V2 --keep ACTIONS_RESULTS_URL --keep ACTIONS_RUNTIME_TOKEN \
             --arg loadJSBuiltinsDynamically false \
             --arg ccache '(import <nixpkgs> {}).sccache' \
             --arg devTools '[]' \
diff --git a/lib/_http_client.js b/lib/_http_client.js
index ee4f47be64ab3c..68fec845695f26 100644
--- a/lib/_http_client.js
+++ b/lib/_http_client.js
@@ -870,7 +870,11 @@ function responseOnTimeout() {
 function requestOnFinish() {
   const req = this;
 
-  if (req.shouldKeepAlive && req._ended)
+  // If the response ends before this request finishes writing, `responseOnEnd()`
+  // already released the socket. When `finish` fires later, that socket may
+  // belong to a different request, so only call `responseKeepAlive()` when the
+  // original request is still alive (`!req.destroyed`).
+  if (req.shouldKeepAlive && req._ended && !req.destroyed)
     responseKeepAlive(req);
 }
 
diff --git a/lib/internal/worker.js b/lib/internal/worker.js
index 08e87d07e7eb80..2a4caed82cf7c5 100644
--- a/lib/internal/worker.js
+++ b/lib/internal/worker.js
@@ -112,8 +112,8 @@ if (isMainThread) {
   cwdCounter = new Uint32Array(constructSharedArrayBuffer(4));
   const originalChdir = process.chdir;
   process.chdir = function(path) {
-    AtomicsAdd(cwdCounter, 0, 1);
     originalChdir(path);
+    AtomicsAdd(cwdCounter, 0, 1);
   };
 }
 
diff --git a/lib/zlib.js b/lib/zlib.js
index 655dcb517b968b..056b1a13a17392 100644
--- a/lib/zlib.js
+++ b/lib/zlib.js
@@ -830,11 +830,29 @@ function Brotli(opts, mode) {
     });
   }
 
+  let dictionary = opts?.dictionary;
+  if (dictionary !== undefined && !isArrayBufferView(dictionary)) {
+    if (isAnyArrayBuffer(dictionary)) {
+      dictionary = Buffer.from(dictionary);
+    } else {
+      throw new ERR_INVALID_ARG_TYPE(
+        'options.dictionary',
+        ['Buffer', 'TypedArray', 'DataView', 'ArrayBuffer'],
+        dictionary,
+      );
+    }
+  }
+
   const handle = mode === BROTLI_DECODE ?
    new binding.BrotliDecoder(mode) : new binding.BrotliEncoder(mode);
 
   this._writeState = new Uint32Array(2);
-  handle.init(brotliInitParamsArray, this._writeState, processCallback);
+  handle.init(
+    brotliInitParamsArray,
+    this._writeState,
+    processCallback,
+    dictionary,
+  );
   ZlibBase.call(this, opts, mode, handle, brotliDefaultOpts);
 }
 
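Reviewer note: a minimal round-trip sketch of the option this patch adds. The `dictionary` option is new in this PR; per the validation above it accepts a Buffer, TypedArray, DataView, or ArrayBuffer, and both sides must use identical dictionary bytes:

    const zlib = require('zlib');
    const assert = require('assert');

    // The shared dictionary must match on the compressing and
    // decompressing side.
    const dictionary = Buffer.from('boilerplate shared by many payloads');
    const input = Buffer.from('boilerplate shared by many payloads, twice');

    const compressed = zlib.brotliCompressSync(input, { dictionary });
    const roundTrip = zlib.brotliDecompressSync(compressed, { dictionary });
    assert.strictEqual(roundTrip.toString(), input.toString());
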
diff --git a/src/node_zlib.cc b/src/node_zlib.cc
index d3bd0f6f6540b4..9d49f13d07c125 100644
--- a/src/node_zlib.cc
+++ b/src/node_zlib.cc
@@ -40,6 +40,7 @@
 #include "brotli/decode.h"
 #include "brotli/encode.h"
+#include "brotli/shared_dictionary.h"
 #include "zlib.h"
 #include "zstd.h"
 #include "zstd_errors.h"
@@ -256,7 +257,7 @@ class BrotliEncoderContext final : public BrotliContext {
  public:
   void Close();
   void DoThreadPoolWork();
-  CompressionError Init();
+  CompressionError Init(std::vector<uint8_t>&& dictionary = {});
   CompressionError ResetStream();
   CompressionError SetParams(int key, uint32_t value);
   CompressionError GetErrorInfo() const;
@@ -268,13 +269,18 @@ class BrotliEncoderContext final : public BrotliContext {
  private:
   bool last_result_ = false;
   DeleteFnPtr<BrotliEncoderState, BrotliEncoderDestroyInstance> state_;
+  DeleteFnPtr<BrotliEncoderPreparedDictionary,
+              BrotliEncoderDestroyPreparedDictionary>
+      prepared_dictionary_;
+  // Dictionary data must remain valid while the prepared dictionary is alive.
+  std::vector<uint8_t> dictionary_;
 };
 
 class BrotliDecoderContext final : public BrotliContext {
  public:
   void Close();
   void DoThreadPoolWork();
-  CompressionError Init();
+  CompressionError Init(std::vector<uint8_t>&& dictionary = {});
   CompressionError ResetStream();
   CompressionError SetParams(int key, uint32_t value);
   CompressionError GetErrorInfo() const;
@@ -288,6 +294,8 @@ class BrotliDecoderContext final : public BrotliContext {
   BrotliDecoderErrorCode error_ = BROTLI_DECODER_NO_ERROR;
   std::string error_string_;
   DeleteFnPtr<BrotliDecoderState, BrotliDecoderDestroyInstance> state_;
+  // Dictionary data must remain valid for the lifetime of the decoder.
+  std::vector<uint8_t> dictionary_;
 };
 
 class ZstdContext : public MemoryRetainer {
@@ -830,7 +838,8 @@ class BrotliCompressionStream final :
   static void Init(const FunctionCallbackInfo<Value>& args) {
     BrotliCompressionStream* wrap;
     ASSIGN_OR_RETURN_UNWRAP(&wrap, args.This());
-    CHECK(args.Length() == 3 && "init(params, writeResult, writeCallback)");
+    CHECK((args.Length() == 3 || args.Length() == 4) &&
+          "init(params, writeResult, writeCallback[, dictionary])");
     CHECK(args[1]->IsUint32Array());
     CHECK_GE(args[1].As<Uint32Array>()->Length(), 2);
 
@@ -841,7 +850,18 @@ class BrotliCompressionStream final :
     wrap->InitStream(write_result, write_js_callback);
 
     AllocScope alloc_scope(wrap);
-    CompressionError err = wrap->context()->Init();
+    std::vector<uint8_t> dictionary;
+    if (args.Length() == 4 && !args[3]->IsUndefined()) {
+      if (!args[3]->IsArrayBufferView()) {
+        THROW_ERR_INVALID_ARG_TYPE(
+            wrap->env(), "dictionary must be an ArrayBufferView if provided");
+        return;
+      }
+      ArrayBufferViewContents<uint8_t> contents(args[3]);
+      dictionary.assign(contents.data(), contents.data() + contents.length());
+    }
+
+    CompressionError err = wrap->context()->Init(std::move(dictionary));
     if (err.IsError()) {
       wrap->EmitError(err);
       // TODO(addaleax): Sometimes we generate better error codes in C++ land,
@@ -1387,23 +1407,57 @@ void BrotliEncoderContext::DoThreadPoolWork() {
 
 void BrotliEncoderContext::Close() {
   state_.reset();
+  prepared_dictionary_.reset();
+  dictionary_.clear();
   mode_ = NONE;
 }
 
-CompressionError BrotliEncoderContext::Init() {
+CompressionError BrotliEncoderContext::Init(std::vector<uint8_t>&& dictionary) {
   brotli_alloc_func alloc = CompressionStreamMemoryOwner::AllocForBrotli;
   brotli_free_func free = CompressionStreamMemoryOwner::FreeForZlib;
   void* opaque = CompressionStream<BrotliEncoderContext>::AllocatorOpaquePointerForContext(
       this);
+
+  // Clean up any previous dictionary state before re-initializing.
+  prepared_dictionary_.reset();
+  dictionary_.clear();
+
   state_.reset(BrotliEncoderCreateInstance(alloc, free, opaque));
 
   if (!state_) {
     return CompressionError("Could not initialize Brotli instance",
                             "ERR_ZLIB_INITIALIZATION_FAILED",
                             -1);
-  } else {
-    return CompressionError {};
   }
+
+  if (!dictionary.empty()) {
+    // The dictionary data must remain valid for the lifetime of the prepared
+    // dictionary, so take ownership via move.
+    dictionary_ = std::move(dictionary);
+
+    prepared_dictionary_.reset(
+        BrotliEncoderPrepareDictionary(BROTLI_SHARED_DICTIONARY_RAW,
+                                       dictionary_.size(),
+                                       dictionary_.data(),
+                                       BROTLI_MAX_QUALITY,
+                                       alloc,
+                                       free,
+                                       opaque));
+    if (!prepared_dictionary_) {
+      return CompressionError("Failed to prepare brotli dictionary",
+                              "ERR_ZLIB_DICTIONARY_LOAD_FAILED",
+                              -1);
+    }
+
+    if (!BrotliEncoderAttachPreparedDictionary(state_.get(),
+                                               prepared_dictionary_.get())) {
+      return CompressionError("Failed to attach brotli dictionary",
+                              "ERR_ZLIB_DICTIONARY_LOAD_FAILED",
+                              -1);
+    }
+  }
+
+  return CompressionError{};
 }
 
 CompressionError BrotliEncoderContext::ResetStream() {
@@ -1435,6 +1489,7 @@ CompressionError BrotliEncoderContext::GetErrorInfo() const {
 
 void BrotliDecoderContext::Close() {
   state_.reset();
+  dictionary_.clear();
   mode_ = NONE;
 }
 
@@ -1455,20 +1510,39 @@ void BrotliDecoderContext::DoThreadPoolWork() {
   }
 }
 
-CompressionError BrotliDecoderContext::Init() {
+CompressionError BrotliDecoderContext::Init(std::vector<uint8_t>&& dictionary) {
   brotli_alloc_func alloc = CompressionStreamMemoryOwner::AllocForBrotli;
   brotli_free_func free = CompressionStreamMemoryOwner::FreeForZlib;
   void* opaque = CompressionStream<BrotliDecoderContext>::AllocatorOpaquePointerForContext(
       this);
+
+  // Clean up any previous dictionary state before re-initializing.
+  dictionary_.clear();
+
   state_.reset(BrotliDecoderCreateInstance(alloc, free, opaque));
 
   if (!state_) {
     return CompressionError("Could not initialize Brotli instance",
                             "ERR_ZLIB_INITIALIZATION_FAILED",
                             -1);
-  } else {
-    return CompressionError {};
   }
+
+  if (!dictionary.empty()) {
+    // The dictionary data must remain valid for the lifetime of the decoder,
+    // so take ownership via move.
+    dictionary_ = std::move(dictionary);
+
+    if (!BrotliDecoderAttachDictionary(state_.get(),
+                                       BROTLI_SHARED_DICTIONARY_RAW,
+                                       dictionary_.size(),
+                                       dictionary_.data())) {
+      return CompressionError("Failed to attach brotli dictionary",
+                              "ERR_ZLIB_DICTIONARY_LOAD_FAILED",
+                              -1);
+    }
+  }
+
+  return CompressionError{};
 }
 
 CompressionError BrotliDecoderContext::ResetStream() {
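Reviewer note: the decoder-side failure paths above surface in JS as ordinary zlib errors. A small sketch of what a caller sees when the dictionary is missing (error message strings taken from this patch; the exact `err.code` depends on how `GetErrorInfo()` maps the brotli error, so only the `ERR_` prefix is assumed here, matching the test file below):

    const zlib = require('zlib');

    const dictionary = Buffer.from('shared dictionary bytes');
    const compressed = zlib.brotliCompressSync('payload', { dictionary });

    try {
      // Missing dictionary: the stream references dictionary words the
      // decoder does not have, so decompression fails.
      zlib.brotliDecompressSync(compressed);
    } catch (err) {
      console.error(err.code); // some ERR_-prefixed zlib error code
    }
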
diff --git a/test/fixtures/wpt/FileAPI/BlobURL/cross-partition-navigation.https.html b/test/fixtures/wpt/FileAPI/BlobURL/cross-partition-navigation.https.html
new file mode 100644
index 00000000000000..a92e0f7a8a9b20
--- /dev/null
+++ b/test/fixtures/wpt/FileAPI/BlobURL/cross-partition-navigation.https.html
@@ -0,0 +1,251 @@
[… HTML test content elided …]
diff --git a/test/fixtures/wpt/FileAPI/BlobURL/cross-partition-self-fetch.https.html b/test/fixtures/wpt/FileAPI/BlobURL/cross-partition-self-fetch.https.html
new file mode 100644
index 00000000000000..cbff2fa18fa15d
--- /dev/null
+++ b/test/fixtures/wpt/FileAPI/BlobURL/cross-partition-self-fetch.https.html
@@ -0,0 +1,76 @@
[… HTML test content elided …]
diff --git a/test/fixtures/wpt/FileAPI/BlobURL/cross-partition-worker-creation.https.html b/test/fixtures/wpt/FileAPI/BlobURL/cross-partition-worker-creation.https.html
new file mode 100644
index 00000000000000..e8d0bad4c97580
--- /dev/null
+++ b/test/fixtures/wpt/FileAPI/BlobURL/cross-partition-worker-creation.https.html
@@ -0,0 +1,117 @@
[… HTML test content elided …]
diff --git a/test/fixtures/wpt/FileAPI/BlobURL/cross-partition.https.html b/test/fixtures/wpt/FileAPI/BlobURL/cross-partition.https.html
new file mode 100644
index 00000000000000..cb66ebe77093bc
--- /dev/null
+++ b/test/fixtures/wpt/FileAPI/BlobURL/cross-partition.https.html
@@ -0,0 +1,482 @@
[… HTML test content elided …]
diff --git a/test/fixtures/wpt/FileAPI/BlobURL/cross-partition.tentative.https.html b/test/fixtures/wpt/FileAPI/BlobURL/cross-partition.tentative.https.html
deleted file mode 100644
index c75ce07d054eb7..00000000000000
--- a/test/fixtures/wpt/FileAPI/BlobURL/cross-partition.tentative.https.html
+++ /dev/null
@@ -1,276 +0,0 @@
[… HTML test content elided …]
diff --git a/test/fixtures/wpt/FileAPI/BlobURL/resources/common.js b/test/fixtures/wpt/FileAPI/BlobURL/resources/common.js
new file mode 100644
index 00000000000000..60a97d6a5ea5fe
--- /dev/null
+++ b/test/fixtures/wpt/FileAPI/BlobURL/resources/common.js
@@ -0,0 +1,33 @@
+const add_iframe_js = (iframe_origin, response_queue_uuid) => `
+  const importScript = ${importScript};
+  await importScript("/html/cross-origin-embedder-policy/credentialless" +
+    "/resources/common.js");
+  await importScript("/html/anonymous-iframe/resources/common.js");
+  await importScript("/common/utils.js");
+
+  // dispatcher.js has already been loaded by the popup this is running in.
+  await send("${response_queue_uuid}", newIframe("${iframe_origin}"));
+`;
+
+async function create_test_iframes(t, response_queue_uuid) {
+  const same_site_origin = get_host_info().HTTPS_ORIGIN;
+  const cross_site_origin = get_host_info().HTTPS_NOTSAMESITE_ORIGIN;
+
+  assert_equals("https://" + window.location.host, same_site_origin,
+    "this test assumes that the page's window.location.host corresponds to " +
+    "get_host_info().HTTPS_ORIGIN");
+
+  // Create a same-origin iframe in a cross-site popup.
+  const not_same_site_popup_uuid = newPopup(t, cross_site_origin);
+  await send(not_same_site_popup_uuid,
+    add_iframe_js(same_site_origin, response_queue_uuid));
+  const cross_site_iframe_uuid = await receive(response_queue_uuid);
+
+  // Create a same-origin iframe in a same-site popup.
+  const same_origin_popup_uuid = newPopup(t, same_site_origin);
+  await send(same_origin_popup_uuid,
+    add_iframe_js(same_site_origin, response_queue_uuid));
+  const same_site_iframe_uuid = await receive(response_queue_uuid);
+
+  return [cross_site_iframe_uuid, same_site_iframe_uuid];
+}
\ No newline at end of file
diff --git a/test/fixtures/wpt/FileAPI/BlobURL/support/file_test2.txt b/test/fixtures/wpt/FileAPI/BlobURL/support/file_test2.txt
deleted file mode 100644
index e69de29bb2d1d6..00000000000000
diff --git a/test/fixtures/wpt/FileAPI/WEB_FEATURES.yml b/test/fixtures/wpt/FileAPI/WEB_FEATURES.yml
new file mode 100644
index 00000000000000..8d279ceaaeab21
--- /dev/null
+++ b/test/fixtures/wpt/FileAPI/WEB_FEATURES.yml
@@ -0,0 +1,3 @@
+features:
+- name: file
+  files: "**"
diff --git a/test/fixtures/wpt/FileAPI/blob/Blob-constructor.any.js b/test/fixtures/wpt/FileAPI/blob/Blob-constructor.any.js
index 6dc44e8e156cce..03155b99cb20ea 100644
--- a/test/fixtures/wpt/FileAPI/blob/Blob-constructor.any.js
+++ b/test/fixtures/wpt/FileAPI/blob/Blob-constructor.any.js
@@ -104,6 +104,59 @@ test_blob(function() {
   desc: "A Uint8Array object should be treated as a sequence for the blobParts argument."
 });
 
+test(function() {
+  assert_throws_js(TypeError, function() { new Blob(true) });
+}, "blobParts not an object: boolean");
+
+test(function() {
+  Boolean.prototype[Symbol.iterator] = () => ["FAIL"][Symbol.iterator]()
+  this.add_cleanup(function() { delete Boolean.prototype[Symbol.iterator] });
+  assert_throws_js(TypeError, function() { new Blob(true) });
+}, "blobParts not an object: boolean with Boolean.prototype[Symbol.iterator]");
+
+test(function() {
+  assert_throws_js(TypeError, function() { new Blob("fail") });
+}, "blobParts not an object: string");
+
+test(function() {
+  const original = String.prototype[Symbol.iterator];
+  String.prototype[Symbol.iterator] = () => ["FAIL"][Symbol.iterator]()
+  this.add_cleanup(function() { String.prototype[Symbol.iterator] = original });
+  assert_throws_js(TypeError, function() { new Blob("fail") });
+}, "blobParts not an object: string with String.prototype[Symbol.iterator]");
+
+test(function() {
+  assert_throws_js(TypeError, function() { new Blob(7) });
+}, "blobParts not an object: number");
+
+test(function() {
+  Number.prototype[Symbol.iterator] = () => ["FAIL"][Symbol.iterator]()
+  this.add_cleanup(function() { delete Number.prototype[Symbol.iterator] });
+  assert_throws_js(TypeError, function() { new Blob(7) });
+}, "blobParts not an object: number with Number.prototype[Symbol.iterator]");
+
+test(function() {
+  assert_throws_js(TypeError, function() { new Blob(7n) });
+}, "blobParts not an object: BigInt");
+
+test(function() {
+  BigInt.prototype[Symbol.iterator] = () => ["FAIL"][Symbol.iterator]()
+  this.add_cleanup(function() { delete BigInt.prototype[Symbol.iterator] });
+  assert_throws_js(TypeError, function() { new Blob(7n) });
+}, "blobParts not an object: BigInt with BigInt.prototype[Symbol.iterator]");
+
+test(function() {
+  const symbol = Symbol();
+  assert_throws_js(TypeError, function() { new Blob(symbol) });
+}, "blobParts not an object: Symbol");
+
+test(function() {
+  const symbol = Symbol();
+  Symbol.prototype[Symbol.iterator] = () => ["FAIL"][Symbol.iterator]()
+  this.add_cleanup(function() { delete Symbol.prototype[Symbol.iterator] });
+  assert_throws_js(TypeError, function() { new Blob(symbol) });
+}, "blobParts not an object: Symbol with Symbol.prototype[Symbol.iterator]");
+
 var test_error = {
   name: "test",
   message: "test error",
@@ -290,14 +343,22 @@ test_blob(function() {
     new Int16Array([0x4150, 0x5353]),
     new Uint32Array([0x53534150]),
     new Int32Array([0x53534150]),
-    new Float16Array([2.65625, 58.59375]),
     new Float32Array([0xD341500000])
   ]);
 }, {
-  expected: "PASSPASSPASSPASSPASSPASSPASSPASS",
+  expected: "PASSPASSPASSPASSPASSPASSPASS",
   type: "",
   desc: "Passing typed arrays as elements of the blobParts array should work."
 });
+test_blob(function() {
+  return new Blob([
+    new Float16Array([2.65625, 58.59375])
+  ]);
+}, {
+  expected: "PASS",
+  type: "",
+  desc: "Passing a Float16Array as element of the blobParts array should work."
+});
 
 test_blob(function() {
   return new Blob([
     // 0x535 3415053534150
diff --git a/test/fixtures/wpt/FileAPI/blob/Blob-newobject.any.js b/test/fixtures/wpt/FileAPI/blob/Blob-newobject.any.js
new file mode 100644
index 00000000000000..e036c3a9a743e6
--- /dev/null
+++ b/test/fixtures/wpt/FileAPI/blob/Blob-newobject.any.js
@@ -0,0 +1,12 @@
+// META: title=Blob methods return new objects ([NewObject])
+// META: global=window,worker
+'use strict';
+
+['stream', 'text', 'arrayBuffer', 'bytes'].forEach(method => {
+  test(() => {
+    const blob = new Blob(['PASS']);
+    const a = blob[method]();
+    const b = blob[method]();
+    assert_not_equals(a, b, `Blob.${method}() must return a new object`);
+  }, `Blob.${method}() returns [NewObject]`);
+});
diff --git a/test/fixtures/wpt/FileAPI/blob/Blob-slice.any.js b/test/fixtures/wpt/FileAPI/blob/Blob-slice.any.js
index 1f85d44d269191..bedc8d737302a0 100644
--- a/test/fixtures/wpt/FileAPI/blob/Blob-slice.any.js
+++ b/test/fixtures/wpt/FileAPI/blob/Blob-slice.any.js
@@ -12,24 +12,50 @@ test_blob(function() {
 });
 
 test(function() {
-  var blob1, blob2;
+  var blob1 = new Blob(["squiggle"]);
+  var blob2 = new Blob(["steak"], {type: "content/type"});
 
-  test_blob(function() {
-    return blob1 = new Blob(["squiggle"]);
-  }, {
+  test_blob(() => blob1,
+  {
     expected: "squiggle",
     type: "",
     desc: "blob1."
   });
 
-  test_blob(function() {
-    return blob2 = new Blob(["steak"], {type: "content/type"});
-  }, {
+  test_blob(() => blob2,
+  {
     expected: "steak",
     type: "content/type",
     desc: "blob2."
   });
+
+  test_blob(function() {
+    var blob = new Blob(["abcd"]);
+    return blob.slice(undefined, undefined, "content/type");
+  }, {
+    expected: "abcd",
+    type: "content/type",
+    desc: "undefined start/end Blob slice"
+  });
+
+  test_blob(function() {
+    var blob = new Blob(["abcd"]);
+    return blob.slice(undefined, 2, "content/type");
+  }, {
+    expected: "ab",
+    type: "content/type",
+    desc: "undefined start Blob slice"
+  });
+
+  test_blob(function() {
+    var blob = new Blob(["abcd"]);
+    return blob.slice(2, undefined, "content/type");
+  }, {
+    expected: "cd",
+    type: "content/type",
+    desc: "undefined end Blob slice"
+  });
+
   test_blob(function() {
     return new Blob().slice(0,0,null);
   }, {
@@ -74,6 +100,21 @@ test(function() {
      {start: 7, end: 4, contents: ""}]
   ],
 
+  // Test double start/end values
+  [
+    ["abcd"],
+    [{start: 0.5, contents: "abcd"},
+     {start: 1.5, contents: "cd"},
+     {start: 2.5, contents: "cd"},
+     {start: 3.5, contents: ""},
+     {start: 0, end: 0.5, contents: ""},
+     {start: 0, end: 1.5, contents: "ab"},
+     {start: 0, end: 2.5, contents: "ab"},
+     {start: 0, end: 3.5, contents: "abcd"},
+     {start: 1.5, end: 2.5, contents: ""},
+     {start: 1.5, end: 3.5, contents: "cd"}]
+  ],
+
   // Test 3 strings
   [
     ["foo", "bar", "baz"],
diff --git a/test/fixtures/wpt/FileAPI/reading-data-section/filereader_readAsArrayBuffer.any.js b/test/fixtures/wpt/FileAPI/reading-data-section/filereader_readAsArrayBuffer.any.js
index d06e3170782b7c..88c4f4d26cc0b2 100644
--- a/test/fixtures/wpt/FileAPI/reading-data-section/filereader_readAsArrayBuffer.any.js
+++ b/test/fixtures/wpt/FileAPI/reading-data-section/filereader_readAsArrayBuffer.any.js
@@ -7,6 +7,7 @@
     reader.onload = this.step_func(function(evt) {
       assert_equals(reader.result.byteLength, 4, "The byteLength is 4");
       assert_true(reader.result instanceof ArrayBuffer, "The result is instanceof ArrayBuffer");
+      assert_array_equals(new Uint8Array(reader.result), [84, 69, 83, 84]);
       assert_equals(reader.readyState, reader.DONE);
       this.done();
     });
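Reviewer note: the fractional `slice()` cases added above rely on WebIDL [Clamp] conversion, which rounds a halfway value to the nearest *even* integer — that is why `start: 1.5` yields "cd" rather than "bcd". A standalone sketch of that behavior (plain platform APIs, nothing specific to this PR):

    const blob = new Blob(['abcd']);
    // [Clamp] rounds 1.5 to 2 (ties go to the even integer), so this
    // slice starts at byte 2, not byte 1.
    blob.slice(1.5).text().then((s) => console.log(s)); // "cd"
    // 2.5 also rounds to 2, so the end is byte 2.
    blob.slice(0, 2.5).text().then((s) => console.log(s)); // "ab"
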
diff --git a/test/fixtures/wpt/FileAPI/support/Blob.js b/test/fixtures/wpt/FileAPI/support/Blob.js
index 2c249746858918..e8a52425a17085 100644
--- a/test/fixtures/wpt/FileAPI/support/Blob.js
+++ b/test/fixtures/wpt/FileAPI/support/Blob.js
@@ -5,23 +5,16 @@ self.test_blob = (fn, expectations) => {
       type = expectations.type,
       desc = expectations.desc;
 
-  var t = async_test(desc);
-  t.step(function() {
+  promise_test(async (t) => {
     var blob = fn();
     assert_true(blob instanceof Blob);
     assert_false(blob instanceof File);
     assert_equals(blob.type, type);
     assert_equals(blob.size, expected.length);
 
-    var fr = new FileReader();
-    fr.onload = t.step_func_done(function(event) {
-      assert_equals(this.result, expected);
-    }, fr);
-    fr.onerror = t.step_func(function(e) {
-      assert_unreached("got error event on FileReader");
-    });
-    fr.readAsText(blob, "UTF-8");
-  });
+    const text = await blob.text();
+    assert_equals(text, expected);
+  }, desc);
 }
 
 self.test_blob_binary = (fn, expectations) => {
@@ -29,25 +22,18 @@ self.test_blob_binary = (fn, expectations) => {
       type = expectations.type,
       desc = expectations.desc;
 
-  var t = async_test(desc);
-  t.step(function() {
+  promise_test(async (t) => {
     var blob = fn();
     assert_true(blob instanceof Blob);
     assert_false(blob instanceof File);
     assert_equals(blob.type, type);
     assert_equals(blob.size, expected.length);
 
-    var fr = new FileReader();
-    fr.onload = t.step_func_done(function(event) {
-      assert_true(this.result instanceof ArrayBuffer,
-                  "Result should be an ArrayBuffer");
-      assert_array_equals(new Uint8Array(this.result), expected);
-    }, fr);
-    fr.onerror = t.step_func(function(e) {
-      assert_unreached("got error event on FileReader");
-    });
-    fr.readAsArrayBuffer(blob);
-  });
+    const ab = await blob.arrayBuffer();
+    assert_true(ab instanceof ArrayBuffer,
+                "Result should be an ArrayBuffer");
+    assert_array_equals(new Uint8Array(ab), expected);
+  }, desc);
 }
 
 // Assert that two TypedArray objects have the same byte values
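Reviewer note: the helper refactor above replaces callback-based FileReader plumbing with the Promise-returning Blob methods. A standalone sketch of the equivalence, for a browser/worker context (plain platform APIs, nothing specific to this PR):

    // Old style: FileReader + events, wrapped in a Promise by hand.
    function readAsTextLegacy(blob) {
      return new Promise((resolve, reject) => {
        const fr = new FileReader();
        fr.onload = () => resolve(fr.result);
        fr.onerror = () => reject(fr.error);
        fr.readAsText(blob, 'UTF-8');
      });
    }

    // New style: Blob.prototype.text() already returns a Promise.
    const blob = new Blob(['PASS']);
    Promise.all([readAsTextLegacy(blob), blob.text()])
      .then(([legacy, modern]) => console.log(legacy === modern)); // true
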
diff --git a/test/fixtures/wpt/FileAPI/support/send-file-form-helper.js b/test/fixtures/wpt/FileAPI/support/send-file-form-helper.js
index 39c73c41b42207..d6adf21ec33795 100644
--- a/test/fixtures/wpt/FileAPI/support/send-file-form-helper.js
+++ b/test/fixtures/wpt/FileAPI/support/send-file-form-helper.js
@@ -180,7 +180,7 @@ const formPostFileUploadTest = ({
 
   // Used to verify that the browser agrees with the test about
   // field value replacement and encoding independently of file system
-  // idiosyncracies.
+  // idiosyncrasies.
   form.append(Object.assign(document.createElement('input'), {
     type: 'hidden',
     name: 'filename',
diff --git a/test/fixtures/wpt/FileAPI/support/send-file-formdata-helper.js b/test/fixtures/wpt/FileAPI/support/send-file-formdata-helper.js
index dd62a0e98e92c8..53c8cca7e09b8e 100644
--- a/test/fixtures/wpt/FileAPI/support/send-file-formdata-helper.js
+++ b/test/fixtures/wpt/FileAPI/support/send-file-formdata-helper.js
@@ -34,7 +34,7 @@ const formDataPostFileUploadTest = ({
 
   // Used to verify that the browser agrees with the test about
   // field value replacement and encoding independently of file system
-  // idiosyncracies.
+  // idiosyncrasies.
   formData.append("filename", fileBaseName);
 
   // Same, but with name and value reversed to ensure field names
diff --git a/test/fixtures/wpt/FileAPI/url/url-in-tags.window.js b/test/fixtures/wpt/FileAPI/url/url-in-tags.window.js
index f20b3599013bf5..8a722dcac82b66 100644
--- a/test/fixtures/wpt/FileAPI/url/url-in-tags.window.js
+++ b/test/fixtures/wpt/FileAPI/url/url-in-tags.window.js
@@ -1,3 +1,10 @@
+setup(() => {
+  const viewport_meta = document.createElement('meta');
+  viewport_meta.name = "viewport";
+  viewport_meta.content = "width=device-width,initial-scale=1";
+  document.head.appendChild(viewport_meta);
+});
+
 async_test(t => {
   const run_result = 'test_script_OK';
   const blob_contents = 'window.test_result = "' + run_result + '";';
diff --git a/test/fixtures/wpt/README.md b/test/fixtures/wpt/README.md
index dc3fa71a19782a..2ce7de20ded95f 100644
--- a/test/fixtures/wpt/README.md
+++ b/test/fixtures/wpt/README.md
@@ -17,7 +17,7 @@ Last update:
 - dom/events: https://github.com/web-platform-tests/wpt/tree/0a811c5161/dom/events
 - encoding: https://github.com/web-platform-tests/wpt/tree/1ac8deee08/encoding
 - fetch/data-urls/resources: https://github.com/web-platform-tests/wpt/tree/7c79d998ff/fetch/data-urls/resources
-- FileAPI: https://github.com/web-platform-tests/wpt/tree/cceaf3628d/FileAPI
+- FileAPI: https://github.com/web-platform-tests/wpt/tree/7f51301888/FileAPI
 - hr-time: https://github.com/web-platform-tests/wpt/tree/34cafd797e/hr-time
 - html/webappapis/atob: https://github.com/web-platform-tests/wpt/tree/f267e1dca6/html/webappapis/atob
 - html/webappapis/microtask-queuing: https://github.com/web-platform-tests/wpt/tree/2c5c3c4c27/html/webappapis/microtask-queuing
diff --git a/test/fixtures/wpt/versions.json b/test/fixtures/wpt/versions.json
index 8eb9b356f618a8..50173e71b1b9d7 100644
--- a/test/fixtures/wpt/versions.json
+++ b/test/fixtures/wpt/versions.json
@@ -28,7 +28,7 @@
     "path": "fetch/data-urls/resources"
   },
   "FileAPI": {
-    "commit": "cceaf3628da950621004d9b5d8c1d1f367073347",
+    "commit": "7f5130188818b6c12c636491186b459ec2bf131f",
     "path": "FileAPI"
   },
   "hr-time": {
diff --git a/test/parallel/test-buffer-concat.js b/test/parallel/test-buffer-concat.js
index 9f0eadd2f10163..9323126897d691 100644
--- a/test/parallel/test-buffer-concat.js
+++ b/test/parallel/test-buffer-concat.js
@@ -22,6 +22,7 @@
 'use strict';
 const common = require('../common');
 const assert = require('assert');
+const { kMaxLength } = require('buffer');
 
 const zero = [];
 const one = [ Buffer.from('asdf') ];
@@ -84,8 +85,8 @@ assert.throws(() => {
   Buffer.concat([Buffer.from('hello')], -2);
}, {
   code: 'ERR_OUT_OF_RANGE',
-  message: 'The value of "length" is out of range. It must be >= 0 && <= 9007199254740991. ' +
-    'Received -2'
+  message: 'The value of "length" is out of range. It must be >= 0 && <= ' +
+    `${kMaxLength}. Received -2`
 });
 
 // eslint-disable-next-line node-core/crypto-check
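Reviewer note: the two regression tests that follow exercise the `!req.destroyed` guard added to `lib/_http_client.js` above. For context, a minimal sketch of the request pattern that triggered the race (the port number is hypothetical; any keep-alive agent issuing `Expect: 100-continue` POSTs can hit it):

    const http = require('http');

    const agent = new http.Agent({ keepAlive: true, maxSockets: 1 });

    // The server may answer before the client's 'finish' event fires.
    // Without the guard, responseKeepAlive() could then run twice and
    // free a socket already handed to the next queued request.
    const req = http.request({
      port: 8080, // hypothetical local server
      method: 'POST',
      agent,
      headers: { 'Content-Length': '0', 'Expect': '100-continue' },
    }, (res) => res.resume());
    req.end();
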
diff --git a/test/parallel/test-http-expect-continue-reuse-race.js b/test/parallel/test-http-expect-continue-reuse-race.js
new file mode 100644
index 00000000000000..d56319d0e6bab1
--- /dev/null
+++ b/test/parallel/test-http-expect-continue-reuse-race.js
@@ -0,0 +1,117 @@
+'use strict';
+
+// Regression test for a keep-alive socket reuse race condition.
+//
+// The race is between responseOnEnd() and requestOnFinish(), both of which
+// can call responseKeepAlive(). The window is: req.end() has been called,
+// the socket write has completed (writableFinished true), but the write
+// callback that emits the 'finish' event has not fired yet.
+//
+// With plain HTTP the window is normally too narrow to hit. This test
+// widens it by delaying every client-socket write *callback* by a few
+// milliseconds (the actual I/O is not delayed, so writableFinished becomes
+// true while the 'finish'-emitting callback is still pending).
+//
+// With Expect: 100-continue, the server responds quickly while the client
+// delays req.end() just slightly (setTimeout 0), creating the perfect
+// timing for the response to arrive in that window.
+//
+// On unpatched Node, the double responseKeepAlive() call corrupts the
+// socket by stripping a subsequent request's listeners and emitting a
+// spurious 'free' event, causing requests to hang / time out.
+
+const common = require('../common');
+const assert = require('assert');
+const http = require('http');
+
+const REQUEST_COUNT = 100;
+const agent = new http.Agent({ keepAlive: true, maxSockets: 1 });
+
+// Delay every write *callback* on the client socket so that
+// socket.writableLength drops to 0 (writableFinished becomes true) before
+// the callback that ultimately emits the 'finish' event fires. With
+// HTTPS the TLS layer provides this gap naturally; for plain HTTP we
+// need to create it artificially.
+const patchedSockets = new WeakSet();
+function patchSocket(socket) {
+  if (patchedSockets.has(socket)) return;
+  patchedSockets.add(socket);
+  const delay = 5;
+  const origWrite = socket.write;
+  socket.write = function(chunk, encoding, cb) {
+    if (typeof encoding === 'function') {
+      cb = encoding;
+      encoding = null;
+    }
+    if (typeof cb === 'function') {
+      const orig = cb;
+      cb = (...args) => setTimeout(() => orig(...args), delay);
+    }
+    return origWrite.call(this, chunk, encoding, cb);
+  };
+}
+
+const server = http.createServer(common.mustCall((req, res) => {
+  req.on('error', common.mustNotCall());
+  res.writeHead(200);
+  res.end();
+}, REQUEST_COUNT));
+
+server.listen(0, common.mustCall(() => {
+  const { port } = server.address();
+
+  async function run() {
+    try {
+      for (let i = 0; i < REQUEST_COUNT; i++) {
+        await sendRequest(port);
+      }
+    } finally {
+      agent.destroy();
+      server.close();
+    }
+  }
+
+  run().then(common.mustCall());
+}));
+
+function sendRequest(port) {
+  let timeout;
+  const promise = new Promise((resolve, reject) => {
+    function done(err) {
+      clearTimeout(timeout);
+      if (err)
+        reject(err);
+      else
+        resolve();
+    }
+
+    const req = http.request({
+      port,
+      host: '127.0.0.1',
+      method: 'POST',
+      agent,
+      headers: {
+        'Content-Length': '0',
+        'Expect': '100-continue',
+      },
+    }, common.mustCall((res) => {
+      assert.strictEqual(res.statusCode, 200);
+      res.resume();
+      res.once('end', done);
+      res.once('error', done);
+    }));
+
+    req.on('socket', patchSocket);
+
+    timeout = setTimeout(() => {
+      const err = new Error('request timed out');
+      req.destroy(err);
+      done(err);
+    }, common.platformTimeout(5000));
+
+    req.once('error', done);
+
+    setTimeout(() => req.end(Buffer.alloc(0)), 0);
+  });
+  return promise.finally(() => clearTimeout(timeout));
+}
diff --git a/test/parallel/test-https-expect-continue-reuse-race.js b/test/parallel/test-https-expect-continue-reuse-race.js
new file mode 100644
index 00000000000000..cc754477b788c3
--- /dev/null
+++ b/test/parallel/test-https-expect-continue-reuse-race.js
@@ -0,0 +1,97 @@
+'use strict';
+
+// Regression test for a keep-alive socket reuse race condition.
+//
+// The race is between responseOnEnd() and requestOnFinish(), both of which
+// can call responseKeepAlive(). The window is: req.end() has been called,
+// the socket write has completed (writableFinished true), but the write
+// callback that emits the 'finish' event has not fired yet.
+//
+// HTTPS widens this window because the TLS layer introduces async
+// indirection between the actual write completion and the JS callback.
+//
+// With Expect: 100-continue, the server responds quickly while the client
+// delays req.end() just slightly (setTimeout 0), creating the perfect
+// timing for the response to arrive in that window.
+//
+// On unpatched Node, the double responseKeepAlive() call corrupts the
+// socket by stripping a subsequent request's listeners and emitting a
+// spurious 'free' event, causing requests to hang / time out.
+
+const common = require('../common');
+
+if (!common.hasCrypto)
+  common.skip('missing crypto');
+
+const assert = require('assert');
+const https = require('https');
+const fixtures = require('../common/fixtures');
+
+const REQUEST_COUNT = 100;
+const agent = new https.Agent({ keepAlive: true, maxSockets: 1 });
+
+const key = fixtures.readKey('agent1-key.pem');
+const cert = fixtures.readKey('agent1-cert.pem');
+const server = https.createServer({ key, cert }, common.mustCall((req, res) => {
+  req.on('error', common.mustNotCall());
+  res.writeHead(200);
+  res.end();
+}, REQUEST_COUNT));
+
+server.listen(0, common.mustCall(() => {
+  const { port } = server.address();
+
+  async function run() {
+    try {
+      for (let i = 0; i < REQUEST_COUNT; i++) {
+        await sendRequest(port);
+      }
+    } finally {
+      agent.destroy();
+      server.close();
+    }
+  }
+
+  run().then(common.mustCall());
+}));
+
+function sendRequest(port) {
+  let timeout;
+  const promise = new Promise((resolve, reject) => {
+    function done(err) {
+      clearTimeout(timeout);
+      if (err)
+        reject(err);
+      else
+        resolve();
+    }
+
+    const req = https.request({
+      port,
+      host: '127.0.0.1',
+      rejectUnauthorized: false,
+      method: 'POST',
+      agent,
+      headers: {
+        'Content-Length': '0',
+        'Expect': '100-continue',
+      },
+    }, common.mustCall((res) => {
+      assert.strictEqual(res.statusCode, 200);
+      res.resume();
+      res.once('end', done);
+      res.once('error', done);
+    }));
+
+    timeout = setTimeout(() => {
+      const err = new Error('request timed out');
+      req.destroy(err);
+      done(err);
+    }, common.platformTimeout(5000));
+
+    req.once('error', done);
+
+    setTimeout(() => req.end(Buffer.alloc(0)), 0);
+  });
+  return promise.finally(() => clearTimeout(timeout));
+}
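Reviewer note: the worker test that follows pins down the invariant behind the one-line reorder in lib/internal/worker.js — a worker refreshing its cached cwd must never observe the counter bump before the directory has actually changed. A condensed, standalone sketch of that invariant (not the test itself, which stalls the chdir binding to force the race deterministically):

    const { Worker, isMainThread } = require('worker_threads');

    if (isMainThread) {
      // Workers cache process.cwd() and refresh it when the shared
      // cwdCounter changes. Incrementing the counter only *after*
      // chdir() succeeds means a refresh can never surface a directory
      // the main thread has not finished changing into.
      new Worker(__filename);
      process.chdir('..');
    } else {
      // Whatever this prints is either the original cwd or a directory
      // the main thread had fully changed into — never a torn state.
      console.log(process.cwd());
    }
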
diff --git a/test/parallel/test-worker-cwd-race-condition.js b/test/parallel/test-worker-cwd-race-condition.js
new file mode 100644
index 00000000000000..19394c85f0d1c4
--- /dev/null
+++ b/test/parallel/test-worker-cwd-race-condition.js
@@ -0,0 +1,70 @@
+// Flags: --expose-internals --no-warnings
+'use strict';
+
+const common = require('../common');
+const { isMainThread } = require('worker_threads');
+
+if (!isMainThread) {
+  common.skip('process.chdir is not available in Workers');
+}
+
+const { internalBinding } = require('internal/test/binding');
+
+const assert = require('assert');
+const { Worker } = require('worker_threads');
+
+const processBinding = internalBinding('process_methods');
+const originalChdir = processBinding.chdir;
+
+const cwdOriginal = process.cwd();
+const i32 = new Int32Array(new SharedArrayBuffer(12));
+
+processBinding.chdir = common.mustCall(function chdir(path) {
+  // Signal to the worker that we're inside the chdir call
+  Atomics.store(i32, 0, 1);
+  Atomics.notify(i32, 0);
+
+  // Pause the chdir call while the worker calls process.cwd(),
+  // to simulate a race condition
+  Atomics.wait(i32, 1, 0);
+
+  return originalChdir(path);
+});
+
+const worker = new Worker(`
+  const {
+    parentPort,
+    workerData: { i32 },
+  } = require('worker_threads');
+
+  // Wait until the main thread has entered the chdir call
+  Atomics.wait(i32, 0, 0);
+
+  const cwdDuringChdir = process.cwd();
+
+  // Signal the main thread to continue the chdir call
+  Atomics.store(i32, 1, 1);
+  Atomics.notify(i32, 1);
+
+  // Wait until the main thread has left the chdir call
+  Atomics.wait(i32, 2, 0);
+
+  const cwdAfterChdir = process.cwd();
+  parentPort.postMessage({ cwdDuringChdir, cwdAfterChdir });
+`, {
+  eval: true,
+  workerData: { i32 },
+});
+
+worker.on('exit', common.mustCall());
+worker.on('error', common.mustNotCall());
+worker.on('message', common.mustCall(({ cwdDuringChdir, cwdAfterChdir }) => {
+  assert.strictEqual(cwdDuringChdir, cwdOriginal);
+  assert.strictEqual(cwdAfterChdir, process.cwd());
+}));
+
+process.chdir('..');
+
+// Signal to the worker that the chdir call is completed
+Atomics.store(i32, 2, 1);
+Atomics.notify(i32, 2);
diff --git a/test/parallel/test-zlib-brotli-dictionary.js b/test/parallel/test-zlib-brotli-dictionary.js
new file mode 100644
index 00000000000000..1703a5e047041a
--- /dev/null
+++ b/test/parallel/test-zlib-brotli-dictionary.js
@@ -0,0 +1,126 @@
+'use strict';
+
+const common = require('../common');
+const assert = require('assert');
+const zlib = require('zlib');
+
+const dictionary = Buffer.from(
+  `Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+   Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+   Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.`
+);
+
+const input = Buffer.from(
+  `Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+   Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+   Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+   Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+   Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.`
+);
+
+// Test with convenience methods (async).
+zlib.brotliCompress(input, { dictionary }, common.mustSucceed((compressed) => {
+  assert(compressed.length < input.length,
+         'compressed data should be smaller with dictionary');
+  zlib.brotliDecompress(compressed, { dictionary }, common.mustSucceed((decompressed) => {
+    assert.strictEqual(decompressed.toString(), input.toString());
+  }));
+}));
+
+// Test with streaming API.
+{
+  const encoder = zlib.createBrotliCompress({ dictionary });
+  const decoder = zlib.createBrotliDecompress({ dictionary });
+
+  const chunks = [];
+  decoder.on('data', (chunk) => chunks.push(chunk));
+  decoder.on('end', common.mustCall(() => {
+    const result = Buffer.concat(chunks);
+    assert.strictEqual(result.toString(), input.toString());
+  }));
+
+  encoder.pipe(decoder);
+  encoder.end(input);
+}
+
+// Test that dictionary improves compression ratio.
+{
+  const withDict = zlib.brotliCompressSync(input, { dictionary });
+  const withoutDict = zlib.brotliCompressSync(input);
+
+  // Dictionary-based compression should be at least as good as without.
+  assert(withDict.length <= withoutDict.length,
+         `Dictionary compression (${withDict.length}) should not be ` +
+         `larger than non-dictionary compression (${withoutDict.length})`);
+
+  // Verify decompression with dictionary works.
+  const decompressed = zlib.brotliDecompressSync(withDict, { dictionary });
+  assert.strictEqual(decompressed.toString(), input.toString());
+}
+
+// Test that decompression without matching dictionary fails.
+{
+  const compressed = zlib.brotliCompressSync(input, { dictionary });
+  assert.throws(() => {
+    zlib.brotliDecompressSync(compressed);
+  }, (err) => {
+    assert.match(err.code, /ERR_/);
+    return true;
+  });
+}
+
+// Test that decompression with wrong dictionary fails.
+{
+  const compressed = zlib.brotliCompressSync(input, { dictionary });
+  const wrongDictionary = Buffer.from('this is the wrong dictionary');
+  assert.throws(() => {
+    zlib.brotliDecompressSync(compressed, { dictionary: wrongDictionary });
+  }, (err) => {
+    assert.match(err.code, /ERR_/);
+    return true;
+  });
+}
+
+// Test that dictionary works with ArrayBuffer (converted to Buffer).
+{
+  const arrayBufferDict = dictionary.buffer.slice(
+    dictionary.byteOffset,
+    dictionary.byteOffset + dictionary.byteLength,
+  );
+  const compressed = zlib.brotliCompressSync(input, { dictionary: arrayBufferDict });
+  const decompressed = zlib.brotliDecompressSync(compressed, { dictionary: arrayBufferDict });
+  assert.strictEqual(decompressed.toString(), input.toString());
+}
+
+// Test that dictionary works with TypedArray (Uint8Array).
+{
+  const uint8Dict = new Uint8Array(dictionary);
+  const compressed = zlib.brotliCompressSync(input, { dictionary: uint8Dict });
+  const decompressed = zlib.brotliDecompressSync(compressed, { dictionary: uint8Dict });
+  assert.strictEqual(decompressed.toString(), input.toString());
+}
+
+// Test that invalid dictionary type throws ERR_INVALID_ARG_TYPE.
+for (const invalidDict of ['string', 123, true, { object: true }, [1, 2, 3]]) {
+  assert.throws(() => {
+    zlib.createBrotliCompress({ dictionary: invalidDict });
+  }, { code: 'ERR_INVALID_ARG_TYPE' });
+
+  assert.throws(() => {
+    zlib.createBrotliDecompress({ dictionary: invalidDict });
+  }, { code: 'ERR_INVALID_ARG_TYPE' });
+}
+
+// Test with streaming API and wrong dictionary emits error event.
+{
+  const compressed = zlib.brotliCompressSync(input, { dictionary });
+  const wrongDict = Buffer.from('wrong dictionary data');
+  const decoder = zlib.createBrotliDecompress({ dictionary: wrongDict });
+
+  decoder.on('error', common.mustCall((err) => {
+    assert.match(err.code, /ERR_/);
+  }));
+
+  decoder.write(compressed);
+  decoder.end();
+}
diff --git a/test/wpt/status/FileAPI/blob.json b/test/wpt/status/FileAPI/blob.json
index 8ea03bbc019992..3bbd15087a02ac 100644
--- a/test/wpt/status/FileAPI/blob.json
+++ b/test/wpt/status/FileAPI/blob.json
@@ -4,38 +4,20 @@
   },
   "Blob-constructor.any.js": {
     "fail": {
-      "note": "Depends on File API",
       "expected": [
-        "A plain object with @@iterator should be treated as a sequence for the blobParts argument.",
-        "A plain object with @@iterator and a length property should be treated as a sequence for the blobParts argument.",
-        "A String object should be treated as a sequence for the blobParts argument.",
-        "A Uint8Array object should be treated as a sequence for the blobParts argument.",
+        "blobParts not an object: boolean with Boolean.prototype[Symbol.iterator]",
+        "blobParts not an object: number with Number.prototype[Symbol.iterator]",
+        "blobParts not an object: BigInt with BigInt.prototype[Symbol.iterator]",
+        "blobParts not an object: Symbol with Symbol.prototype[Symbol.iterator]",
         "Getters and value conversions should happen in order until an exception is thrown.",
-        "Changes to the blobParts array should be reflected in the returned Blob (pop).",
-        "Changes to the blobParts array should be reflected in the returned Blob (unshift).",
-        "ToString should be called on elements of the blobParts array.",
-        "ArrayBuffer elements of the blobParts array should be supported.",
+        "options properties should be accessed in lexicographic order.",
+        "Arguments should be evaluated from left to right."
+      ],
+      "flaky": [
         "Passing typed arrays as elements of the blobParts array should work.",
+        "Passing a Float16Array as element of the blobParts array should work.",
         "Passing a Float64Array as element of the blobParts array should work.",
-        "Passing BigInt typed arrays as elements of the blobParts array should work.",
-        "Array with two blobs",
-        "Array with two buffers",
-        "Array with two bufferviews",
-        "Array with mixed types",
-        "options properties should be accessed in lexicographic order.",
-        "Arguments should be evaluated from left to right.",
-        "Passing null (index 0) for options should use the defaults.",
-        "Passing null (index 0) for options should use the defaults (with newlines).",
-        "Passing undefined (index 1) for options should use the defaults.",
-        "Passing undefined (index 1) for options should use the defaults (with newlines).",
-        "Passing object \"[object Object]\" (index 2) for options should use the defaults.",
-        "Passing object \"[object Object]\" (index 2) for options should use the defaults (with newlines).",
-        "Passing object \"[object Object]\" (index 3) for options should use the defaults.",
-        "Passing object \"[object Object]\" (index 3) for options should use the defaults (with newlines).",
-        "Passing object \"/regex/\" (index 4) for options should use the defaults.",
-        "Passing object \"/regex/\" (index 4) for options should use the defaults (with newlines).",
-        "Passing function \"function() {}\" (index 5) for options should use the defaults.",
-        "Passing function \"function() {}\" (index 5) for options should use the defaults (with newlines)."
+        "Passing BigInt typed arrays as elements of the blobParts array should work."
       ]
     }
   },
@@ -43,6 +25,15 @@
     "skip": "Depends on Web Workers API"
   },
   "Blob-slice.any.js": {
-    "skip": "Depends on File API"
+    "fail": {
+      "expected": [
+        "Slicing test: slice (1,1).",
+        "Slicing test: slice (1,3).",
+        "Slicing test: slice (1,5).",
+        "Slicing test: slice (1,7).",
+        "Slicing test: slice (1,8).",
+        "Slicing test: slice (1,9)."
+      ]
+    }
   }
 }