diff --git a/common.gypi b/common.gypi index 20acf954bc02d4..3771cb874e0687 100644 --- a/common.gypi +++ b/common.gypi @@ -38,7 +38,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.10', + 'v8_embedder_string': '-node.6', ##### V8 defaults for Node.js ##### diff --git a/deps/v8/.clang-format b/deps/v8/.clang-format index 5318292248e28e..d655b2039f8848 100644 --- a/deps/v8/.clang-format +++ b/deps/v8/.clang-format @@ -3,6 +3,7 @@ BasedOnStyle: Google DerivePointerAlignment: false MaxEmptyLinesToKeep: 1 +SpaceBeforeCaseColon: false IfMacros: - IF - IF_NOT @@ -10,6 +11,7 @@ Macros: # Make clang-format think TurboShaft `ELSE` expands to just `else`, so that # it formats well alongside `if` - ELSE=else + - CASE_=case - FOR_WITH_HANDLE_SCOPE(isolate, init, loop_var, limit_check, increment)=for(init; loop_var; increment) - WHILE_WITH_HANDLE_SCOPE(isolate, limit_check)=while(limit_check) StatementMacros: diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index 31449e0177a224..388d39863d83cc 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -26,6 +26,7 @@ .cache .ccls-cache .clangd +.clang-format-cache .cpplint-cache .cproject .DS_Store @@ -153,6 +154,7 @@ bazel-bin bazel-out bazel-testlogs bazel-v8 +launch.json !/third_party/jinja2 !/third_party/markupsafe !/third_party/zlib diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index a9ea710211ad0f..867cc3398e49f6 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -188,7 +188,9 @@ Kang-Hao (Kenny) Lu Karl Skomski Kasper Lund Keith Smiley +Kenta Moriuchi Kevin Gibbons +Kevin Wang Keyhan Vakil Kris Selden Krishna Ravishankar @@ -260,6 +262,7 @@ Refael Ackermann Rene Rebe Reza Yazdani Rick Waldron +Riya Amemiya Rob Wu Robert Meijer Robert Mustacchi @@ -346,3 +349,8 @@ Ryuhei Shima Domagoj Stolfa Zhijin Zeng 王家睿 +Julian Gremminger +Liam Wachter +Flavio Toffalini +Mathias Payer +Christian Wressnegger diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel index afe73fe83c3d73..d49da552c42612 100644 --- a/deps/v8/BUILD.bazel +++ b/deps/v8/BUILD.bazel @@ -2923,6 +2923,7 @@ filegroup( "src/maglev/maglev-pipeline-statistics.h", "src/maglev/maglev-post-hoc-optimizations-processors.h", "src/maglev/maglev-pre-regalloc-codegen-processors.h", + "src/maglev/maglev-range-analysis.h", "src/maglev/maglev-reducer-inl.h", "src/maglev/maglev-reducer.h", "src/maglev/maglev-regalloc-data.h", @@ -3450,6 +3451,7 @@ filegroup( "src/compiler/turboshaft/build-graph-phase.h", "src/compiler/turboshaft/builtin-call-descriptors.h", "src/compiler/turboshaft/builtin-compiler.h", + "src/compiler/turboshaft/call-descriptors-util.h", "src/compiler/turboshaft/csa-branch-elimination-phase.cc", "src/compiler/turboshaft/csa-branch-elimination-phase.h", "src/compiler/turboshaft/csa-early-machine-optimization-phase.cc", @@ -3513,6 +3515,8 @@ filegroup( "src/compiler/turboshaft/machine-lowering-phase.h", "src/compiler/turboshaft/machine-lowering-reducer-inl.h", "src/compiler/turboshaft/turbolev-early-lowering-reducer-inl.h", + "src/compiler/turboshaft/turbolev-frontend-pipeline.cc", + "src/compiler/turboshaft/turbolev-frontend-pipeline.h", "src/compiler/turboshaft/turbolev-graph-builder.cc", "src/compiler/turboshaft/turbolev-graph-builder.h", "src/compiler/turboshaft/machine-optimization-reducer.h", @@ -3565,6 +3569,7 @@ filegroup( "src/compiler/turboshaft/typer.h", "src/compiler/turboshaft/types.cc", "src/compiler/turboshaft/types.h", + "src/compiler/turboshaft/typeswitch.h", 
"src/compiler/turboshaft/undef-assembler-macros.inc", "src/compiler/turboshaft/uniform-reducer-adapter.h", "src/compiler/turboshaft/use-map.cc", @@ -3673,8 +3678,6 @@ filegroup( "src/compiler/turboshaft/wasm-shuffle-reducer.h", "src/compiler/turboshaft/wasm-simd-phase.cc", "src/compiler/turboshaft/wasm-simd-phase.h", - "src/compiler/turboshaft/wasm-type-cast-rtt-optimization-helpers.cc", - "src/compiler/turboshaft/wasm-type-cast-rtt-optimization-helpers.h", "src/compiler/wasm-address-reassociation.cc", "src/compiler/wasm-address-reassociation.h", "src/compiler/wasm-call-descriptors.cc", @@ -3734,6 +3737,7 @@ filegroup( "src/maglev/maglev-ir.h", "src/maglev/maglev-ir-inl.h", "src/maglev/maglev-kna-processor.h", + "src/maglev/maglev-range-analysis.h", "src/maglev/maglev-reducer-inl.h", "src/maglev/maglev-reducer.h", "src/maglev/maglev-register-frame-array.h", diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 52a9c860841b53..3a51ee81fadd13 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -77,9 +77,6 @@ declare_args() { # Sets the number of internal fields on array buffer view objects. v8_array_buffer_view_internal_field_count = 0 - # Sets -DENABLE_GDB_JIT_INTERFACE. - v8_enable_gdbjit = "" - # Sets -DENABLE_VTUNE_JIT_INTERFACE. v8_enable_vtunejit = false @@ -280,9 +277,11 @@ declare_args() { # Controls the threshold for on-heap/off-heap Typed Arrays. v8_typed_array_max_size_in_heap = 64 - v8_enable_gdbjit = ((v8_current_cpu == "x86" || v8_current_cpu == "x64") && - (is_linux || is_chromeos || is_mac)) || - (v8_current_cpu == "ppc64" && (is_linux || is_chromeos)) + # Sets -DENABLE_GDB_JIT_INTERFACE. + v8_enable_gdbjit = + is_debug && (((v8_current_cpu == "x86" || v8_current_cpu == "x64") && + (is_linux || is_chromeos || is_mac)) || + (v8_current_cpu == "ppc64" && (is_linux || is_chromeos))) # Check that each header can be included in isolation (requires also # setting the "check_v8_header_includes" gclient variable to run a @@ -494,14 +493,15 @@ declare_args() { # Derived defaults. 
if (cppgc_enable_verify_heap == "") { - cppgc_enable_verify_heap = v8_enable_debugging_features || v8_dcheck_always_on + cppgc_enable_verify_heap = + v8_enable_verification_features || v8_dcheck_always_on } if (v8_enable_verify_heap == "") { - v8_enable_verify_heap = v8_enable_debugging_features + v8_enable_verify_heap = v8_enable_verification_features } if (v8_enable_verify_write_barriers == "") { v8_enable_verify_write_barriers = - (v8_enable_debugging_features || v8_dcheck_always_on) && + (v8_enable_verification_features || v8_dcheck_always_on) && !v8_disable_write_barriers } if (v8_enable_object_print == "") { @@ -517,18 +517,19 @@ if (v8_enable_trace_maps == "") { v8_enable_trace_maps = v8_enable_debugging_features } if (v8_enable_test_features == "") { - v8_enable_test_features = v8_enable_debugging_features || v8_dcheck_always_on + v8_enable_test_features = + v8_enable_verification_features || v8_dcheck_always_on } if (v8_enable_v8_checks == "") { - v8_enable_v8_checks = v8_enable_debugging_features + v8_enable_v8_checks = v8_enable_verification_features } if (v8_enable_memory_accounting_checks == "") { v8_enable_memory_accounting_checks = - v8_enable_debugging_features || v8_dcheck_always_on + v8_enable_verification_features || v8_dcheck_always_on } if (v8_enable_heap_snapshot_verify == "") { v8_enable_heap_snapshot_verify = - v8_enable_debugging_features || v8_dcheck_always_on + v8_enable_verification_features || v8_dcheck_always_on } if (v8_enable_snapshot_code_comments) { assert(v8_code_comments == true || v8_code_comments == "", @@ -538,7 +539,7 @@ if (v8_enable_snapshot_code_comments) { v8_code_comments = v8_enable_debugging_features } if (v8_enable_debug_code == "") { - v8_enable_debug_code = v8_enable_debugging_features || v8_dcheck_always_on + v8_enable_debug_code = v8_enable_verification_features || v8_dcheck_always_on } if (v8_enable_snapshot_native_code_counters == "") { v8_enable_snapshot_native_code_counters = v8_enable_debugging_features @@ -651,9 +652,9 @@ assert(!v8_enable_trace_ignition || v8_enable_trace_unoptimized, assert(!v8_enable_trace_baseline_exec || v8_enable_trace_unoptimized, "Baseline tracing requires unoptimized tracing to be enabled.") assert( - v8_enable_debugging_features == true || v8_dcheck_always_on || + v8_enable_verification_features == true || v8_dcheck_always_on || !v8_enable_slow_dchecks, - "v8_enable_slow_dchecks requires v8_enable_debugging_features or dcheck_always_on.") + "v8_enable_slow_dchecks requires v8_enable_verification_features or dcheck_always_on.") if (v8_enable_short_builtin_calls && (!v8_enable_pointer_compression && v8_current_cpu != "x64")) { @@ -825,7 +826,7 @@ assert( v8_random_seed = "314159265" v8_toolset_for_shell = "host" -is_DEBUG_defined = v8_enable_debugging_features || v8_dcheck_always_on +is_DEBUG_defined = v8_enable_verification_features || v8_dcheck_always_on ############################################################################### # Configurations @@ -1264,7 +1265,7 @@ config("features") { } if (v8_enable_debug_code) { defines += [ "V8_ENABLE_DEBUG_CODE" ] - if (v8_enable_debugging_features) { + if (v8_enable_verification_features) { defines += [ "V8_ENABLE_SLOW_DEBUG_CODE_BY_DEFAULT" ] } } @@ -1646,7 +1647,7 @@ config("toolchain") { # TODO(infra): Support v8_enable_prof on Windows. # TODO(infra): Add support for compiling with simulators. 
- if (v8_enable_debugging_features || v8_dcheck_always_on) { + if (v8_enable_verification_features || v8_dcheck_always_on) { defines += [ "DEBUG" ] if (v8_enable_slow_dchecks) { defines += [ "ENABLE_SLOW_DCHECKS" ] @@ -2696,7 +2697,7 @@ template("run_mksnapshot") { args += [ "--no-turbo-rewrite-far-jumps" ] } - if (v8_enable_debugging_features && v8_enable_slow_dchecks) { + if (v8_enable_verification_features && v8_enable_slow_dchecks) { # mksnapshot only accepts this flag if ENABLE_SLOW_DCHECKS is defined. args += [ "--no-enable-slow-asserts" ] } @@ -2830,7 +2831,7 @@ if (v8_verify_builtins_compatibility) { action("v8_dump_build_config") { script = "tools/testrunner/utils/dump_build_config.py" outputs = [ "$root_out_dir/v8_build_config.json" ] - is_full_debug = v8_enable_debugging_features && !v8_optimized_debug + is_full_debug = v8_enable_verification_features && !v8_optimized_debug arch = v8_target_cpu if (v8_target_cpu == "x86") { @@ -2915,6 +2916,7 @@ action("v8_dump_build_config") { "v8_cfi=$v8_control_flow_integrity", "v8_current_cpu=\"$v8_current_cpu\"", "v8_target_cpu=\"$v8_target_cpu\"", + "verification_features=$v8_enable_verification_features", "verify_csa=$v8_enable_verify_csa", "verify_heap=$v8_enable_verify_heap", "verify_predictable=$v8_enable_verify_predictable", @@ -3663,6 +3665,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/branch-elimination-reducer.h", "src/compiler/turboshaft/build-graph-phase.h", "src/compiler/turboshaft/builtin-call-descriptors.h", + "src/compiler/turboshaft/call-descriptors-util.h", "src/compiler/turboshaft/code-elimination-and-simplification-phase.h", "src/compiler/turboshaft/copying-phase.h", "src/compiler/turboshaft/csa-branch-elimination-phase.h", @@ -3730,6 +3733,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/string-view.h", "src/compiler/turboshaft/tracing.h", "src/compiler/turboshaft/turbolev-early-lowering-reducer-inl.h", + "src/compiler/turboshaft/turbolev-frontend-pipeline.h", "src/compiler/turboshaft/turbolev-graph-builder.h", "src/compiler/turboshaft/type-assertions-phase.h", "src/compiler/turboshaft/type-inference-analysis.h", @@ -3739,6 +3743,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/typed-optimizations-reducer.h", "src/compiler/turboshaft/typer.h", "src/compiler/turboshaft/types.h", + "src/compiler/turboshaft/typeswitch.h", "src/compiler/turboshaft/undef-assembler-macros.inc", "src/compiler/turboshaft/uniform-reducer-adapter.h", "src/compiler/turboshaft/use-map.h", @@ -4562,7 +4567,6 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/wasm-shuffle-reducer.h", "src/compiler/turboshaft/wasm-simd-phase.h", "src/compiler/turboshaft/wasm-turboshaft-compiler.h", - "src/compiler/turboshaft/wasm-type-cast-rtt-optimization-helpers.h", "src/compiler/wasm-address-reassociation.h", "src/compiler/wasm-call-descriptors.h", "src/compiler/wasm-compiler-definitions.h", @@ -5259,6 +5263,7 @@ v8_compiler_sources = [ "src/compiler/turboshaft/simplify-tf-loops.cc", "src/compiler/turboshaft/store-store-elimination-phase.cc", "src/compiler/turboshaft/string-escape-analysis-reducer.cc", + "src/compiler/turboshaft/turbolev-frontend-pipeline.cc", "src/compiler/turboshaft/turbolev-graph-builder.cc", "src/compiler/turboshaft/type-assertions-phase.cc", "src/compiler/turboshaft/type-parser.cc", @@ -5385,7 +5390,6 @@ if (v8_enable_webassembly) { "src/compiler/turboshaft/wasm-shuffle-reducer.cc", "src/compiler/turboshaft/wasm-simd-phase.cc", 
"src/compiler/turboshaft/wasm-turboshaft-compiler.cc", - "src/compiler/turboshaft/wasm-type-cast-rtt-optimization-helpers.cc", "src/compiler/wasm-address-reassociation.cc", "src/compiler/wasm-call-descriptors.cc", "src/compiler/wasm-compiler.cc", diff --git a/deps/v8/DEPS b/deps/v8/DEPS index 7439281d9f41a0..dc8d8abe310597 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -59,7 +59,7 @@ vars = { 'checkout_fuchsia_no_hooks': False, # reclient CIPD package version - 'reclient_version': 're_client_version:0.183.0.3b3097cd-gomaip', + 'reclient_version': 're_client_version:0.185.0.db415f21-gomaip', # Fetch configuration files required for the 'use_remoteexec' gn arg 'download_remoteexec_cfg': False, @@ -75,24 +75,24 @@ vars = { 'build_with_chromium': False, # GN CIPD package version. - 'gn_version': 'git_revision:81b24e01531ecf0eff12ec9359a555ec3944ec4e', + 'gn_version': 'git_revision:07d3c6f4dc290fae5ca6152ebcb37d6815c411ab', # ninja CIPD package version # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja 'ninja_version': 'version:3@1.12.1.chromium.4', # siso CIPD package version - 'siso_version': 'git_revision:f7020b54462c37f1b10a16e68563c338c9f14371', + 'siso_version': 'git_revision:0915813c4c786240e12d03aa3018c02bab4df14f', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. - 'fuchsia_version': 'version:29.20250927.0.1', + 'fuchsia_version': 'version:29.20251023.3.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling partition_alloc_version # and whatever else without interference from each other. - 'partition_alloc_version': 'cca2b369b2f8895cb14e24740e1f9bf91d5b371e', + 'partition_alloc_version': 'db8446987dfff3cfc0c100b7d58e6a404ef639eb', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -130,9 +130,9 @@ vars = { deps = { 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + 'dd54bc718b7c5363155660d12b7965ea9f87ada9', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '81330a6e45719ab8c0d22e5548afbedb5482b068', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'fe8567e143162ec1a2fc8d13f85d67a8d2dde1b7', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '723d636763a2635105e0e912bae25f662b2acaa8', 'buildtools/linux64': { 'packages': [ { @@ -178,7 +178,7 @@ deps = { 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'a5e69a1534de88d1eb29b76657d84c8541b72df7', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'd2940bdbb0e28fd002ec31b89f8182bbf63da092', 'third_party/android_platform': { 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'e3919359f2387399042d31401817db4a02d756ec', 'condition': 'checkout_android', @@ -232,11 +232,11 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + 'e2b34e6c5df90b060797419372b230d5638a3843', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '4daf7b2cead7fb17ec3da4eedb30215e3bd3674a', 'condition': 'checkout_android', }, 'third_party/clang-format/script': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + 
'37f6e68a107df43b7d7e044fd36a13cbae3413f2', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + 'c2725e0622e1a86d55f14514f2177a39efea4a0e', 'third_party/colorama/src': { 'url': Var('chromium_url') + '/external/colorama.git' + '@' + '3de9f013df4b470069d03d250224062e8cf15c49', 'condition': 'checkout_android', @@ -246,7 +246,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '73e46667ed3a1326cf564747737b4e11137d7f29', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '8a1ec6a0213ae033f6749f261e1c528488349991', 'third_party/dragonbox/src': Var('chromium_url') + '/external/github.com/jk-jeon/dragonbox.git' + '@' + '6c7c925b571d54486b9ffae8d9d18a822801cbda', 'third_party/fp16/src': @@ -272,7 +272,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/google_benchmark_chrome': { - 'url': Var('chromium_url') + '/chromium/src/third_party/google_benchmark.git' + '@' + 'd6e7f141ed7c93a66890f3750ab634b8b52057a5', + 'url': Var('chromium_url') + '/chromium/src/third_party/google_benchmark.git' + '@' + 'fa1929c5500ccfc01852ba50ff9258303e93601e', }, 'third_party/google_benchmark_chrome/src': { 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '761305ec3b33abf30e08d50eb829e19a802581cc', @@ -280,13 +280,13 @@ deps = { 'third_party/fuzztest': Var('chromium_url') + '/chromium/src/third_party/fuzztest.git' + '@' + 'aa6ba9074b8d66a2e2853a0a0992c25966022e13', 'third_party/fuzztest/src': - Var('chromium_url') + '/external/github.com/google/fuzztest.git' + '@' + 'e101ca021a40733d0fa76a3bd9b49b5f76da4f8a', + Var('chromium_url') + '/external/github.com/google/fuzztest.git' + '@' + '7940ee9a7ebce6419c6391eef8b289524b16f198', 'third_party/googletest/src': - Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '244cec869d12e53378fa0efb610cd4c32a454ec8', + Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'b2b9072ecbe874f5937054653ef8f2731eb0f010', 'third_party/highway/src': Var('chromium_url') + '/external/github.com/google/highway.git' + '@' + '84379d1c73de9681b54fbe1c035a23c7bd5d272d', 'third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '1b2e3e8a421efae36141a7b932b41e315b089af8', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'f27805b7d7d8618fa73ce89e9d28e0a8b2216fec', 'third_party/instrumented_libs': { 'url': Var('chromium_url') + '/chromium/third_party/instrumented_libraries.git' + '@' + '69015643b3f68dbd438c010439c59adc52cac808', 'condition': 'checkout_instrumented_libraries', @@ -302,169 +302,173 @@ deps = { 'third_party/jsoncpp/source': Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448', 'third_party/libc++/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '4b4a57f5cf627639c041368120af9d69ed40032c', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + 'cdb24138c1591d12b07d5147825ec7dfeb495276', 'third_party/libc++abi/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '864f61dc9253d56586ada34c388278565ef513f6', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'a02fa0058d8d52aca049868d229808a3e5dadbad', + 'third_party/libpfm4': + Var('chromium_url') + '/chromium/src/third_party/libpfm4.git' + '@' + 
'25c29f04c9127e1ca09e6c1181f74850aa7f118b', + 'third_party/libpfm4/src': + Var('chromium_url') + '/external/git.code.sf.net/p/perfmon2/libpfm4.git' + '@' + '964baf9d35d5f88d8422f96d8a82c672042e7064', 'third_party/libunwind/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '322be580a5a193a921c349a15747eeeb9a716ad1', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '61ba011ba3c4ed238af93ebad476d3ab5a2fb5ab', 'third_party/llvm-libc/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libc.git' + '@' + '7b45735a2b2c0c11c7c4e02fc6bae12ea95aec48', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libc.git' + '@' + 'db35841a6fcbeee98e4d7fe6ba3df1a876a18a62', 'third_party/llvm-build/Release+Asserts': { 'dep_type': 'gcs', 'bucket': 'chromium-browser-clang', 'objects': [ { - 'object_name': 'Linux_x64/clang-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '35a8629534f534aa6771470945fc0baa6906b3fffb28433bc08674d343b84c90', - 'size_bytes': 55674480, - 'generation': 1758743123214066, + 'object_name': 'Linux_x64/clang-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': 'f6a487ffd0e56ba7a39b063d85d1f8ff7846514f50635785730cffb7368872ce', + 'size_bytes': 55669844, + 'generation': 1759771493989631, 'condition': 'host_os == "linux"', }, { - 'object_name': 'Linux_x64/clang-tidy-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '0342c1f9f546b2c87010418c37eaf494b3bcee24e60a351a880046951bf4d47b', - 'size_bytes': 14059964, - 'generation': 1758743123322050, + 'object_name': 'Linux_x64/clang-tidy-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': '4fc7aacf4c25e50a25a941f1186a9e042ae26a2c5c698f359907798fa68106c8', + 'size_bytes': 14053336, + 'generation': 1759771494041411, 'condition': 'host_os == "linux" and checkout_clang_tidy', }, { - 'object_name': 'Linux_x64/clangd-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '8b9513babd89f706e928be51b9a4c08a4511dae1c152285808d7a25b299ae94b', - 'size_bytes': 14210752, - 'generation': 1758743123414815, + 'object_name': 'Linux_x64/clangd-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': '238897cb0b55ffcb7f6b8f6a10055e44e05023642441a800895704ced91d37d1', + 'size_bytes': 14197108, + 'generation': 1759771494144266, 'condition': 'host_os == "linux" and checkout_clangd', }, { - 'object_name': 'Linux_x64/llvm-code-coverage-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '56bb0093e2e8f71e682f03b0e379d7dac0bacfcc83bfccfd42a4fcd1310fbe75', - 'size_bytes': 2272396, - 'generation': 1758743123592944, + 'object_name': 'Linux_x64/llvm-code-coverage-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': '2c5b0bf210ca982d8ec37cacf3d06d9c45bd6e68b33dcaabce0d108d6c266a36', + 'size_bytes': 2272128, + 'generation': 1759771494296549, 'condition': 'host_os == "linux" and checkout_clang_coverage_tools', }, { - 'object_name': 'Linux_x64/llvmobjdump-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '9236697d64fc9444b22c90a112f6b3a76ee1edf5b3891af67de0849deb274514', - 'size_bytes': 5666148, - 'generation': 1758743123461779, + 'object_name': 'Linux_x64/llvmobjdump-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': 'fd644634db56977b072d951f26571ac41c9c298bf5989e99efeb150ee8427364', + 'size_bytes': 5666140, + 'generation': 1759771494159187, 'condition': '(checkout_linux or checkout_mac or checkout_android) and host_os == "linux"', }, { - 'object_name': 'Mac/clang-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': 
'90e1a97b91d9a39bafc719f5e3b4c3cd8bf457c39f1dc4a27e4bfc59b9331bc5', - 'size_bytes': 53576996, - 'generation': 1758743125100350, + 'object_name': 'Mac/clang-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': '44811b6ed6868142c088807f6bcc0d08811a7b11d3f2bc2124c45868037e8cc3', + 'size_bytes': 53583464, + 'generation': 1759771495565305, 'condition': 'host_os == "mac" and host_cpu == "x64"', }, { - 'object_name': 'Mac/clang-mac-runtime-library-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '7140b54db5936c79bb6f216ea176be70c7e6711f0dec2224369fba76cb9c1572', - 'size_bytes': 1004900, - 'generation': 1758743135101043, + 'object_name': 'Mac/clang-mac-runtime-library-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': '8a2e16410bede5d52c77a012f182dde2350b05e647f7c1acaf7823ce816b4422', + 'size_bytes': 1005144, + 'generation': 1759771503758969, 'condition': 'checkout_mac and not host_os == "mac"', }, { - 'object_name': 'Mac/clang-tidy-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '0ef9124d5c56825ebbd10539298400a0b0d1d8d67e0902a7e89b3fecff7f9b0c', - 'size_bytes': 14141008, - 'generation': 1758743125225488, + 'object_name': 'Mac/clang-tidy-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': '66633fe8846fddeda86b5ee992b945939bfe46567c9c685900c39531d22ce5cf', + 'size_bytes': 14133312, + 'generation': 1759771495642847, 'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clang_tidy', }, { - 'object_name': 'Mac/clangd-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': 'aec08495162681dbfe4e78bd6c728e6f1f410f3fe6c0e070c095dcf4bfda1382', - 'size_bytes': 15632104, - 'generation': 1758743125301839, + 'object_name': 'Mac/clangd-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': '44088b951aa7ddc96c0f32703b076311a7e7b803b3adfe0bfe9725f78c4fab29', + 'size_bytes': 15627392, + 'generation': 1759771495653658, 'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clangd', }, { - 'object_name': 'Mac/llvm-code-coverage-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '36b279a1a6dc9d90e932823138f522e3c2741005e34732bce60fea60881a3963', - 'size_bytes': 2321200, - 'generation': 1758743125546947, + 'object_name': 'Mac/llvm-code-coverage-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': '79d62c78d256a508a0f3dbe59aa0fdf0391a9d462bf74e56adc1dee82efa83ac', + 'size_bytes': 2321940, + 'generation': 1759771495825689, 'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clang_coverage_tools', }, { - 'object_name': 'Mac/llvmobjdump-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '29e8b2d333ecb6640cf99d9103b999ff2be0bb13fe8300528b4245bf6b88869c', - 'size_bytes': 5582716, - 'generation': 1758743125362967, + 'object_name': 'Mac/llvmobjdump-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': 'a10d075e19e7b614ffd8c5a65f04fbd45011ec74c735dda89f0b3780ab397329', + 'size_bytes': 5567160, + 'generation': 1759771495741126, 'condition': 'host_os == "mac" and host_cpu == "x64"', }, { - 'object_name': 'Mac_arm64/clang-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '29d82cb9830396c21b967a5784f838dcb3d62abfebd08d67d36821dba6eb4ce8', - 'size_bytes': 44576940, - 'generation': 1758743136591599, + 'object_name': 'Mac_arm64/clang-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': 'c97e4f62cdd77edf725ccbf4cd63b589302605bf643c871f83214f39e629b2ea', + 'size_bytes': 44593804, + 'generation': 1759771504972271, 'condition': 'host_os == "mac" and host_cpu == "arm64"', }, { - 'object_name': 
'Mac_arm64/clang-tidy-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '8d7781b19bd032eeda7a94810e5429e0501392ac5585fcd16499a3d72e12ab9e', - 'size_bytes': 12142468, - 'generation': 1758743136678250, + 'object_name': 'Mac_arm64/clang-tidy-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': '3a0eb0fb3a4633c8b4b143e826c5476c41cdd6bd0db8e93a74bbee6520b02b79', + 'size_bytes': 12136348, + 'generation': 1759771505073378, 'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clang_tidy', }, { - 'object_name': 'Mac_arm64/clangd-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '16617a896e7304ba76af9cbcab00edeb63753804237fc5055810b2049d00b3dc', - 'size_bytes': 12474420, - 'generation': 1758743136764487, + 'object_name': 'Mac_arm64/clangd-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': '2a5dc1f385bacd25b974b8aa15c57008e33bc384521e2d705a940acbb3292356', + 'size_bytes': 12479180, + 'generation': 1759771505148040, 'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clangd', }, { - 'object_name': 'Mac_arm64/llvm-code-coverage-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '3ae73edf0d6b69d6aa41247c8268aaf292630f708036d55f3e0e5fa2ce340497', - 'size_bytes': 1947856, - 'generation': 1758743136945536, + 'object_name': 'Mac_arm64/llvm-code-coverage-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': '00bf0f82ca9aff15f32e7f0cf7e7b25d36a5a672a1a9bc345c1b7e140a478f93', + 'size_bytes': 1948520, + 'generation': 1759771505303586, 'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clang_coverage_tools', }, { - 'object_name': 'Mac_arm64/llvmobjdump-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '069266d0ab2b9029908edc0b958af5d5ec3d9cd939b063da7aeeb53548137df9', - 'size_bytes': 5277360, - 'generation': 1758743136838343, + 'object_name': 'Mac_arm64/llvmobjdump-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': '7aa959752d6beafc74129e4822912021f855584e55a55600044f1d42b889f8b0', + 'size_bytes': 5292960, + 'generation': 1759771505201957, 'condition': 'host_os == "mac" and host_cpu == "arm64"', }, { - 'object_name': 'Win/clang-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': 'bec899a7163ba0d446a5355e554cf8644b5e3db729404c6defb077549bc9f1b4', - 'size_bytes': 47645664, - 'generation': 1758743148772393, + 'object_name': 'Win/clang-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': 'fc756186dea61e700bd0f885b585050d9356bbd7f942dafae25d38eef4671adf', + 'size_bytes': 47657436, + 'generation': 1759771514781908, 'condition': 'host_os == "win"', }, { - 'object_name': 'Win/clang-tidy-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '10770b3b7b34a0e968cbeb1838b1446080897941c2bb5d192aa6596bbb386c27', - 'size_bytes': 14025008, - 'generation': 1758743148836717, + 'object_name': 'Win/clang-tidy-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': 'f7ecd7e8d555e8622e0096ea1aca3ddb3fb4e89e91228c3c87289a4b8ca7919c', + 'size_bytes': 14016476, + 'generation': 1759771514824669, 'condition': 'host_os == "win" and checkout_clang_tidy', }, { - 'object_name': 'Win/clang-win-runtime-library-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': 'fedf17614b4cba1c8edc7f3ad1c4636bb79535068e76ad6fed75fe65515dc4b8', - 'size_bytes': 2503180, - 'generation': 1758743159444585, + 'object_name': 'Win/clang-win-runtime-library-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': '0a426702c9e0f92ea27f9611a1665cc5df9a58820360d3fa6a4026b9a0e5120f', + 'size_bytes': 2501292, + 'generation': 1759771523074183, 'condition': 
'checkout_win and not host_os == "win"', }, { - 'object_name': 'Win/clangd-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '6dcd1c2f3bd7dbd547f8b93b014a3bc9f9d84b0920fc7632f45a6bfc1b359ae1', - 'size_bytes': 14366920, - 'generation': 1758743148925930, + 'object_name': 'Win/clangd-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': 'b172d0246511cdeffbc5a4fa44ad402a6b9eacd9d3e2e77d88a9965f80d344d5', + 'size_bytes': 14364312, + 'generation': 1759771514873065, 'condition': 'host_os == "win" and checkout_clangd', }, { - 'object_name': 'Win/llvm-code-coverage-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '57e86c6eeeccb1e6e5b87d87c2231f01e006d9067e2f3ad50530e32674599ad6', - 'size_bytes': 2366460, - 'generation': 1758743149180966, + 'object_name': 'Win/llvm-code-coverage-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': 'b70528795afd95729971b74939e512c638a8a93fd1ee1c9205a6240f7af28802', + 'size_bytes': 2368144, + 'generation': 1759771515105244, 'condition': 'host_os == "win" and checkout_clang_coverage_tools', }, { - 'object_name': 'Win/llvmobjdump-llvmorg-22-init-8940-g4d4cb757-1.tar.xz', - 'sha256sum': '3f398db586e4f75a48eda2a508be4577a9c54cda78cf03afa57b454801ed5bde', - 'size_bytes': 5668924, - 'generation': 1758743148999346, + 'object_name': 'Win/llvmobjdump-llvmorg-22-init-8940-g4d4cb757-84.tar.xz', + 'sha256sum': '94c068f109e220e028a38f5beced7d6acd67725fc0b1da9fa8ed1b959f12d799', + 'size_bytes': 5673824, + 'generation': 1759771514962844, 'condition': '(checkout_linux or checkout_mac or checkout_android) and host_os == "win"', }, ], @@ -490,19 +494,19 @@ deps = { 'third_party/perfetto': Var('android_url') + '/platform/external/perfetto.git' + '@' + '40b529923598b739b2892a536a7692eedbed5685', 'third_party/protobuf': - Var('chromium_url') + '/chromium/src/third_party/protobuf.git' + '@' + 'fef7a765bb0d1122d32b99f588537b83e2dffe7b', + Var('chromium_url') + '/chromium/src/third_party/protobuf.git' + '@' + 'fcb7931541e4fe633b796db3f3e6f54c2dd297a8', 'third_party/re2/src': - Var('chromium_url') + '/external/github.com/google/re2.git' + '@' + '6569a9a3df256f4c0c3813cb8ee2f8eef6e2c1fb', + Var('chromium_url') + '/external/github.com/google/re2.git' + '@' + '61c4644171ee6b480540bf9e569cba06d9090b4b', 'third_party/requests': { 'url': Var('chromium_url') + '/external/github.com/kennethreitz/requests.git' + '@' + 'c7e0fc087ceeadb8b4c84a0953a422c474093d6d', 'condition': 'checkout_android', }, 'tools/rust': - Var('chromium_url') + '/chromium/src/tools/rust' + '@' + 'f93e7ca2a64938e9b4759ec3297f02ca7b3f605f', + Var('chromium_url') + '/chromium/src/tools/rust' + '@' + '12557fcc00d7e94caa5e270d7343b566e48a68ae', 'tools/win': - Var('chromium_url') + '/chromium/src/tools/win' + '@' + '2cbfc8d2e5ef4a6afd9774e9a9eaebd921a9f248', + Var('chromium_url') + '/chromium/src/tools/win' + '@' + '24494b071e019a2baea4355d9870ffc5fc0bbafe', 'third_party/rust': - Var('chromium_url') + '/chromium/src/third_party/rust' + '@' + '667365a2aef07b2a9065a53beddbc8ea60ff5c6d', + Var('chromium_url') + '/chromium/src/third_party/rust' + '@' + '4d93511ebaceb09ebdd83c8876a4a936b75fa04d', 'third_party/rust-toolchain': { 'dep_type': 'gcs', 'bucket': 'chromium-browser-clang', @@ -550,11 +554,11 @@ deps = { 'third_party/zlib': Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '85f05b0835f934e52772efc308baa80cdd491838', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'b1d66053e6460f04dbe81d77cfeaa9a5d50dee3e', + Var('chromium_url') + '/chromium/src/tools/clang.git' 
+ '@' + '97f0845783b3d8ebca7541afb46ec53c3f4bd3ac', 'tools/protoc_wrapper': Var('chromium_url') + '/chromium/src/tools/protoc_wrapper.git' + '@' + '3438d4183bfc7c0d6850e8b970204cc8189f0323', 'third_party/abseil-cpp': { - 'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + 'c3655ab8bb514aa318207c2685b3ba557a048201', + 'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + '3fb321d9764442ceaf2e17b6e68ab6b6836bc78a', 'condition': 'not build_with_chromium', }, 'third_party/zoslib': { diff --git a/deps/v8/GEMINI.md b/deps/v8/GEMINI.md index a0231b10b29afb..a05de91ed9d0dc 100644 --- a/deps/v8/GEMINI.md +++ b/deps/v8/GEMINI.md @@ -6,9 +6,9 @@ Documentation can be found at https://v8.dev/docs. ## Key Commands -- **Build (Debug):** `tools/dev/gm.py quiet x64.debug` -- **Build (Optimized Debug):** `tools/dev/gm.py quiet x64.optdebug` -- **Build (Release):** `tools/dev/gm.py quiet x64.release` +- **Build (Debug):** `tools/dev/gm.py quiet x64.debug tests` +- **Build (Optimized Debug):** `tools/dev/gm.py quiet x64.optdebug tests` +- **Build (Release):** `tools/dev/gm.py quiet x64.release tests` - **Run All Tests:** `tools/run-tests.py --progress dots --exit-after-n-failures=5 --outdir=out/x64.optdebug` - **Run C++ Tests:** `tools/run-tests.py --progress dots --exit-after-n-failures=5 --outdir=out/x64.optdebug cctest unittests` - **Run JavaScript Tests:** `tools/run-tests.py --progress dots --exit-after-n-failures=5 --outdir=out/x64.optdebug mjsunit` @@ -139,7 +139,7 @@ If there are any failing tests, they will be reported along their stderr and a c # ...stack trace... Received signal 6 -Command: out/x64.optdebug/d8 --test test/mjsunit/mjsunit.js test/mjsunit/maglev/regress-429656023.js --random-seed=-190258694 --nohard-abort --verify-heap --testing-d8-test-runner --allow-natives-syntax +Command: out/x64.optdebug/d8 --test test/mjsunit/mjsunit.js test/mjsunit/maglev/regress-429656023.js --random-seed=-190258694 --nohard-abort --verify-heap --allow-natives-syntax ``` You can retry the test either by running the test name with `tools/run-tests.py`, e.g. `tools/run-tests.py --progress dots --outdir=out/x64.optdebug mjsunit/maglev/regress-429656023`, or by running the command directly. When running the command directly, you can add additional flags to help debug the issue, and you can try running a different build (e.g. running a debug build if a release build fails). diff --git a/deps/v8/RISCV_OWNERS b/deps/v8/RISCV_OWNERS index 6650355e2edfe9..cf613e2627324f 100644 --- a/deps/v8/RISCV_OWNERS +++ b/deps/v8/RISCV_OWNERS @@ -1,5 +1,3 @@ -brice.dobry@futurewei.com kasperl@rivosinc.com -peng.w@rioslab.org qiuji@iscas.ac.cn yahan@iscas.ac.cn diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni index 44f056cf897bcd..d198859576a4bd 100644 --- a/deps/v8/gni/v8.gni +++ b/deps/v8/gni/v8.gni @@ -46,7 +46,8 @@ declare_args() { v8_enable_builtins_optimization = "" # Turns on all V8 debug features. Enables running V8 in a pseudo debug mode - # within a release Chrome. + # within a release Chrome. This implies verification features to detect more + # bugs and additionally includes features that aid developers when debugging. v8_enable_debugging_features = is_debug # Enable ECMAScript Internationalization API. Enabling this feature will @@ -160,6 +161,9 @@ declare_args() { # Add fuzzilli fuzzer support. v8_fuzzilli = false + # Force optimizing with -O3 for increased inlining. 
+ v8_force_optimize_speed = false + # Enable FuzzTest v8_enable_fuzztest = !build_with_v8_embedder && !(defined(build_with_node) && build_with_node) && @@ -247,6 +251,14 @@ declare_args() { v8_wasm_random_fuzzers = "" } +declare_args() { + # Enable more (costly) verification features that help to detect bugs. + v8_enable_verification_features = v8_enable_debugging_features +} + +assert(!v8_enable_debugging_features || v8_enable_verification_features, + "Debugging features require verification features.") + if (v8_use_external_startup_data == "") { # If not specified as a gn arg, use external startup data by default if # we're not on ios. @@ -332,7 +344,7 @@ v8_inspector_js_protocol = v8_path_prefix + "/include/js_protocol.pdl" # # Common configs to remove or add in all v8 targets. -v8_remove_configs = [] +v8_remove_configs = [ "//build/config/compiler:default_optimization" ] v8_add_configs = [ v8_path_prefix + ":features", v8_path_prefix + ":toolchain", @@ -340,19 +352,18 @@ v8_add_configs = [ v8_path_prefix + ":sanitizer_defines", ] -if (is_debug && !v8_optimized_debug) { - v8_remove_configs += [ "//build/config/compiler:default_optimization" ] +if (v8_force_optimize_speed) { + # Switch to force -O3 optimization regardless of platform or sanitizer + # usage. + v8_add_configs += [ "//build/config/compiler:optimize_speed" ] +} else if (is_debug && !v8_optimized_debug) { v8_add_configs += [ "//build/config/compiler:no_optimize" ] -} else { - v8_remove_configs += [ "//build/config/compiler:default_optimization" ] - +} else if (((is_posix && !is_android) || is_win) && !using_sanitizer) { # TODO(crbug.com/621335) Rework this so that we don't have the confusion # between "optimize_speed" and "optimize_max". - if (((is_posix && !is_android) || is_win) && !using_sanitizer) { - v8_add_configs += [ "//build/config/compiler:optimize_speed" ] - } else { - v8_add_configs += [ "//build/config/compiler:optimize_max" ] - } + v8_add_configs += [ "//build/config/compiler:optimize_speed" ] +} else { + v8_add_configs += [ "//build/config/compiler:optimize_max" ] } if (!is_debug) { diff --git a/deps/v8/include/cppgc/heap-statistics.h b/deps/v8/include/cppgc/heap-statistics.h index c357f916f16a5b..93f4f9b654cbe7 100644 --- a/deps/v8/include/cppgc/heap-statistics.h +++ b/deps/v8/include/cppgc/heap-statistics.h @@ -55,8 +55,12 @@ struct HeapStatistics final { size_t resident_size_bytes = 0; /** Amount of memory actually used on the page. */ size_t used_size_bytes = 0; - /** Statistics for object allocated on the page. Filled only when - * NameProvider::SupportsCppClassNamesAsObjectNames() is true. */ + /** + * Statistics for object allocated on the page. If an object provides a + * name by inheriting from NameProvider, its name will be recorded in the + * statistics. Other objects, without an explicit name, are merged under a + * single type unless the CPPGC_SUPPORTS_OBJECT_NAME build flag is enabled. + */ std::vector object_statistics; }; diff --git a/deps/v8/include/cppgc/visitor.h b/deps/v8/include/cppgc/visitor.h index 765d9b137af484..29c29eb937df2f 100644 --- a/deps/v8/include/cppgc/visitor.h +++ b/deps/v8/include/cppgc/visitor.h @@ -167,6 +167,7 @@ class V8_EXPORT Visitor { */ template void Trace(const T& object) { + static_assert(!IsGarbageCollectedOrMixinTypeV); #if V8_ENABLE_CHECKS // This object is embedded in potentially multiple nested objects. 
The // outermost object must not be in construction as such objects are (a) not diff --git a/deps/v8/include/v8-callbacks.h b/deps/v8/include/v8-callbacks.h index 09fb5843d724e4..850b7ccbd4f210 100644 --- a/deps/v8/include/v8-callbacks.h +++ b/deps/v8/include/v8-callbacks.h @@ -233,6 +233,10 @@ struct OOMDetails { using OOMErrorCallback = void (*)(const char* location, const OOMDetails& details); +using OOMErrorCallbackWithData = void (*)(const char* location, + const OOMDetails& details, + void* data); + using MessageCallback = void (*)(Local message, Local data); // --- Tracing --- @@ -334,6 +338,7 @@ using SharedArrayBufferConstructorEnabledCallback = */ enum class ModuleImportPhase { kSource, + kDefer, kEvaluation, }; diff --git a/deps/v8/include/v8-external.h b/deps/v8/include/v8-external.h index 2e245036f42231..c3feb1daad7e5f 100644 --- a/deps/v8/include/v8-external.h +++ b/deps/v8/include/v8-external.h @@ -12,13 +12,28 @@ namespace v8 { class Isolate; +/** + * A tag for external pointers. Objects with different C++ types should use + * different values of ExternalPointerTypeTag when using v8::External. The + * allowed range is 0..V8_EXTERNAL_POINTER_TAG_COUNT - 1. If this is not + * sufficient, V8_EXTERNAL_POINTER_TAG_COUNT can be increased. + */ +using ExternalPointerTypeTag = uint16_t; + +constexpr ExternalPointerTypeTag kExternalPointerTypeTagDefault = 0; + /** * A JavaScript value that wraps a C++ void*. This type of value is mainly used * to associate C++ data structures with JavaScript objects. */ class V8_EXPORT External : public Value { public: - static Local New(Isolate* isolate, void* value); + V8_DEPRECATE_SOON("Use the version with the type tag.") + static Local New(Isolate* isolate, void* value) { + return New(isolate, value, kExternalPointerTypeTagDefault); + } + static Local New(Isolate* isolate, void* value, + ExternalPointerTypeTag tag); V8_INLINE static External* Cast(Value* value) { #ifdef V8_ENABLE_CHECKS CheckCast(value); @@ -26,7 +41,10 @@ class V8_EXPORT External : public Value { return static_cast(value); } - void* Value() const; + V8_DEPRECATE_SOON("Use the version with the type tag.") + void* Value() const { return Value(kExternalPointerTypeTagDefault); } + + void* Value(ExternalPointerTypeTag tag) const; private: static void CheckCast(v8::Value* obj); diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h index 7f70eee2d542a6..58ec34fa050c91 100644 --- a/deps/v8/include/v8-fast-api-calls.h +++ b/deps/v8/include/v8-fast-api-calls.h @@ -209,6 +209,7 @@ #include #include +#include "v8-external.h" // NOLINT(build/include_directory) #include "v8-internal.h" // NOLINT(build/include_directory) #include "v8-local-handle.h" // NOLINT(build/include_directory) #include "v8-typed-array.h" // NOLINT(build/include_directory) @@ -775,6 +776,9 @@ TryToCopyAndConvertArrayToCppBuffer::Build().GetId(), double>(Local src, double* dst, uint32_t max_length); +constexpr v8::ExternalPointerTypeTag kFastAPIPointerTag = + V8_EXTERNAL_POINTER_TAG_COUNT - 1; + } // namespace v8 #endif // INCLUDE_V8_FAST_API_CALLS_H_ diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index 945c6cb61c9f46..8d28f22018609a 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -10,6 +10,8 @@ #include #include +#include +#include #include #include #include @@ -18,22 +20,6 @@ #include "v8config.h" // NOLINT(build/include_directory) -// TODO(pkasting): Use /spaceship unconditionally after dropping -// support for old 
libstdc++ versions. -#if __has_include() -#include -#endif -#if defined(__cpp_lib_three_way_comparison) && \ - __cpp_lib_three_way_comparison >= 201711L && \ - defined(__cpp_lib_concepts) && __cpp_lib_concepts >= 202002L -#include -#include - -#define V8_HAVE_SPACESHIP_OPERATOR 1 -#else -#define V8_HAVE_SPACESHIP_OPERATOR 0 -#endif - namespace v8 { class Array; @@ -426,6 +412,11 @@ constexpr size_t kMaxCppHeapPointers = 0; // currently used in Chrome. #define V8_EMBEDDER_DATA_TAG_COUNT 15 +// The number of tags reserved for pointers stored in v8::External. The value is +// picked arbitrarily, and is slightly larger than the number of tags currently +// used in Chrome. +#define V8_EXTERNAL_POINTER_TAG_COUNT 40 + // Generic tag range struct to represent ranges of type tags. // // When referencing external objects via pointer tables, type tags are @@ -566,10 +557,15 @@ enum ExternalPointerTag : uint16_t { // Placeholders for embedder data. kFirstEmbedderDataTag, kLastEmbedderDataTag = kFirstEmbedderDataTag + V8_EMBEDDER_DATA_TAG_COUNT - 1, - // This tag essentially stands for a `void*` pointer in the V8 API, and it is - // the Embedder's responsibility to ensure type safety (against substitution) - // and lifetime validity of these objects. - kExternalObjectValueTag, + + // Placeholders for pointers store in v8::External. + kFirstExternalTypeTag, + kLastExternalTypeTag = + kFirstExternalTypeTag + V8_EXTERNAL_POINTER_TAG_COUNT - 1, + // This tag is used when a fast-api callback as a parameter of type + // `kPointer`. The V8 fast API is only able to use this generic tag, and is + // therefore not supposed to be used in Chrome. + kFastApiExternalTypeTag = kLastExternalTypeTag, kFirstMaybeReadOnlyExternalPointerTag, kFunctionTemplateInfoCallbackTag = kFirstMaybeReadOnlyExternalPointerTag, kAccessorInfoGetterTag, @@ -1027,9 +1023,9 @@ class Internals { static constexpr Tagged_t kBuildDependentTheHoleValue = 0x20001; #else #ifdef V8_INTL_SUPPORT - static constexpr Tagged_t kBuildDependentTheHoleValue = 0x6559; + static constexpr Tagged_t kBuildDependentTheHoleValue = 0x6581; #else - static constexpr Tagged_t kBuildDependentTheHoleValue = 0x58bd; + static constexpr Tagged_t kBuildDependentTheHoleValue = 0x58d1; #endif #endif @@ -1450,12 +1446,7 @@ class V8_EXPORT StrongRootAllocatorBase { public: Heap* heap() const { return heap_; } - friend bool operator==(const StrongRootAllocatorBase& a, - const StrongRootAllocatorBase& b) { - // TODO(pkasting): Replace this body with `= default` after dropping support - // for old gcc versions. - return a.heap_ == b.heap_; - } + constexpr bool operator==(const StrongRootAllocatorBase&) const = default; protected: explicit StrongRootAllocatorBase(Heap* heap) : heap_(heap) {} @@ -1491,45 +1482,29 @@ class StrongRootAllocator : private std::allocator { using std::allocator::deallocate; }; -// TODO(pkasting): Replace with `requires` clauses after dropping support for -// old gcc versions. 
-template -inline constexpr bool kHaveIteratorConcept = false; template -inline constexpr bool kHaveIteratorConcept< - Iterator, std::void_t> = true; +concept HasIteratorConcept = requires { typename Iterator::iterator_concept; }; -template -inline constexpr bool kHaveIteratorCategory = false; template -inline constexpr bool kHaveIteratorCategory< - Iterator, std::void_t> = true; +concept HasIteratorCategory = + requires { typename Iterator::iterator_category; }; // Helper struct that contains an `iterator_concept` type alias only when either // `Iterator` or `std::iterator_traits` do. // Default: no alias. -template +template struct MaybeDefineIteratorConcept {}; // Use `Iterator::iterator_concept` if available. -template -struct MaybeDefineIteratorConcept< - Iterator, std::enable_if_t>> { +template +struct MaybeDefineIteratorConcept { using iterator_concept = typename Iterator::iterator_concept; }; // Otherwise fall back to `std::iterator_traits` if possible. template -struct MaybeDefineIteratorConcept< - Iterator, std::enable_if_t && - !kHaveIteratorConcept>> { - // There seems to be no feature-test macro covering this, so use the - // presence of `` as a crude proxy, since it was added to the - // standard as part of the Ranges papers. - // TODO(pkasting): Add this unconditionally after dropping support for old - // libstdc++ versions. -#if __has_include() + requires(HasIteratorCategory && !HasIteratorConcept) +struct MaybeDefineIteratorConcept { using iterator_concept = typename std::iterator_traits::iterator_concept; -#endif }; // A class of iterators that wrap some different iterator type. @@ -1566,11 +1541,8 @@ class WrappedIterator : public MaybeDefineIteratorConcept { constexpr WrappedIterator() noexcept = default; constexpr explicit WrappedIterator(Iterator it) noexcept : it_(it) {} - // TODO(pkasting): Switch to `requires` and concepts after dropping support - // for old gcc and libstdc++ versions. - template >> + template + requires std::is_convertible_v constexpr WrappedIterator( const WrappedIterator& other) noexcept : it_(other.base()) {} @@ -1590,7 +1562,7 @@ class WrappedIterator : public MaybeDefineIteratorConcept { const noexcept { return it_ == other.base(); } -#if V8_HAVE_SPACESHIP_OPERATOR + template [[nodiscard]] constexpr auto operator<=>( const WrappedIterator& other) @@ -1614,41 +1586,6 @@ class WrappedIterator : public MaybeDefineIteratorConcept { : std::partial_ordering::unordered; } } -#else - // Assume that if spaceship isn't present, operator rewriting might not be - // either. 
- template - [[nodiscard]] constexpr bool operator!=( - const WrappedIterator& other) - const noexcept { - return it_ != other.base(); - } - - template - [[nodiscard]] constexpr bool operator<( - const WrappedIterator& other) - const noexcept { - return it_ < other.base(); - } - template - [[nodiscard]] constexpr bool operator<=( - const WrappedIterator& other) - const noexcept { - return it_ <= other.base(); - } - template - [[nodiscard]] constexpr bool operator>( - const WrappedIterator& other) - const noexcept { - return it_ > other.base(); - } - template - [[nodiscard]] constexpr bool operator>=( - const WrappedIterator& other) - const noexcept { - return it_ >= other.base(); - } -#endif constexpr WrappedIterator& operator++() noexcept { ++it_; diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h index bb9676d982a0e3..1efaa547bc0ed3 100644 --- a/deps/v8/include/v8-isolate.h +++ b/deps/v8/include/v8-isolate.h @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -360,10 +361,12 @@ class V8_EXPORT Isolate { * The following parameters describe the offsets for addressing type info * for wrapped API objects and are used by the fast C API * (for details see v8-fast-api-calls.h). + * + * V8_DEPRECATED was applied in v14.3. */ - V8_DEPRECATE_SOON("This field is unused.") + V8_DEPRECATED("This field is unused.") int embedder_wrapper_type_index = -1; - V8_DEPRECATE_SOON("This field is unused.") + V8_DEPRECATED("This field is unused.") int embedder_wrapper_object_index = -1; /** @@ -668,6 +671,7 @@ class V8_EXPORT Isolate { kWithStatement = 180, kHtmlWrapperMethods = 181, kWasmCustomDescriptors = 182, + kWasmResizableBuffers = 183, // If you add new values here, you'll also need to update Chromium's: // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to @@ -861,6 +865,12 @@ class V8_EXPORT Isolate { */ void MemoryPressureNotification(MemoryPressureLevel level); + /** + * This triggers garbage collections until either `allocate` succeeds, or + * until v8 gives up and triggers an OOM error. + */ + bool RetryCustomAllocate(std::function allocate); + /** * Optional request from the embedder to tune v8 towards energy efficiency * rather than speed if `battery_saver_mode_enabled` is true, because the @@ -1655,6 +1665,13 @@ class V8_EXPORT Isolate { /** Set the callback to invoke in case of OOM errors. */ void SetOOMErrorHandler(OOMErrorCallback that); + /** + * \copydoc SetOOMErrorHandler(OOMErrorCallback) + * + * \param data Additional data that should be passed to the callback. + */ + void SetOOMErrorHandler(OOMErrorCallbackWithData that, void* data); + /** * Add a callback to invoke in case the heap size is close to the heap limit. * If multiple callbacks are added, only the most recently added callback is diff --git a/deps/v8/include/v8-local-handle.h b/deps/v8/include/v8-local-handle.h index bdf1a1d6ff8cb8..a1762eb25735c9 100644 --- a/deps/v8/include/v8-local-handle.h +++ b/deps/v8/include/v8-local-handle.h @@ -389,7 +389,7 @@ class V8_TRIVIAL_ABI Local : public LocalBase, * objects to which they refer are physically equal. * * If both handles refer to JS objects, this is the same as strict - * non-equality. For primitives, such as numbers or strings, a `true` return + * non-equality. For primitives, such as numbers or strings, a `false` return * value does not indicate that the values aren't equal in the JavaScript * sense. Use `Value::StrictEquals()` to check primitives for equality. 
*/ diff --git a/deps/v8/include/v8-memory-span.h b/deps/v8/include/v8-memory-span.h index 1b72b99fef556d..61f0ccf1efb34f 100644 --- a/deps/v8/include/v8-memory-span.h +++ b/deps/v8/include/v8-memory-span.h @@ -8,29 +8,14 @@ #include #include +#include #include #include +#include #include #include "v8config.h" // NOLINT(build/include_directory) -// TODO(pkasting): Use /spaceship unconditionally after dropping -// support for old libstdc++ versions. -#if __has_include() -#include -#endif -#if defined(__cpp_lib_three_way_comparison) && \ - __cpp_lib_three_way_comparison >= 201711L -#define V8_HAVE_SPACESHIP_OPERATOR 1 -#else -#define V8_HAVE_SPACESHIP_OPERATOR 0 -#endif - -// TODO(pkasting): Make this block unconditional after dropping support for old -// libstdc++ versions. -#if __has_include() -#include - namespace v8 { template @@ -46,7 +31,6 @@ inline constexpr bool std::ranges::enable_view> = true; template inline constexpr bool std::ranges::enable_borrowed_range> = true; -#endif namespace v8 { @@ -164,52 +148,13 @@ class V8_EXPORT MemorySpan { using pointer = value_type*; using reference = value_type&; using iterator_category = std::random_access_iterator_tag; - // There seems to be no feature-test macro covering this, so use the - // presence of `` as a crude proxy, since it was added to the - // standard as part of the Ranges papers. - // TODO(pkasting): Add this unconditionally after dropping support for old - // libstdc++ versions. -#if __has_include() using iterator_concept = std::contiguous_iterator_tag; -#endif // Required to satisfy `std::semiregular<>`. constexpr Iterator() = default; - [[nodiscard]] friend constexpr bool operator==(const Iterator& a, - const Iterator& b) { - // TODO(pkasting): Replace this body with `= default` after dropping - // support for old gcc versions. - return a.ptr_ == b.ptr_; - } -#if V8_HAVE_SPACESHIP_OPERATOR - [[nodiscard]] friend constexpr auto operator<=>(const Iterator&, - const Iterator&) = default; -#else - // Assume that if spaceship isn't present, operator rewriting might not be - // either. - [[nodiscard]] friend constexpr bool operator!=(const Iterator& a, - const Iterator& b) { - return a.ptr_ != b.ptr_; - } - - [[nodiscard]] friend constexpr bool operator<(const Iterator& a, - const Iterator& b) { - return a.ptr_ < b.ptr_; - } - [[nodiscard]] friend constexpr bool operator<=(const Iterator& a, - const Iterator& b) { - return a.ptr_ <= b.ptr_; - } - [[nodiscard]] friend constexpr bool operator>(const Iterator& a, - const Iterator& b) { - return a.ptr_ > b.ptr_; - } - [[nodiscard]] friend constexpr bool operator>=(const Iterator& a, - const Iterator& b) { - return a.ptr_ >= b.ptr_; - } -#endif + [[nodiscard]] constexpr bool operator==(const Iterator&) const = default; + [[nodiscard]] constexpr auto operator<=>(const Iterator&) const = default; constexpr Iterator& operator++() { ++ptr_; diff --git a/deps/v8/include/v8-metrics.h b/deps/v8/include/v8-metrics.h index f62490a83353c9..1e8a546b8418bb 100644 --- a/deps/v8/include/v8-metrics.h +++ b/deps/v8/include/v8-metrics.h @@ -125,63 +125,43 @@ struct GarbageCollectionYoungCycle { #endif // defined(CPPGC_YOUNG_GENERATION) }; +// Note: These structs do not define any constructor, and declare most fields as +// const, to force initializing them when using aggregate (designated) +// initialization. +// Those structs are meant to be created in V8 and read by embedders. 
struct WasmModuleDecoded { - WasmModuleDecoded() = default; - WasmModuleDecoded(bool async, bool streamed, bool success, - size_t module_size_in_bytes, size_t function_count, - int64_t wall_clock_duration_in_us) - : async(async), - streamed(streamed), - success(success), - module_size_in_bytes(module_size_in_bytes), - function_count(function_count), - wall_clock_duration_in_us(wall_clock_duration_in_us) {} - - bool async = false; - bool streamed = false; - bool success = false; - size_t module_size_in_bytes = 0; - size_t function_count = 0; + const bool async; + const bool streamed; + const bool success; + const size_t module_size_in_bytes; + const size_t function_count; + // Optional field; only set if a high-resolution clock is available. int64_t wall_clock_duration_in_us = -1; }; struct WasmModuleCompiled { - WasmModuleCompiled() = default; - - WasmModuleCompiled(bool async, bool streamed, bool cached, bool deserialized, - bool lazy, bool success, size_t code_size_in_bytes, - size_t liftoff_bailout_count, - int64_t wall_clock_duration_in_us) - : async(async), - streamed(streamed), - cached(cached), - deserialized(deserialized), - lazy(lazy), - success(success), - code_size_in_bytes(code_size_in_bytes), - liftoff_bailout_count(liftoff_bailout_count), - wall_clock_duration_in_us(wall_clock_duration_in_us) {} - - bool async = false; - bool streamed = false; - bool cached = false; - bool deserialized = false; - bool lazy = false; - bool success = false; - size_t code_size_in_bytes = 0; - size_t liftoff_bailout_count = 0; + const bool async; + const bool streamed; + const bool cached; + const bool deserialized; + const bool lazy; + const bool success; + const size_t code_size_in_bytes; + const size_t liftoff_bailout_count; + // Optional field; only set if a high-resolution clock is available. int64_t wall_clock_duration_in_us = -1; }; struct WasmModuleInstantiated { - bool async = false; - bool success = false; - size_t imported_function_count = 0; + const bool async; + const bool success; + const size_t imported_function_count; + // Optional field; only set if a high-resolution clock is available. int64_t wall_clock_duration_in_us = -1; }; struct WasmModulesPerIsolate { - size_t count = 0; + const size_t count; }; /** diff --git a/deps/v8/include/v8-object.h b/deps/v8/include/v8-object.h index 444e18a8c23bfe..65a144474cb095 100644 --- a/deps/v8/include/v8-object.h +++ b/deps/v8/include/v8-object.h @@ -437,22 +437,10 @@ class V8_EXPORT Object : public Value { Local context, PropertyFilter filter, KeyConversionMode key_conversion = KeyConversionMode::kKeepNumbers); - /** - * Get the prototype object. This does not skip objects marked to - * be skipped by __proto__ and it does not consult the security - * handler. - */ - V8_DEPRECATED( - "V8 will stop providing access to hidden prototype (i.e. " - "JSGlobalObject). Use GetPrototypeV2() instead. " - "See http://crbug.com/333672197.") - Local GetPrototype(); - /** * Get the prototype object (same as calling Object.getPrototypeOf(..)). * This does not consult the security handler. - * TODO(333672197): rename back to GetPrototype() once the old version goes - * through the deprecation process and is removed. + * TODO(http://crbug.com/333672197): rename back to GetPrototype(). 
*/ Local GetPrototypeV2(); diff --git a/deps/v8/include/v8-persistent-handle.h b/deps/v8/include/v8-persistent-handle.h index 126fbdb8ff5a21..aae1f83e29037d 100644 --- a/deps/v8/include/v8-persistent-handle.h +++ b/deps/v8/include/v8-persistent-handle.h @@ -485,20 +485,13 @@ V8_INLINE void PersistentBase::SetWeak( P* parameter, typename WeakCallbackInfo
<P>
::Callback callback, WeakCallbackType type) { using Callback = WeakCallbackInfo::Callback; -#if (__GNUC__ >= 8) && !defined(__clang__) +#if (__GNUC__ >= 8) || defined(__clang__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wcast-function-type" -#endif -#if __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wcast-function-type" #endif api_internal::MakeWeak(this->slot(), parameter, reinterpret_cast(callback), type); -#if __clang__ -#pragma clang diagnostic pop -#endif -#if (__GNUC__ >= 8) && !defined(__clang__) +#if (__GNUC__ >= 8) || defined(__clang__) #pragma GCC diagnostic pop #endif } diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h index 6aadcab4ba987e..8d87fe973bc945 100644 --- a/deps/v8/include/v8-platform.h +++ b/deps/v8/include/v8-platform.h @@ -1038,12 +1038,17 @@ class VirtualAddressSpace { * \param key Optional memory protection key for the subspace. If used, the * returned subspace will use this key for all its memory pages. * + * \param handle Optional file descriptor for the subspace. If used, the + * returned subspace will use this file descriptor with 0 offset as the + * space's underlying file. + * * \returns a new subspace or nullptr on failure. */ virtual std::unique_ptr AllocateSubspace( Address hint, size_t size, size_t alignment, PagePermissions max_page_permissions, - std::optional key = std::nullopt) = 0; + std::optional key = std::nullopt, + PlatformSharedMemoryHandle handle = kInvalidSharedMemoryHandle) = 0; // // TODO(v8) maybe refactor the methods below before stabilizing the API. For diff --git a/deps/v8/include/v8-primitive.h b/deps/v8/include/v8-primitive.h index 1bf21e60ad9a99..87215cf6ece31d 100644 --- a/deps/v8/include/v8-primitive.h +++ b/deps/v8/include/v8-primitive.h @@ -136,15 +136,9 @@ class V8_EXPORT String : public Name { */ int Length() const; - /** - * Returns the number of bytes in the UTF-8 encoded - * representation of this string. - */ - V8_DEPRECATED("Use Utf8LengthV2 instead.") - int Utf8Length(Isolate* isolate) const; - /** * Returns the number of bytes needed for the Utf8 encoding of this string. + * TODO(http://crbug.com/373485796): rename back to Utf8Length(). */ size_t Utf8LengthV2(Isolate* isolate) const; @@ -163,55 +157,6 @@ class V8_EXPORT String : public Name { */ bool ContainsOnlyOneByte() const; - /** - * Write the contents of the string to an external buffer. - * If no arguments are given, expects the buffer to be large - * enough to hold the entire string and NULL terminator. Copies - * the contents of the string and the NULL terminator into the - * buffer. - * - * WriteUtf8 will not write partial UTF-8 sequences, preferring to stop - * before the end of the buffer. - * - * Copies up to length characters into the output buffer. - * Only null-terminates if there is enough space in the buffer. - * - * \param buffer The buffer into which the string will be copied. - * \param start The starting position within the string at which - * copying begins. - * \param length The number of characters to copy from the string. For - * WriteUtf8 the number of bytes in the buffer. - * \param nchars_ref The number of characters written, can be NULL. - * \param options Various options that might affect performance of this or - * subsequent operations. - * \return The number of characters copied to the buffer excluding the null - * terminator. For WriteUtf8: The number of bytes copied to the buffer - * including the null terminator (if written). 
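The Write/WriteOneByte/WriteUtf8 family removed in this hunk is superseded by the V2 variants kept below. A hedged migration sketch; the kReplaceInvalidUtf8 flag and the optional processed-characters parameter come from the current public header and are elided by this hunk, so treat them as assumptions:

#include <string>
#include <v8.h>

// Convert a v8::String to UTF-8 with the V2 API instead of WriteUtf8().
std::string ToUtf8(v8::Isolate* isolate, v8::Local<v8::String> str) {
  size_t length = str->Utf8LengthV2(isolate);
  std::string out(length, '\0');
  // WriteUtf8V2 writes at most `capacity` bytes and never emits a partial
  // UTF-8 sequence; kReplaceInvalidUtf8 (assumed) substitutes U+FFFD for
  // lone surrogates so the output is valid UTF-8.
  str->WriteUtf8V2(isolate, out.data(), out.size(),
                   v8::String::WriteFlags::kReplaceInvalidUtf8);
  return out;
}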
- */ - enum WriteOptions { - NO_OPTIONS = 0, - HINT_MANY_WRITES_EXPECTED = 1, - NO_NULL_TERMINATION = 2, - PRESERVE_ONE_BYTE_NULL = 4, - // Used by WriteUtf8 to replace orphan surrogate code units with the - // unicode replacement character. Needs to be set to guarantee valid UTF-8 - // output. - REPLACE_INVALID_UTF8 = 8 - }; - - // 16-bit character codes. - V8_DEPRECATED("Use WriteV2 instead.") - int Write(Isolate* isolate, uint16_t* buffer, int start = 0, int length = -1, - int options = NO_OPTIONS) const; - // One byte characters. - V8_DEPRECATED("Use WriteOneByteV2 instead.") - int WriteOneByte(Isolate* isolate, uint8_t* buffer, int start = 0, - int length = -1, int options = NO_OPTIONS) const; - // UTF-8 encoded characters. - V8_DEPRECATED("Use WriteUtf8V2 instead.") - int WriteUtf8(Isolate* isolate, char* buffer, int length = -1, - int* nchars_ref = nullptr, int options = NO_OPTIONS) const; - struct WriteFlags { enum { kNone = 0, @@ -237,6 +182,8 @@ class V8_EXPORT String : public Name { * \param length The number of characters to copy from the string. * \param buffer The buffer into which the string will be copied. * \param flags Various flags that influence the behavior of this operation. + * TODO(http://crbug.com/373485796): rename back to Write() and + * WriteOneByte(). */ void WriteV2(Isolate* isolate, uint32_t offset, uint32_t length, uint16_t* buffer, int flags = WriteFlags::kNone) const; @@ -261,6 +208,7 @@ class V8_EXPORT String : public Name { * the buffer. * \return The number of bytes copied to the buffer including the null * terminator (if written). + * TODO(http://crbug.com/373485796): rename back to WriteUtf8(). */ size_t WriteUtf8V2(Isolate* isolate, char* buffer, size_t capacity, int flags = WriteFlags::kNone, diff --git a/deps/v8/include/v8-template.h b/deps/v8/include/v8-template.h index 4c8bf6d68c17f4..3eff0105f18a86 100644 --- a/deps/v8/include/v8-template.h +++ b/deps/v8/include/v8-template.h @@ -183,12 +183,8 @@ enum class Intercepted : uint8_t { kNo = 0, kYes = 1 }; */ using NamedPropertyGetterCallback = Intercepted (*)( Local property, const PropertyCallbackInfo& info); -// This variant will be deprecated soon. -// -// Use `info.GetReturnValue().Set()` to set the return value of the -// intercepted get request. If the property does not exist the callback should -// not set the result and must not produce side effects. -using GenericNamedPropertyGetterCallback V8_DEPRECATE_SOON( +// This variant will be deleted soon. +using GenericNamedPropertyGetterCallback V8_DEPRECATED( "Use NamedPropertyGetterCallback instead") = void (*)(Local property, const PropertyCallbackInfo& info); @@ -214,15 +210,8 @@ using GenericNamedPropertyGetterCallback V8_DEPRECATE_SOON( using NamedPropertySetterCallback = Intercepted (*)(Local property, Local value, const PropertyCallbackInfo& info); -// This variant will be deprecated soon. -// -// Use `info.GetReturnValue()` to indicate whether the request was intercepted -// or not. If the setter successfully intercepts the request, i.e., if the -// request should not be further executed, call -// `info.GetReturnValue().Set(value)`. If the setter did not intercept the -// request, i.e., if the request should be handled as if no interceptor is -// present, do not not call `Set()` and do not produce side effects. -using GenericNamedPropertySetterCallback V8_DEPRECATE_SOON( +// This variant will be deleted soon. 
+using GenericNamedPropertySetterCallback V8_DEPRECATED( "Use NamedPropertySetterCallback instead") = void (*)(Local property, Local value, const PropertyCallbackInfo& info); @@ -255,13 +244,8 @@ using GenericNamedPropertySetterCallback V8_DEPRECATE_SOON( */ using NamedPropertyQueryCallback = Intercepted (*)( Local property, const PropertyCallbackInfo& info); -// This variant will be deprecated soon. -// -// Use `info.GetReturnValue().Set(value)` to set the property attributes. The -// value is an integer encoding a `v8::PropertyAttribute`. If the property does -// not exist the callback should not set the result and must not produce side -// effects. -using GenericNamedPropertyQueryCallback V8_DEPRECATE_SOON( +// This variant will be deleted soon. +using GenericNamedPropertyQueryCallback V8_DEPRECATED( "Use NamedPropertyQueryCallback instead") = void (*)(Local property, const PropertyCallbackInfo& info); @@ -291,15 +275,8 @@ using GenericNamedPropertyQueryCallback V8_DEPRECATE_SOON( */ using NamedPropertyDeleterCallback = Intercepted (*)( Local property, const PropertyCallbackInfo& info); -// This variant will be deprecated soon. -// -// Use `info.GetReturnValue()` to indicate whether the request was intercepted -// or not. If the deleter successfully intercepts the request, i.e., if the -// request should not be further executed, call -// `info.GetReturnValue().Set(value)` with a boolean `value`. The `value` is -// used as the return value of `delete`. If the deleter does not intercept the -// request then it should not set the result and must not produce side effects. -using GenericNamedPropertyDeleterCallback V8_DEPRECATE_SOON( +// This variant will be deleted soon. +using GenericNamedPropertyDeleterCallback V8_DEPRECATED( "Use NamedPropertyDeleterCallback instead") = void (*)(Local property, const PropertyCallbackInfo& info); @@ -311,9 +288,9 @@ using GenericNamedPropertyDeleterCallback V8_DEPRECATE_SOON( */ using NamedPropertyEnumeratorCallback = void (*)(const PropertyCallbackInfo& info); -// This variant will be deprecated soon. +// This variant will be deleted soon. // This is just a renaming of the typedef. -using GenericNamedPropertyEnumeratorCallback V8_DEPRECATE_SOON( +using GenericNamedPropertyEnumeratorCallback V8_DEPRECATED( "Use NamedPropertyEnumeratorCallback instead") = NamedPropertyEnumeratorCallback; @@ -339,15 +316,8 @@ using GenericNamedPropertyEnumeratorCallback V8_DEPRECATE_SOON( using NamedPropertyDefinerCallback = Intercepted (*)(Local property, const PropertyDescriptor& desc, const PropertyCallbackInfo& info); -// This variant will be deprecated soon. -// -// Use `info.GetReturnValue()` to indicate whether the request was intercepted -// or not. If the definer successfully intercepts the request, i.e., if the -// request should not be further executed, call -// `info.GetReturnValue().Set(value)`. If the definer did not intercept the -// request, i.e., if the request should be handled as if no interceptor is -// present, do not not call `Set()` and do not produce side effects. -using GenericNamedPropertyDefinerCallback V8_DEPRECATE_SOON( +// This variant will be deleted soon. 
+using GenericNamedPropertyDefinerCallback V8_DEPRECATED( "Use NamedPropertyDefinerCallback instead") = void (*)(Local property, const PropertyDescriptor& desc, const PropertyCallbackInfo& info); @@ -378,13 +348,8 @@ using GenericNamedPropertyDefinerCallback V8_DEPRECATE_SOON( */ using NamedPropertyDescriptorCallback = Intercepted (*)( Local property, const PropertyCallbackInfo& info); -// This variant will be deprecated soon. -// -// Use `info.GetReturnValue().Set()` to set the return value of the -// intercepted request. The return value must be an object that -// can be converted to a PropertyDescriptor, e.g., a `v8::Value` returned from -// `v8::Object::getOwnPropertyDescriptor`. -using GenericNamedPropertyDescriptorCallback V8_DEPRECATE_SOON( +// This variant will be deleted soon. +using GenericNamedPropertyDescriptorCallback V8_DEPRECATED( "Use NamedPropertyDescriptorCallback instead") = void (*)(Local property, const PropertyCallbackInfo& info); @@ -397,8 +362,8 @@ using GenericNamedPropertyDescriptorCallback V8_DEPRECATE_SOON( */ using IndexedPropertyGetterCallbackV2 = Intercepted (*)(uint32_t index, const PropertyCallbackInfo& info); -// This variant will be deprecated soon. -using IndexedPropertyGetterCallback V8_DEPRECATE_SOON( +// This variant will be deleted soon. +using IndexedPropertyGetterCallback V8_DEPRECATED( "Use IndexedPropertyGetterCallbackV2 instead") = void (*)(uint32_t index, const PropertyCallbackInfo& info); @@ -407,8 +372,8 @@ using IndexedPropertyGetterCallback V8_DEPRECATE_SOON( */ using IndexedPropertySetterCallbackV2 = Intercepted (*)( uint32_t index, Local value, const PropertyCallbackInfo& info); -// This variant will be deprecated soon. -using IndexedPropertySetterCallback V8_DEPRECATE_SOON( +// This variant will be deleted soon. +using IndexedPropertySetterCallback V8_DEPRECATED( "Use IndexedPropertySetterCallbackV2 instead") = void (*)(uint32_t index, Local value, const PropertyCallbackInfo& info); @@ -418,8 +383,8 @@ using IndexedPropertySetterCallback V8_DEPRECATE_SOON( */ using IndexedPropertyQueryCallbackV2 = Intercepted (*)(uint32_t index, const PropertyCallbackInfo& info); -// This variant will be deprecated soon. -using IndexedPropertyQueryCallback V8_DEPRECATE_SOON( +// This variant will be deleted soon. +using IndexedPropertyQueryCallback V8_DEPRECATED( "Use IndexedPropertyQueryCallbackV2 instead") = void (*)(uint32_t index, const PropertyCallbackInfo& info); @@ -428,8 +393,8 @@ using IndexedPropertyQueryCallback V8_DEPRECATE_SOON( */ using IndexedPropertyDeleterCallbackV2 = Intercepted (*)(uint32_t index, const PropertyCallbackInfo& info); -// This variant will be deprecated soon. -using IndexedPropertyDeleterCallback V8_DEPRECATE_SOON( +// This variant will be deleted soon. +using IndexedPropertyDeleterCallback V8_DEPRECATED( "Use IndexedPropertyDeleterCallbackV2 instead") = void (*)(uint32_t index, const PropertyCallbackInfo& info); @@ -448,8 +413,8 @@ using IndexedPropertyEnumeratorCallback = using IndexedPropertyDefinerCallbackV2 = Intercepted (*)(uint32_t index, const PropertyDescriptor& desc, const PropertyCallbackInfo& info); -// This variant will be deprecated soon. -using IndexedPropertyDefinerCallback V8_DEPRECATE_SOON( +// This variant will be deleted soon. 
+using IndexedPropertyDefinerCallback V8_DEPRECATED( "Use IndexedPropertyDefinerCallbackV2 instead") = void (*)(uint32_t index, const PropertyDescriptor& desc, const PropertyCallbackInfo& info); @@ -459,8 +424,8 @@ using IndexedPropertyDefinerCallback V8_DEPRECATE_SOON( */ using IndexedPropertyDescriptorCallbackV2 = Intercepted (*)(uint32_t index, const PropertyCallbackInfo& info); -// This variant will be deprecated soon. -using IndexedPropertyDescriptorCallback V8_DEPRECATE_SOON( +// This variant will be deleted soon. +using IndexedPropertyDescriptorCallback V8_DEPRECATED( "Use IndexedPropertyDescriptorCallbackV2 instead") = void (*)(uint32_t index, const PropertyCallbackInfo& info); diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index eb1bdf134b8fbe..7bc931029679d9 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 14 -#define V8_MINOR_VERSION 2 -#define V8_BUILD_NUMBER 231 -#define V8_PATCH_LEVEL 16 +#define V8_MINOR_VERSION 3 +#define V8_BUILD_NUMBER 127 +#define V8_PATCH_LEVEL 6 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/include/v8-wasm.h b/deps/v8/include/v8-wasm.h index 3afaab9aa00830..5483421f40ff50 100644 --- a/deps/v8/include/v8-wasm.h +++ b/deps/v8/include/v8-wasm.h @@ -134,6 +134,22 @@ class V8_EXPORT WasmStreaming final { internal::kWasmWasmStreamingTag; class WasmStreamingImpl; + class ModuleCachingInterface { + public: + // Get the full wire bytes, to check against the cached version. + virtual MemorySpan GetWireBytes() const = 0; + // Pass serialized (cached) compiled module bytes, to be deserialized and + // used as the result of this streaming compilation. + // The passed bytes will only be accessed inside this callback, i.e. + // lifetime can end after the call. + // The return value indicates whether V8 could use the passed bytes; {false} + // would be returned on e.g. version mismatch. + // This method can only be called once. + virtual bool SetCachedCompiledModuleBytes(MemorySpan) = 0; + }; + + using ModuleCachingCallback = std::function; + explicit WasmStreaming(std::unique_ptr impl); ~WasmStreaming(); @@ -153,7 +169,28 @@ class V8_EXPORT WasmStreaming final { * If {can_use_compiled_module} is false, the compiled module bytes previously * set by {SetCompiledModuleBytes} should not be used. */ - void Finish(bool can_use_compiled_module = true); + V8_DEPRECATE_SOON( + "Use the new variant of Finish which takes the caching callback argument") + void Finish(bool can_use_compiled_module = true) { + ModuleCachingCallback callback; + if (can_use_compiled_module && !cached_compiled_module_bytes_.empty()) { + callback = [bytes = cached_compiled_module_bytes_]( + ModuleCachingInterface& caching_interface) { + caching_interface.SetCachedCompiledModuleBytes(bytes); + }; + } + Finish(callback); + } + + /** + * {Finish} should be called after all received bytes where passed to + * {OnBytesReceived} to tell V8 that there will be no more bytes. {Finish} + * must not be called after {Abort} has been called already. + * If {SetHasCompiledModuleBytes()} was called before, a {caching_callback} + * can be passed which can inspect the full received wire bytes and set cached + * module bytes which will be deserialized then. 
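Taken together with {SetHasCompiledModuleBytes} below, the intended flow is: the embedder signals a potential cache hit up front, streams the wire bytes as usual, and then hands {Finish} a callback that checks the full wire bytes against its cache and, on a match, injects the serialized module. A sketch under stated assumptions: the MemorySpan element type (const uint8_t) is elided in this hunk, and EmbedderCache with its Lookup helper is hypothetical embedder code:

#include <optional>
#include <v8-wasm.h>

struct EmbedderCache {
  // Hypothetical: returns previously serialized CompiledWasmModule bytes
  // keyed by the full wire bytes, or std::nullopt on a miss.
  std::optional<v8::MemorySpan<const uint8_t>> Lookup(
      v8::MemorySpan<const uint8_t> wire_bytes);
};

void FinishStreaming(v8::WasmStreaming& streaming, EmbedderCache& cache) {
  // Assumes streaming.SetHasCompiledModuleBytes() was called earlier, when
  // the request URL (or a similar key) matched a cache entry.
  streaming.Finish(
      [&cache](v8::WasmStreaming::ModuleCachingInterface& caching) {
        v8::MemorySpan<const uint8_t> wire_bytes = caching.GetWireBytes();
        if (auto cached = cache.Lookup(wire_bytes)) {
          // Returns false on e.g. a version mismatch, in which case V8 falls
          // back to compiling the wire bytes it already received.
          caching.SetCachedCompiledModuleBytes(*cached);
        }
      });
}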
+ */ + void Finish(const ModuleCachingCallback& caching_callback); /** * Abort streaming compilation. If {exception} has a value, then the promise @@ -172,7 +209,25 @@ class V8_EXPORT WasmStreaming final { * The compiled module bytes should not be used until {Finish(true)} is * called, because they can be invalidated later by {Finish(false)}. */ - bool SetCompiledModuleBytes(const uint8_t* bytes, size_t size); + V8_DEPRECATE_SOON( + "Use SetHasCompiledModule in combination with the new variant of Finish") + bool SetCompiledModuleBytes(const uint8_t* bytes, size_t size) { + SetHasCompiledModuleBytes(); + cached_compiled_module_bytes_ = {bytes, size}; + // Optimistically return true here, even though we might later find out that + // we cannot use the bytes. + return true; + } + + /** + * Mark that the embedder has (potentially) cached compiled module bytes (i.e. + * a serialized {CompiledWasmModule}) that could match this streaming request. + * This will cause V8 to skip streaming compilation. + * The embedder should then pass a callback to the {Finish} method to pass the + * serialized bytes, after potentially checking their validity against the + * full received wire bytes. + */ + void SetHasCompiledModuleBytes(); /** * Sets a callback which is called whenever a significant number of new @@ -197,6 +252,9 @@ class V8_EXPORT WasmStreaming final { private: std::unique_ptr impl_; + // Temporarily store the compiled module bytes here until the deprecation (see + // methods above) has gone through. + MemorySpan cached_compiled_module_bytes_; }; /** diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index 59e1bce6016639..1117068160c9f1 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -288,7 +288,7 @@ }, 'tests': [ {'name': 'mozilla', 'shards': 3}, - {'name': 'test262', 'variant': 'default', 'shards': 2}, + {'name': 'test262', 'variant': 'default', 'shards': 3}, {'name': 'v8testing', 'shards': 10}, {'name': 'v8testing', 'variant': 'extra', 'shards': 10}, ], @@ -307,7 +307,7 @@ }, 'tests': [ {'name': 'mozilla', 'shards': 2}, - {'name': 'test262', 'variant': 'default', 'shards': 2}, + {'name': 'test262', 'variant': 'default', 'shards': 3}, {'name': 'v8testing', 'shards': 10}, {'name': 'v8testing', 'variant': 'extra', 'shards': 10}, {'name': 'gcmole_v3', 'variant': 'arm', 'shards': 6}, @@ -402,7 +402,7 @@ {'name': 'v8testing', 'shards': 5}, {'name': 'benchmarks', 'shards': 5}, {'name': 'mozilla', 'shards': 5}, - {'name': 'test262', 'shards': 10}, + {'name': 'test262', 'shards': 12}, ], }, 'v8_linux64_dbg': { @@ -426,6 +426,7 @@ {'name': 'v8testing', 'variant': 'stress_instruction_scheduling', 'shards': 2}, {'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 2}, {'name': 'v8testing', 'variant': 'stress_concurrent_inlining','shards': 2}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode', 'shards': 2}, {'name': 'mjsunit', 'variant': 'stress_maglev_tests_with_turbofan', 'shards': 2}, # Maglev -- move to extra once more architectures are supported. 
{'name': 'v8testing', 'variant': 'maglev', 'shards': 2}, @@ -557,6 +558,12 @@ 'test_args': ['--gc-stress'], 'shards': 2 }, + { + 'name': 'mjsunit', + 'variant': 'scavenger_chaos_mode', + 'test_args': ['--gc-stress'], + 'shards': 2 + }, ], }, 'v8_linux64_gcc_rel': { @@ -623,6 +630,7 @@ {'name': 'test262', 'variant': 'extra', 'shards': 4}, {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'assert_types'}, + {'name': 'v8testing', 'variant': 'wasm_assert_types'}, {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, {'name': 'v8testing', 'variant': 'no_lfa'}, {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, @@ -638,6 +646,7 @@ {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, { 'name': 'v8testing', 'suffix': 'noavx', @@ -760,6 +769,7 @@ {'name': 'v8testing', 'variant': 'no_lfa'}, {'name': 'v8testing', 'variant': 'slow_path'}, {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, {'name': 'mjsunit', 'variant': 'stress_maglev_tests_with_turbofan'}, # Maglev -- move to extra once more architectures are supported. {'name': 'v8testing', 'variant': 'maglev'}, @@ -825,6 +835,7 @@ {'name': 'v8testing', 'variant': 'minor_ms', 'shards': 2}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning', 'shards': 2}, {'name': 'v8testing', 'variant': 'precise_pinning', 'shards': 2}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, 'v8_linux64_tsan_dbg': { @@ -834,7 +845,7 @@ 'tests': [ {'name': 'benchmarks', 'shards': 2}, {'name': 'mozilla', 'shards': 4}, - {'name': 'test262', 'variant': 'default', 'shards': 5}, + {'name': 'test262', 'variant': 'default', 'shards': 10}, {'name': 'v8testing', 'shards': 12}, {'name': 'v8testing', 'variant': 'extra', 'shards': 9}, ], @@ -1047,6 +1058,7 @@ {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, 'v8_win64_drumbrake_dbg': { @@ -1072,6 +1084,7 @@ {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, 'v8_win64_rel': { @@ -1087,6 +1100,7 @@ {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, ############################################################################## @@ -1113,6 +1127,7 @@ {'name': 'v8testing', 'variant': 'minor_ms', 'shards': 2}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning', 'shards': 2}, {'name': 'v8testing', 'variant': 'precise_pinning', 'shards': 2}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, 'v8_mac_arm64_gc_stress_dbg': { @@ -1146,6 +1161,7 @@ {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, 'v8_mac_arm64_rel': { @@ -1168,6 +1184,7 @@ {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 
'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, 'v8_mac_arm64_dbg': { @@ -1190,6 +1207,7 @@ {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, 'v8_mac_arm64_full_dbg': { @@ -1212,6 +1230,7 @@ {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, 'v8_mac_arm64_no_pointer_compression_dbg': { @@ -1507,12 +1526,14 @@ {'name': 'v8initializers'}, {'name': 'v8testing'}, {'name': 'v8testing', 'variant': 'assert_types'}, + {'name': 'v8testing', 'variant': 'wasm_assert_types'}, {'name': 'v8testing', 'variant': 'extra'}, {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, {'name': 'v8testing', 'variant': 'no_lfa'}, {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, {'name': 'mjsunit', 'variant': 'stress_maglev_tests_with_turbofan'}, # Maglev -- move to extra once more architectures are supported. {'name': 'v8testing', 'variant': 'maglev'}, @@ -1625,6 +1646,7 @@ {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, {'name': 'v8testing', 'variant': 'stress_concurrent_allocation'}, {'name': 'v8testing', 'variant': 'stress_concurrent_inlining', 'shards': 2}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, {'name': 'mjsunit', 'variant': 'stress_maglev_tests_with_turbofan'}, # Maglev -- move to extra once more architectures are supported. 
{'name': 'v8testing', 'variant': 'maglev'}, @@ -1859,6 +1881,12 @@ 'test_args': ['--gc-stress'], 'shards': 2 }, + { + 'name': 'mjsunit', + 'variant': 'scavenger_chaos_mode', + 'test_args': ['--gc-stress'], + 'shards': 2 + }, ], }, 'V8 Linux64 - internal snapshot': { @@ -1967,8 +1995,8 @@ }, 'tests': [ {'name': 'test262', 'shards': 7}, - {'name': 'v8testing', 'shards': 2}, - {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, + {'name': 'v8testing', 'shards': 3}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 3}, {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, ], }, @@ -1980,7 +2008,7 @@ {'name': 'v8testing', 'shards': 5}, {'name': 'benchmarks', 'shards': 5}, {'name': 'mozilla', 'shards': 5}, - {'name': 'test262', 'shards': 10}, + {'name': 'test262', 'shards': 12}, ], }, 'V8 Linux64 GC Stress - custom snapshot': { @@ -2018,6 +2046,7 @@ {'name': 'v8testing', 'variant': 'minor_ms', 'shards': 2}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning', 'shards': 2}, {'name': 'v8testing', 'variant': 'precise_pinning', 'shards': 2}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode', 'shards': 2}, ], }, 'V8 Linux64 TSAN - debug': { @@ -2027,7 +2056,7 @@ 'tests': [ {'name': 'benchmarks', 'shards': 2}, {'name': 'mozilla', 'shards': 4}, - {'name': 'test262', 'variant': 'default', 'shards': 6}, + {'name': 'test262', 'variant': 'default', 'shards': 10}, {'name': 'v8testing', 'shards': 12}, {'name': 'v8testing', 'variant': 'extra', 'shards': 10}, ], @@ -2106,6 +2135,7 @@ {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, 'V8 Mac64 - debug': { @@ -2121,6 +2151,7 @@ {'name': 'v8testing', 'variant': 'minor_ms', 'shards': 2}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning', 'shards': 2}, {'name': 'v8testing', 'variant': 'precise_pinning', 'shards': 2}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode', 'shards': 2}, ], }, 'V8 Mac64 ASAN': { @@ -2162,6 +2193,7 @@ {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, 'V8 Mac - arm64 - release': { @@ -2185,6 +2217,7 @@ {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, 'V8 Mac - arm64 - debug': { @@ -2208,6 +2241,7 @@ {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, 'V8 Mac - arm64 - no pointer compression debug': { @@ -2263,6 +2297,7 @@ {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, 'V8 Win64 - CET shadow stack': { @@ -2287,6 +2322,7 @@ {'name': 'v8testing', 'variant': 'minor_ms'}, {'name': 'v8testing', 'variant': 'conservative_stack_scanning'}, {'name': 'v8testing', 'variant': 'precise_pinning'}, + {'name': 'v8testing', 'variant': 'scavenger_chaos_mode'}, ], }, 'V8 Win64 - drumbrake - debug': { @@ -2392,7 +2428,7 @@ 
}, 'tests': [ {'name': 'mozilla', 'shards': 3}, - {'name': 'test262', 'variant': 'default', 'shards': 2}, + {'name': 'test262', 'variant': 'default', 'shards': 3}, {'name': 'v8testing', 'shards': 10}, {'name': 'v8testing', 'variant': 'extra', 'shards': 9}, {'name': 'v8testing', 'variant': 'turbolev'}, @@ -2428,7 +2464,7 @@ 'suffix': 'novfp3', 'variant': 'default', 'test_args': ['--novfp3'], - 'shards': 2 + 'shards': 3 }, { 'name': 'v8testing', @@ -2758,7 +2794,8 @@ 'suffix': 'combined', 'test_args': [ '--total-timeout-sec=4200', - '--allocation-offset=2', + '--allocation-offset=1', + '--scavenge-chaos=4', '--stress-bytecode-budget=1', '--stress-delay-tasks=4', '--stress-deopt=2', @@ -2770,13 +2807,18 @@ '--stress-stack-size=1', '--stress-interrupt-budget=1', ], - 'shards': 4 + 'shards': 5 }, { 'name': 'numfuzz', 'suffix': 'scavenge', 'test_args': ['--total-timeout-sec=4200', '--stress-scavenge=1'] }, + { + 'name': 'numfuzz', + 'suffix': 'scavenge chaos', + 'test_args': ['--total-timeout-sec=4200', '--scavenge-chaos=1'] + }, ], }, 'V8 NumFuzz - debug': { @@ -2836,7 +2878,8 @@ 'suffix': 'combined', 'test_args': [ '--total-timeout-sec=4200', - '--allocation-offset=2', + '--allocation-offset=1', + '--scavenge-chaos=4', '--stress-bytecode-budget=1', '--stress-delay-tasks=4', '--stress-deopt=2', @@ -2848,13 +2891,18 @@ '--stress-stack-size=1', '--stress-interrupt-budget=1', ], - 'shards': 3 + 'shards': 5 }, { 'name': 'numfuzz', 'suffix': 'scavenge', 'test_args': ['--total-timeout-sec=4200', '--stress-scavenge=1'] }, + { + 'name': 'numfuzz', + 'suffix': 'scavenge chaos', + 'test_args': ['--total-timeout-sec=4200', '--scavenge-chaos=1'] + }, { 'name': 'numfuzz', 'suffix': 'deopt', @@ -2934,7 +2982,8 @@ 'suffix': 'combined', 'test_args': [ '--total-timeout-sec=900', - '--allocation-offset=2', + '--allocation-offset=1', + '--scavenge-chaos=4', '--stress-bytecode-budget=1', '--stress-delay-tasks=4', '--stress-deopt=2', @@ -2952,6 +3001,11 @@ 'suffix': 'scavenge', 'test_args': ['--total-timeout-sec=900', '--stress-scavenge=1'] }, + { + 'name': 'numfuzz', + 'suffix': 'scavenge chaos', + 'test_args': ['--total-timeout-sec=900', '--scavenge-chaos=1'] + }, ], }, 'v8_numfuzz_dbg': { @@ -3003,7 +3057,8 @@ 'suffix': 'combined', 'test_args': [ '--total-timeout-sec=900', - '--allocation-offset=2', + '--allocation-offset=1', + '--scavenge-chaos=4', '--stress-bytecode-budget=1', '--stress-delay-tasks=4', '--stress-deopt=2', @@ -3021,6 +3076,11 @@ 'suffix': 'scavenge', 'test_args': ['--total-timeout-sec=900', '--stress-scavenge=1'] }, + { + 'name': 'numfuzz', + 'suffix': 'scavenge chaos', + 'test_args': ['--total-timeout-sec=900', '--scavenge-chaos=1'] + }, { 'name': 'numfuzz', 'suffix': 'deopt', diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc index af57f39a073a62..e48b16c6a896c5 100644 --- a/deps/v8/samples/process.cc +++ b/deps/v8/samples/process.cc @@ -341,15 +341,19 @@ JsHttpRequestProcessor::~JsHttpRequestProcessor() { process_.Reset(); } - Global JsHttpRequestProcessor::request_template_; Global JsHttpRequestProcessor::map_template_; - // ----------------------------------- // --- A c c e s s i n g M a p s --- // ----------------------------------- +namespace { +// This tag value has been picked arbitrarily between 0 and +// V8_EXTERNAL_POINTER_TAG_COUNT. +constexpr v8::ExternalPointerTypeTag kMapTag = 6; +} // namespace + // Utility function that wraps a C++ http request object in a // JavaScript object. 
Local JsHttpRequestProcessor::WrapMap(map* obj) { @@ -371,7 +375,7 @@ Local JsHttpRequestProcessor::WrapMap(map* obj) { // Wrap the raw C++ pointer in an External so it can be referenced // from within JavaScript. - Local map_ptr = External::New(GetIsolate(), obj); + Local map_ptr = External::New(GetIsolate(), obj, kMapTag); // Store the map pointer in the JavaScript wrapper. result->SetInternalField(0, map_ptr); @@ -383,16 +387,14 @@ Local JsHttpRequestProcessor::WrapMap(map* obj) { return handle_scope.Escape(result); } - // Utility function that extracts the C++ map pointer from a wrapper // object. map* JsHttpRequestProcessor::UnwrapMap(Local obj) { Local field = obj->GetInternalField(0).As().As(); - void* ptr = field->Value(); + void* ptr = field->Value(kMapTag); return static_cast*>(ptr); } - // Convert a JavaScript string to a std::string. To not bother too // much with string encodings we just use ascii. string ObjectToString(v8::Isolate* isolate, Local value) { @@ -454,11 +456,14 @@ Local JsHttpRequestProcessor::MakeMapTemplate( return handle_scope.Escape(result); } - // ------------------------------------------- // --- A c c e s s i n g R e q u e s t s --- // ------------------------------------------- +namespace { +constexpr v8::ExternalPointerTypeTag kHttpRequestTag = 7; +} // namespace + /** * Utility function that wraps a C++ http request object in a * JavaScript object. @@ -482,7 +487,8 @@ Local JsHttpRequestProcessor::WrapRequest(HttpRequest* request) { // Wrap the raw C++ pointer in an External so it can be referenced // from within JavaScript. - Local request_ptr = External::New(GetIsolate(), request); + Local request_ptr = + External::New(GetIsolate(), request, kHttpRequestTag); // Store the request pointer in the JavaScript wrapper. result->SetInternalField(0, request_ptr); @@ -494,14 +500,13 @@ Local JsHttpRequestProcessor::WrapRequest(HttpRequest* request) { return handle_scope.Escape(result); } - /** * Utility function that extracts the C++ http request object from a * wrapper object. */ HttpRequest* JsHttpRequestProcessor::UnwrapRequest(Local obj) { Local field = obj->GetInternalField(0).As().As(); - void* ptr = field->Value(); + void* ptr = field->Value(kHttpRequestTag); return static_cast(ptr); } diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index 33a4acad5f55db..5a879e9ff5d9e8 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -259,13 +259,6 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* i_isolate, const char* location, FATAL("API fatal error handler returned after process out of memory"); } -void i::V8::FatalProcessOutOfMemory(i::Isolate* i_isolate, const char* location, - const char* detail) { - OOMDetails details; - details.detail = detail; - FatalProcessOutOfMemory(i_isolate, location, details); -} - void Utils::ReportApiFailure(const char* location, const char* message) { i::Isolate* i_isolate = i::Isolate::TryGetCurrent(); FatalErrorCallback callback = nullptr; @@ -285,7 +278,7 @@ void Utils::ReportApiFailure(const char* location, const char* message) { void Utils::ReportOOMFailure(i::Isolate* i_isolate, const char* location, const OOMDetails& details) { if (auto oom_callback = i_isolate->oom_behavior()) { - oom_callback(location, details); + oom_callback(location, details, i_isolate->oom_callback_data()); } else { // TODO(wfh): Remove this fallback once Blink is setting OOM handler. See // crbug.com/614440. 
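The third argument threaded through Utils::ReportOOMFailure here pairs with the data-carrying handler registration added further down in this file, Isolate::SetOOMErrorHandler(OOMErrorCallbackWithData, void*). A minimal sketch of an embedder using it, assuming OOMErrorCallbackWithData has the (const char*, const OOMDetails&, void*) shape visible in the compatibility shim:

#include <cstdio>
#include <cstdlib>
#include <v8.h>

struct EmbedderOomState {
  const char* component_name;
};

void HandleOom(const char* location, const v8::OOMDetails& details,
               void* data) {
  auto* state = static_cast<EmbedderOomState*>(data);
  // details.is_heap_oom and details.detail describe the failure; `state`
  // carries whatever per-isolate context was registered with the callback.
  std::fprintf(stderr, "[%s] OOM at %s: %s\n", state->component_name,
               location, details.detail ? details.detail : "(no detail)");
  std::abort();
}

void InstallOomHandler(v8::Isolate* isolate, EmbedderOomState* state) {
  isolate->SetOOMErrorHandler(&HandleOom, state);
}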
@@ -543,10 +536,10 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory, uint64_t virtual_memory_limit) { physical_memory_size_ = physical_memory; - size_t heap_size = i::Heap::HeapSizeFromPhysicalMemory(physical_memory); - size_t young_generation, old_generation; - i::Heap::GenerationSizesFromHeapSize(physical_memory, heap_size, - &young_generation, &old_generation); + size_t old_generation = + i::Heap::OldGenerationSizeFromPhysicalMemory(physical_memory); + size_t young_generation = i::Heap::YoungGenerationSizeFromOldGenerationSize( + physical_memory, old_generation); set_max_young_generation_size_in_bytes(young_generation); set_max_old_generation_size_in_bytes(old_generation); @@ -1321,7 +1314,18 @@ i::DirectHandle MakeAccessorInfo(i::Isolate* i_isolate, obj->set_getter(i_isolate, reinterpret_cast(getter)); DCHECK_IMPLIES(replace_on_access, setter == nullptr); if (setter == nullptr) { +#if (__GNUC__ >= 8) || defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-function-type" +#endif + // Cast from 'void (*)(v8::Local, v8::Local, const + // v8::PropertyCallbackInfo &)' to 'void + // (*)(v8::Local, v8::Local, const + // v8::PropertyCallbackInfo &)'. setter = reinterpret_cast(&i::Accessors::ReconfigureToDataProperty); +#if (__GNUC__ >= 8) || defined(__clang__) +#pragma GCC diagnostic pop +#endif } obj->set_setter(i_isolate, reinterpret_cast(setter)); @@ -4645,13 +4649,6 @@ MaybeLocal v8::Object::GetOwnPropertyDescriptor(Local context, return api_scope.Escape(Utils::ToLocal(desc.ToObject(i_isolate))); } -Local v8::Object::GetPrototype() { - auto self = Utils::OpenDirectHandle(this); - auto i_isolate = i::Isolate::Current(); - i::PrototypeIterator iter(i_isolate, self); - return Utils::ToLocal(i::PrototypeIterator::GetCurrent(iter)); -} - Local v8::Object::GetPrototypeV2() { auto self = Utils::OpenDirectHandle(this); auto i_isolate = i::Isolate::Current(); @@ -4715,18 +4712,17 @@ Local v8::Object::FindInstanceInPrototypeChain( v8::Local tmpl) { auto self = Utils::OpenDirectHandle(this); auto i_isolate = i::Isolate::Current(); - i::PrototypeIterator iter(i_isolate, *self, i::kStartAtReceiver); i::Tagged tmpl_info = *Utils::OpenDirectHandle(*tmpl); - if (!IsJSObject(iter.GetCurrent())) return {}; - while (!tmpl_info->IsTemplateFor(iter.GetCurrent())) { - iter.Advance(); - if (iter.IsAtEnd()) return {}; - if (!IsJSObject(iter.GetCurrent())) return {}; + for (i::PrototypeIterator iter(i_isolate, *self, i::kStartAtReceiver); + !iter.IsAtEnd(); iter.Advance()) { + if (IsJSObject(iter.GetCurrent()) && + tmpl_info->IsTemplateFor(iter.GetCurrent())) { + return Utils::ToLocal( + i::direct_handle(iter.GetCurrent(), i_isolate)); + } } - // IsTemplateFor() ensures that iter.GetCurrent() can't be a Proxy here. 
- return Utils::ToLocal( - i::direct_handle(iter.GetCurrent(), i_isolate)); + return {}; } MaybeLocal v8::Object::GetPropertyNames(Local context) { @@ -5031,7 +5027,7 @@ MaybeLocal v8::Object::GetRealNamedPropertyInPrototypeChain( context, RCCId::kAPI_Object_GetRealNamedPropertyInPrototypeChain}; i::Isolate* i_isolate = api_scope.i_isolate(); auto self = Utils::OpenDirectHandle(this); - if (!IsJSObject(*self)) return {}; + if (!IsJSObject(*self) && !IsWasmObject(*self)) return {}; auto key_obj = Utils::OpenDirectHandle(*key); i::PrototypeIterator iter(i_isolate, self); if (iter.IsAtEnd()) return {}; @@ -5053,7 +5049,7 @@ v8::Object::GetRealNamedPropertyAttributesInPrototypeChain( i_isolate, context, RCCId::kAPI_Object_GetRealNamedPropertyAttributesInPrototypeChain}; auto self = Utils::OpenDirectHandle(this); - if (!IsJSObject(*self)) return {}; + if (!IsJSObject(*self) && !IsWasmObject(*self)) return {}; auto key_obj = Utils::OpenDirectHandle(*key); i::PrototypeIterator iter(i_isolate, self); if (iter.IsAtEnd()) return {}; @@ -5699,214 +5695,11 @@ bool String::ContainsOnlyOneByte() const { return helper.Check(*str); } -int String::Utf8Length(Isolate* v8_isolate) const { - auto str = Utils::OpenDirectHandle(this); - str = i::String::Flatten(reinterpret_cast(v8_isolate), str); - int length = str->length(); - if (length == 0) return 0; - i::DisallowGarbageCollection no_gc; - i::String::FlatContent flat = str->GetFlatContent(no_gc); - DCHECK(flat.IsFlat()); - int utf8_length = 0; - if (flat.IsOneByte()) { - for (uint8_t c : flat.ToOneByteVector()) { - utf8_length += c >> 7; - } - utf8_length += length; - } else { - int last_character = unibrow::Utf16::kNoPreviousCharacter; - for (uint16_t c : flat.ToUC16Vector()) { - utf8_length += unibrow::Utf8::Length(c, last_character); - last_character = c; - } - } - return utf8_length; -} - size_t String::Utf8LengthV2(Isolate* v8_isolate) const { auto str = Utils::OpenDirectHandle(this); return i::String::Utf8Length(reinterpret_cast(v8_isolate), str); } -namespace { -// Writes the flat content of a string to a buffer. This is done in two phases. -// The first phase calculates a pessimistic estimate (writable_length) on how -// many code units can be safely written without exceeding the buffer capacity -// and without leaving at a lone surrogate. The estimated number of code units -// is then written out in one go, and the reported byte usage is used to -// correct the estimate. This is repeated until the estimate becomes <= 0 or -// all code units have been written out. The second phase writes out code -// units until the buffer capacity is reached, would be exceeded by the next -// unit, or all code units have been written out. -template -static int WriteUtf8Impl(base::Vector string, char* write_start, - int write_capacity, int options, - int* utf16_chars_read_out) { - bool write_null = !(options & v8::String::NO_NULL_TERMINATION); - bool replace_invalid_utf8 = (options & v8::String::REPLACE_INVALID_UTF8); - char* current_write = write_start; - const Char* read_start = string.begin(); - int read_index = 0; - int read_length = string.length(); - int prev_char = unibrow::Utf16::kNoPreviousCharacter; - // Do a fast loop where there is no exit capacity check. - // Need enough space to write everything but one character. - static_assert(unibrow::Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit == 3); - static const int kMaxSizePerChar = sizeof(Char) == 1 ? 
2 : 3; - while (read_index < read_length) { - int up_to = read_length; - if (write_capacity != -1) { - int remaining_capacity = - write_capacity - static_cast(current_write - write_start); - int writable_length = - (remaining_capacity - kMaxSizePerChar) / kMaxSizePerChar; - // Need to drop into slow loop. - if (writable_length <= 0) break; - up_to = std::min(up_to, read_index + writable_length); - } - // Write the characters to the stream. - if (sizeof(Char) == 1) { - // Simply memcpy if we only have ASCII characters. - uint8_t char_mask = 0; - for (int i = read_index; i < up_to; i++) char_mask |= read_start[i]; - if ((char_mask & 0x80) == 0) { - int copy_length = up_to - read_index; - memcpy(current_write, read_start + read_index, copy_length); - current_write += copy_length; - read_index = up_to; - } else { - for (; read_index < up_to; read_index++) { - current_write += unibrow::Utf8::EncodeOneByte( - current_write, static_cast(read_start[read_index])); - DCHECK(write_capacity == -1 || - (current_write - write_start) <= write_capacity); - } - } - } else { - for (; read_index < up_to; read_index++) { - uint16_t character = read_start[read_index]; - current_write += unibrow::Utf8::Encode(current_write, character, - prev_char, replace_invalid_utf8); - prev_char = character; - DCHECK(write_capacity == -1 || - (current_write - write_start) <= write_capacity); - } - } - } - if (read_index < read_length) { - DCHECK_NE(-1, write_capacity); - // Aborted due to limited capacity. Check capacity on each iteration. - int remaining_capacity = - write_capacity - static_cast(current_write - write_start); - DCHECK_GE(remaining_capacity, 0); - for (; read_index < read_length && remaining_capacity > 0; read_index++) { - uint32_t character = read_start[read_index]; - int written = 0; - // We can't use a local buffer here because Encode needs to modify - // previous characters in the stream. We know, however, that - // exactly one character will be advanced. - if (unibrow::Utf16::IsSurrogatePair(prev_char, character)) { - written = unibrow::Utf8::Encode(current_write, character, prev_char, - replace_invalid_utf8); - DCHECK_EQ(written, 1); - } else { - // Use a scratch buffer to check the required characters. - char temp_buffer[unibrow::Utf8::kMaxEncodedSize]; - // Encoding a surrogate pair to Utf8 always takes 4 bytes. - static const int kSurrogatePairEncodedSize = - static_cast(unibrow::Utf8::kMaxEncodedSize); - // For REPLACE_INVALID_UTF8, catch the case where we cut off in the - // middle of a surrogate pair. Abort before encoding the pair instead. - if (replace_invalid_utf8 && - remaining_capacity < kSurrogatePairEncodedSize && - unibrow::Utf16::IsLeadSurrogate(character) && - read_index + 1 < read_length && - unibrow::Utf16::IsTrailSurrogate(read_start[read_index + 1])) { - write_null = false; - break; - } - // Can't encode using prev_char as gcc has array bounds issues. - written = unibrow::Utf8::Encode(temp_buffer, character, - unibrow::Utf16::kNoPreviousCharacter, - replace_invalid_utf8); - if (written > remaining_capacity) { - // Won't fit. Abort and do not null-terminate the result. - write_null = false; - break; - } - // Copy over the character from temp_buffer. - for (int i = 0; i < written; i++) current_write[i] = temp_buffer[i]; - } - - current_write += written; - remaining_capacity -= written; - prev_char = character; - } - } - - // Write out number of utf16 characters written to the stream. 
- if (utf16_chars_read_out != nullptr) *utf16_chars_read_out = read_index; - - // Only null-terminate if there's space. - if (write_null && (write_capacity == -1 || - (current_write - write_start) < write_capacity)) { - *current_write++ = '\0'; - } - return static_cast(current_write - write_start); -} -} // anonymous namespace - -int String::WriteUtf8(Isolate* v8_isolate, char* buffer, int capacity, - int* nchars_ref, int options) const { - auto str = Utils::OpenDirectHandle(this); - i::Isolate* i_isolate = reinterpret_cast(v8_isolate); - ApiRuntimeCallStatsScope rcs_scope(i_isolate, RCCId::kAPI_String_WriteUtf8); - EnterV8NoScriptNoExceptionScope api_scope(i_isolate); - str = i::String::Flatten(i_isolate, str); - i::DisallowGarbageCollection no_gc; - i::String::FlatContent content = str->GetFlatContent(no_gc); - if (content.IsOneByte()) { - return WriteUtf8Impl(content.ToOneByteVector(), buffer, capacity, - options, nchars_ref); - } else { - return WriteUtf8Impl(content.ToUC16Vector(), buffer, capacity, - options, nchars_ref); - } -} - -template -static inline int WriteHelper(i::Isolate* i_isolate, const String* string, - CharType* buffer, int start, int length, - int options) { - ApiRuntimeCallStatsScope rcs_scope(i_isolate, RCCId::kAPI_String_Write); - EnterV8NoScriptNoExceptionScope api_scope(i_isolate); - DCHECK(start >= 0 && length >= -1); - auto str = Utils::OpenDirectHandle(string); - int end = start + length; - if ((length == -1) || (static_cast(length) > str->length() - start)) - end = str->length(); - if (end < 0) return 0; - int write_length = end - start; - if (start < end) i::String::WriteToFlat(*str, buffer, start, write_length); - if (!(options & String::NO_NULL_TERMINATION) && - (length == -1 || write_length < length)) { - buffer[write_length] = '\0'; - } - return write_length; -} - -int String::WriteOneByte(Isolate* v8_isolate, uint8_t* buffer, int start, - int length, int options) const { - return WriteHelper(reinterpret_cast(v8_isolate), this, buffer, - start, length, options); -} - -int String::Write(Isolate* v8_isolate, uint16_t* buffer, int start, int length, - int options) const { - return WriteHelper(reinterpret_cast(v8_isolate), this, buffer, - start, length, options); -} - template static inline void WriteHelperV2(i::Isolate* i_isolate, const String* string, CharType* buffer, uint32_t offset, @@ -7504,7 +7297,8 @@ void FunctionTemplate::SealAndPrepareForPromotionToReadOnly() { self); } -Local v8::External::New(Isolate* v8_isolate, void* value) { +Local v8::External::New(Isolate* v8_isolate, void* value, + ExternalPointerTypeTag api_tag) { static_assert(sizeof(value) == sizeof(i::Address)); // Nullptr is not allowed here because serialization/deserialization of // nullptr external api references is not possible as nullptr is used as an @@ -7514,15 +7308,23 @@ Local v8::External::New(Isolate* v8_isolate, void* value) { i::Isolate* i_isolate = reinterpret_cast(v8_isolate); ApiRuntimeCallStatsScope rcs_scope(i_isolate, RCCId::kAPI_External_New); EnterV8NoScriptNoExceptionScope api_scope(i_isolate); - i::DirectHandle external = - i_isolate->factory()->NewExternal(value); + uint16_t tag_value = static_cast(i::kFirstExternalTypeTag) + + static_cast(api_tag); + Utils::ApiCheck(tag_value <= i::kLastExternalTypeTag, "v8::External::New", + "The provided tag is outside the allowed range"); + i::DirectHandle external = i_isolate->factory()->NewExternal( + value, static_cast(tag_value)); return Utils::ExternalToLocal(external); } -void* External::Value() const { +void* 
External::Value(ExternalPointerTypeTag api_tag) const { i::IsolateForSandbox isolate = i::GetCurrentIsolateForSandbox(); + uint16_t tag_value = static_cast(i::kFirstExternalTypeTag) + + static_cast(api_tag); + Utils::ApiCheck(tag_value <= i::kLastExternalTypeTag, "v8::External::Value", + "The provided tag is outside the allowed range"); return i::Cast(*Utils::OpenDirectHandle(this)) - ->value(isolate); + ->value(isolate, static_cast(tag_value)); } Local v8::CppHeapExternal::NewImpl(Isolate* v8_isolate, @@ -9734,6 +9536,12 @@ void Isolate::HandleExternalMemoryInterrupt() { heap->HandleExternalMemoryInterrupt(); } +bool Isolate::RetryCustomAllocate(std::function allocate) { + i::Heap* heap = reinterpret_cast(this)->heap(); + return heap->allocator()->RetryCustomAllocate( + [&allocate]() { return allocate(); }, internal::AllocationType::kOld); +} + IsolateGroup::IsolateGroup(i::IsolateGroup*&& isolate_group) : isolate_group_(isolate_group) { DCHECK_NOT_NULL(isolate_group_); @@ -10861,7 +10669,6 @@ size_t Isolate::CopyCodePages(size_t capacity, MemoryRange* code_pages_out) { } CALLBACK_SETTER(FatalErrorHandler, FatalErrorCallback, exception_behavior) -CALLBACK_SETTER(OOMErrorHandler, OOMErrorCallback, oom_behavior) CALLBACK_SETTER(ModifyCodeGenerationFromStringsCallback, ModifyCodeGenerationFromStringsCallback2, modify_code_gen_callback) @@ -10906,6 +10713,22 @@ void Isolate::InstallConditionalFeatures(Local context) { #endif // V8_ENABLE_WEBASSEMBLY } +void Isolate::SetOOMErrorHandler(OOMErrorCallback callback) { + void* data = reinterpret_cast(callback); + SetOOMErrorHandler( + [](const char* location, const OOMDetails& details, void* data) { + reinterpret_cast(data)(location, details); + }, + data); +} + +void Isolate::SetOOMErrorHandler(OOMErrorCallbackWithData callback, + void* data) { + i::Isolate* i_isolate = reinterpret_cast(this); + i_isolate->set_oom_behavior(callback); + i_isolate->set_oom_callback_data(data); +} + void Isolate::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback, void* data) { i::Isolate* i_isolate = reinterpret_cast(this); @@ -12107,13 +11930,14 @@ void WasmStreaming::OnBytesReceived(const uint8_t* bytes, size_t size) { UNREACHABLE(); } -void WasmStreaming::Finish(bool can_use_compiled_module) { UNREACHABLE(); } +void WasmStreaming::Finish( + const WasmStreaming::ModuleCachingCallback& caching_callback) { + UNREACHABLE(); +} void WasmStreaming::Abort(MaybeLocal exception) { UNREACHABLE(); } -bool WasmStreaming::SetCompiledModuleBytes(const uint8_t* bytes, size_t size) { - UNREACHABLE(); -} +void WasmStreaming::SetHasCompiledModuleBytes() { UNREACHABLE(); } void WasmStreaming::SetMoreFunctionsCanBeSerializedCallback( std::function) { diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc index 05647c347f1e6f..797a4eae58ce51 100644 --- a/deps/v8/src/asmjs/asm-parser.cc +++ b/deps/v8/src/asmjs/asm-parser.cc @@ -414,7 +414,7 @@ void AsmJsParser::ValidateModuleParameters() { // 6.1 ValidateModule - variables void AsmJsParser::ValidateModuleVars() { - while (Peek(TOK(var)) || Peek(TOK(const))) { + while (!failed_ && (Peek(TOK(var)) || Peek(TOK(const)))) { bool mutable_variable = true; if (Check(TOK(var))) { // Had a var. diff --git a/deps/v8/src/asmjs/asm-scanner.cc b/deps/v8/src/asmjs/asm-scanner.cc index ae03396eea3dc4..bac12150af7987 100644 --- a/deps/v8/src/asmjs/asm-scanner.cc +++ b/deps/v8/src/asmjs/asm-scanner.cc @@ -101,13 +101,13 @@ void AsmJsScanner::Next() { switch (ch) { case ' ': case '\t': - case '\r': // Ignore whitespace. 
break; + case '\r': case '\n': - // Track when we've passed a newline for optional semicolon support, - // but keep scanning. + // Track when we've passed a line terminator for optional semicolon + // support, but keep scanning. preceded_by_newline_ = true; break; @@ -321,7 +321,7 @@ void AsmJsScanner::ConsumeNumber(base::uc32 ch) { token_ = '.'; return; } - // Decode numbers, with seperate paths for prefixes and implicit octals. + // Decode numbers, with separate paths for prefixes and implicit octals. if (has_prefix && number[0] == '0') { // "0[xob]" by itself is a parse error. if (number.size() <= 2) { @@ -391,7 +391,7 @@ bool AsmJsScanner::ConsumeCComment() { if (Consume('/')) return true; } - if (Consume('\n')) { + if (Consume('\n') || Consume('\r')) { preceded_by_newline_ = true; } else { Advance(); @@ -402,7 +402,7 @@ bool AsmJsScanner::ConsumeCComment() { void AsmJsScanner::ConsumeCPPComment() { while (HasMoreChars()) { - if (Consume('\n')) { + if (Consume('\n') || Consume('\r')) { preceded_by_newline_ = true; return; } diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc index bd79b016f79847..c38d79dfd57b51 100644 --- a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc +++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc @@ -57,10 +57,11 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) { for (int i = 0; i < private_members->length(); ++i) { ClassLiteralProperty* prop = private_members->at(i); - // Private fields have their key and value present in + // Private fields and auto-accessors have their key and value present in // instance_members_initializer_function, so they will // already have been visited. - if (prop->kind() == ClassLiteralProperty::Kind::FIELD) { + if (prop->kind() == ClassLiteralProperty::Kind::FIELD || + prop->kind() == ClassLiteralProperty::Kind::AUTO_ACCESSOR) { CheckVisited(prop->value()); } else { Visit(prop->value()); diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h index 009779434c9163..0d6034920bede2 100644 --- a/deps/v8/src/ast/ast-traversal-visitor.h +++ b/deps/v8/src/ast/ast-traversal-visitor.h @@ -514,6 +514,13 @@ void AstTraversalVisitor::VisitInitializeClassMembersStatement( RECURSE(Visit(prop->key())); } RECURSE(Visit(prop->value())); + if (prop->is_auto_accessor()) { + // The generated getter and setter are created after the + // ClassLiteralProperty value is created, so we visit them in + // the same order. 
+ RECURSE(Visit(prop->auto_accessor_info()->generated_getter())); + RECURSE(Visit(prop->auto_accessor_info()->generated_setter())); + } } } diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h index 117d02e442ee8b..11c005ddd435f0 100644 --- a/deps/v8/src/ast/ast-value-factory.h +++ b/deps/v8/src/ast/ast-value-factory.h @@ -252,19 +252,22 @@ using AstRawStringMap = F(bigint_string, "bigint") \ F(boolean_string, "boolean") \ F(computed_string, "") \ - F(dot_brand_string, ".brand") \ F(constructor_string, "constructor") \ F(default_string, "default") \ + F(defer_string, "defer") \ F(done_string, "done") \ + F(dot_brand_string, ".brand") \ + F(dot_catch_string, ".catch") \ F(dot_default_string, ".default") \ F(dot_for_string, ".for") \ F(dot_generator_object_string, ".generator_object") \ F(dot_home_object_string, ".home_object") \ - F(dot_result_string, ".result") \ + F(dot_new_target_string, ".new.target") \ F(dot_repl_result_string, ".repl_result") \ + F(dot_result_string, ".result") \ F(dot_static_home_object_string, ".static_home_object") \ F(dot_switch_tag_string, ".switch_tag") \ - F(dot_catch_string, ".catch") \ + F(dot_this_function_string, ".this_function") \ F(empty_string, "") \ F(eval_string, "eval") \ F(from_string, "from") \ @@ -274,7 +277,6 @@ using AstRawStringMap = F(let_string, "let") \ F(meta_string, "meta") \ F(native_string, "native") \ - F(new_target_string, ".new.target") \ F(next_string, "next") \ F(number_string, "number") \ F(object_string, "object") \ @@ -288,11 +290,9 @@ using AstRawStringMap = F(symbol_string, "symbol") \ F(target_string, "target") \ F(this_string, "this") \ - F(this_function_string, ".this_function") \ F(throw_string, "throw") \ F(undefined_string, "undefined") \ F(value_string, "value") - class AstStringConstants final { public: #define F(name, str) +1 diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc index 480a5aab7096be..15e24dae4743ea 100644 --- a/deps/v8/src/ast/modules.cc +++ b/deps/v8/src/ast/modules.cc @@ -70,13 +70,13 @@ void SourceTextModuleDescriptor::AddImport( void SourceTextModuleDescriptor::AddStarImport( const AstRawString* local_name, const AstRawString* specifier, + const ModuleImportPhase import_phase, const ImportAttributes* import_attributes, const Scanner::Location loc, const Scanner::Location specifier_loc, Zone* zone) { Entry* entry = zone->New(loc); entry->local_name = local_name; - entry->module_request = - AddModuleRequest(specifier, ModuleImportPhase::kEvaluation, - import_attributes, specifier_loc, zone); + entry->module_request = AddModuleRequest( + specifier, import_phase, import_attributes, specifier_loc, zone); AddNamespaceImport(entry, zone); } diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h index 8f5e1001a3b13a..50cf0e018ecebc 100644 --- a/deps/v8/src/ast/modules.h +++ b/deps/v8/src/ast/modules.h @@ -45,6 +45,7 @@ class SourceTextModuleDescriptor : public ZoneObject { // import * as x from "foo.js"; void AddStarImport(const AstRawString* local_name, const AstRawString* specifier, + const ModuleImportPhase import_phase, const ImportAttributes* import_attributes, const Scanner::Location loc, const Scanner::Location specifier_loc, Zone* zone); diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc index f4cf7258f8828d..b3be1e57d2bc88 100644 --- a/deps/v8/src/ast/prettyprinter.cc +++ b/deps/v8/src/ast/prettyprinter.cc @@ -592,8 +592,15 @@ void CallPrinter::VisitTemplateLiteral(TemplateLiteral* node) { void 
CallPrinter::VisitImportCallExpression(ImportCallExpression* node) { Print("import"); - if (node->phase() == ModuleImportPhase::kSource) { - Print(".source"); + switch (node->phase()) { + case ModuleImportPhase::kSource: + Print(".source"); + break; + case ModuleImportPhase::kDefer: + Print(".defer"); + break; + case ModuleImportPhase::kEvaluation: + break; } Print("("); Find(node->specifier(), true); diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index 88a3955f547036..e406e9164590f7 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -822,16 +822,17 @@ void DeclarationScope::DeclareDefaultFunctionVariables( DeclareThis(ast_value_factory); bool was_added; - new_target_ = Declare(zone(), ast_value_factory->new_target_string(), + new_target_ = Declare(zone(), ast_value_factory->dot_new_target_string(), VariableMode::kConst, NORMAL_VARIABLE, kCreatedInitialized, kNotAssigned, &was_added); DCHECK(was_added); if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) || IsAccessorFunction(function_kind_)) { - EnsureRareData()->this_function = Declare( - zone(), ast_value_factory->this_function_string(), VariableMode::kConst, - NORMAL_VARIABLE, kCreatedInitialized, kNotAssigned, &was_added); + EnsureRareData()->this_function = + Declare(zone(), ast_value_factory->dot_this_function_string(), + VariableMode::kConst, NORMAL_VARIABLE, kCreatedInitialized, + kNotAssigned, &was_added); DCHECK(was_added); } } diff --git a/deps/v8/src/base/abort-mode.h b/deps/v8/src/base/abort-mode.h index 62879e4fce4f09..400c8f66fc5856 100644 --- a/deps/v8/src/base/abort-mode.h +++ b/deps/v8/src/base/abort-mode.h @@ -28,6 +28,11 @@ enum class AbortMode { kExitWithSuccessAndIgnoreDcheckFailures, kExitWithFailureAndIgnoreDcheckFailures, + // Used in combination with automated vulnerability discovery systems to + // ignore fatal errors that do not have any security impact. + // For everything else the default behavior is used. + kExitIfNoSecurityImpact, + // DCHECKs, CHECKs, etc. use IMMEDIATE_CRASH() to signal abnormal program // termination. See the --hard-abort flag for more details. kImmediateCrash, @@ -48,6 +53,10 @@ V8_INLINE bool DcheckFailuresAreIgnored() { g_abort_mode == AbortMode::kExitWithFailureAndIgnoreDcheckFailures; } +V8_INLINE bool FatalErrorsWithNoSecurityImpactShouldExit() { + return g_abort_mode == AbortMode::kExitIfNoSecurityImpact; +} + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h index 8ab93476ba32ee..da2580e86bc981 100644 --- a/deps/v8/src/base/atomicops.h +++ b/deps/v8/src/base/atomicops.h @@ -5,55 +5,30 @@ #ifndef V8_BASE_ATOMICOPS_H_ #define V8_BASE_ATOMICOPS_H_ -// The routines exported by this module are subtle. If you use them, even if -// you get the code right, it will depend on careful reasoning about atomicity -// and memory ordering; it will be less readable, and harder to maintain. If -// you plan to use these routines, you should have a good reason, such as solid -// evidence that performance would otherwise suffer, or there being no -// alternative. You should assume only properties explicitly guaranteed by the -// specifications in this file. You are almost certainly _not_ writing code -// just for the x86; if you assume x86 semantics, x86 hardware bugs and -// implementations on other archtectures will cause your code to break. If you -// do not know what you are doing, avoid these routines, and use a Mutex. 
-// -// It is incorrect to make direct assignments to/from an atomic variable. -// You should use one of the Load or Store routines. The Relaxed versions -// are provided when no fences are needed: -// Relaxed_Store() -// Relaxed_Load() -// Although there are currently no compiler enforcement, you are encouraged -// to use these. -// - #include #include -// Small C++ header which defines implementation specific macros used to -// identify the STL implementation. -// - libc++: captures __config for _LIBCPP_VERSION -// - libstdc++: captures bits/c++config.h for __GLIBCXX__ -#include - #include "src/base/base-export.h" -#include "src/base/build_config.h" #include "src/base/macros.h" #if defined(V8_OS_STARBOARD) #include "starboard/atomic.h" #endif // V8_OS_STARBOARD -namespace v8 { -namespace base { +namespace v8::base { #ifdef V8_OS_STARBOARD + using Atomic8 = SbAtomic8; using Atomic16 = int16_t; using Atomic32 = SbAtomic32; #if SB_IS_64_BIT using Atomic64 = SbAtomic64; #endif + #else + using Atomic8 = char; using Atomic16 = int16_t; using Atomic32 = int32_t; @@ -66,6 +41,7 @@ using Atomic64 = int64_t; using Atomic64 = intptr_t; #endif // defined(__ILP32__) #endif // defined(V8_HOST_ARCH_64_BIT) + #endif // V8_OS_STARBOARD // Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or @@ -77,21 +53,18 @@ using AtomicWord = Atomic32; #endif static_assert(sizeof(void*) == sizeof(AtomicWord)); -namespace helper { -template -volatile std::atomic* to_std_atomic(volatile T* ptr) { - return reinterpret_cast*>(ptr); -} -template -volatile const std::atomic* to_std_atomic_const(volatile const T* ptr) { - return reinterpret_cast*>(ptr); -} -} // namespace helper - inline void SeqCst_MemoryFence() { std::atomic_thread_fence(std::memory_order_seq_cst); } +template +concept AtomicTypeForTrivialOperations = +#if defined(V8_HOST_ARCH_64_BIT) + std::is_same_v || +#endif + std::is_same_v || std::is_same_v || + std::is_same_v; + // Atomically execute: // result = *ptr; // if (result == old_value) @@ -101,287 +74,106 @@ inline void SeqCst_MemoryFence() { // I.e. replace |*ptr| with |new_value| if |*ptr| used to be |old_value|. // Always return the value of |*ptr| before the operation. // Acquire, Relaxed, Release correspond to standard C++ memory orders. 
-inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value, - Atomic8 new_value) { - std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_relaxed, std::memory_order_relaxed); +template +inline std::type_identity_t Relaxed_CompareAndSwap( + T* ptr, std::type_identity_t old_value, + std::type_identity_t new_value) { + std::atomic_ref(*ptr).compare_exchange_strong(old_value, new_value, + std::memory_order_relaxed); return old_value; } -inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr, - Atomic16 old_value, Atomic16 new_value) { - std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_relaxed, std::memory_order_relaxed); +template +inline std::type_identity_t AcquireRelease_CompareAndSwap( + T* ptr, std::type_identity_t old_value, + std::type_identity_t new_value) { + std::atomic_ref(*ptr).compare_exchange_strong(old_value, new_value, + std::memory_order_acq_rel, + std::memory_order_acquire); return old_value; } -inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, Atomic32 new_value) { - std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_relaxed, std::memory_order_relaxed); +template +inline std::type_identity_t Release_CompareAndSwap( + T* ptr, std::type_identity_t old_value, + std::type_identity_t new_value) { + std::atomic_ref(*ptr).compare_exchange_strong(old_value, new_value, + std::memory_order_release, + std::memory_order_relaxed); return old_value; } -inline Atomic8 Relaxed_FetchOr(volatile Atomic8* ptr, Atomic8 bits) { - auto old = helper::to_std_atomic(ptr); - return old->fetch_or(bits, std::memory_order_relaxed); -} - -inline Atomic16 Relaxed_FetchOr(volatile Atomic16* ptr, Atomic16 bits) { - auto old = helper::to_std_atomic(ptr); - return old->fetch_or(bits, std::memory_order_relaxed); -} - -inline Atomic32 Relaxed_FetchOr(volatile Atomic32* ptr, Atomic32 bits) { - auto old = helper::to_std_atomic(ptr); - return old->fetch_or(bits, std::memory_order_relaxed); -} - -inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value, - std::memory_order_relaxed); -} - -inline Atomic32 SeqCst_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value, - std::memory_order_seq_cst); -} - -inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr), - increment, - std::memory_order_relaxed); -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, Atomic32 new_value) { - atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_acquire, std::memory_order_acquire); - return old_value; -} - -inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value, - Atomic8 new_value) { - bool result = atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_release, std::memory_order_relaxed); - USE(result); // Make gcc compiler happy. 
- return old_value; -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, Atomic32 new_value) { - atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_release, std::memory_order_relaxed); - return old_value; -} - -inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_acq_rel, std::memory_order_acquire); - return old_value; -} - -inline Atomic32 SeqCst_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, Atomic32 new_value) { - atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_seq_cst, std::memory_order_seq_cst); - return old_value; -} - -inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_relaxed); -} - -inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_relaxed); -} - -inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_relaxed); -} - -inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_release); -} - -inline void Release_Store(volatile Atomic16* ptr, Atomic16 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_release); -} - -inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_release); -} - -inline void SeqCst_Store(volatile Atomic8* ptr, Atomic8 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_seq_cst); -} - -inline void SeqCst_Store(volatile Atomic16* ptr, Atomic16 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_seq_cst); -} - -inline void SeqCst_Store(volatile Atomic32* ptr, Atomic32 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_seq_cst); -} - -inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_relaxed); -} - -inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_relaxed); -} - -inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_relaxed); -} - -inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_acquire); -} - -inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_acquire); -} - -inline Atomic8 SeqCst_Load(volatile const Atomic8* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_seq_cst); -} - -inline Atomic32 SeqCst_Load(volatile const Atomic32* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_seq_cst); -} - -#if 
defined(V8_HOST_ARCH_64_BIT) - -inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, Atomic64 new_value) { - std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_relaxed, std::memory_order_relaxed); +template +inline std::type_identity_t SeqCst_CompareAndSwap( + T* ptr, std::type_identity_t old_value, + std::type_identity_t new_value) { + std::atomic_ref(*ptr).compare_exchange_strong(old_value, new_value, + std::memory_order_seq_cst, + std::memory_order_seq_cst); return old_value; } -inline Atomic64 Relaxed_FetchOr(volatile Atomic64* ptr, Atomic64 bits) { - auto old = helper::to_std_atomic(ptr); - return old->fetch_or(bits, std::memory_order_relaxed); +template +inline std::type_identity_t Relaxed_AtomicExchange( + T* ptr, std::type_identity_t new_value) { + return std::atomic_ref(*ptr).exchange(new_value, + std::memory_order_relaxed); } -inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value, - std::memory_order_relaxed); +template +inline std::type_identity_t SeqCst_AtomicExchange( + T* ptr, std::type_identity_t new_value) { + return std::atomic_ref(*ptr).exchange(new_value, + std::memory_order_seq_cst); } -inline Atomic64 SeqCst_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value, - std::memory_order_seq_cst); +template +inline std::type_identity_t Relaxed_FetchOr(T* ptr, + std::type_identity_t bits) { + return std::atomic_ref(*ptr).fetch_or(bits, std::memory_order_relaxed); } -inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr), - increment, - std::memory_order_relaxed); +template +inline std::type_identity_t Relaxed_AtomicIncrement( + T* ptr, std::type_identity_t increment) { + return increment + std::atomic_ref(*ptr).fetch_add( + increment, std::memory_order_relaxed); } -inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, Atomic64 new_value) { - std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_acquire, std::memory_order_acquire); - return old_value; +template +inline void Relaxed_Store(T* ptr, std::type_identity_t value) { + std::atomic_ref(*ptr).store(value, std::memory_order_relaxed); } -inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, Atomic64 new_value) { - std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_release, std::memory_order_relaxed); - return old_value; +template +inline void Release_Store(T* ptr, std::type_identity_t value) { + std::atomic_ref(*ptr).store(value, std::memory_order_release); } -inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_acq_rel, std::memory_order_acquire); - return old_value; +template +inline void SeqCst_Store(T* ptr, std::type_identity_t value) { + std::atomic_ref(*ptr).store(value, std::memory_order_seq_cst); } -inline Atomic64 SeqCst_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, Atomic64 new_value) { - 
std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_seq_cst, std::memory_order_seq_cst); - return old_value; -} - -inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_relaxed); +template +inline T Relaxed_Load(const T* ptr) { + return std::atomic_ref(*const_cast(ptr)) + .load(std::memory_order_relaxed); } -inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_release); +template +inline T Acquire_Load(const T* ptr) { + return std::atomic_ref(*const_cast(ptr)) + .load(std::memory_order_acquire); } -inline void SeqCst_Store(volatile Atomic64* ptr, Atomic64 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_seq_cst); +template +inline T SeqCst_Load(const T* ptr) { + return std::atomic_ref(*const_cast(ptr)) + .load(std::memory_order_seq_cst); } -inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_relaxed); -} - -inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_acquire); -} - -inline Atomic64 SeqCst_Load(volatile const Atomic64* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_seq_cst); -} - -#endif // defined(V8_HOST_ARCH_64_BIT) - -inline void Relaxed_Memcpy(volatile Atomic8* dst, volatile const Atomic8* src, - size_t bytes) { +inline void Relaxed_Memcpy(Atomic8* dst, const Atomic8* src, size_t bytes) { constexpr size_t kAtomicWordSize = sizeof(AtomicWord); while (bytes > 0 && !IsAligned(reinterpret_cast(dst), kAtomicWordSize)) { @@ -391,9 +183,8 @@ inline void Relaxed_Memcpy(volatile Atomic8* dst, volatile const Atomic8* src, if (IsAligned(reinterpret_cast(src), kAtomicWordSize) && IsAligned(reinterpret_cast(dst), kAtomicWordSize)) { while (bytes >= kAtomicWordSize) { - Relaxed_Store( - reinterpret_cast(dst), - Relaxed_Load(reinterpret_cast(src))); + Relaxed_Store(reinterpret_cast(dst), + Relaxed_Load(reinterpret_cast(src))); dst += kAtomicWordSize; src += kAtomicWordSize; bytes -= kAtomicWordSize; @@ -405,8 +196,7 @@ inline void Relaxed_Memcpy(volatile Atomic8* dst, volatile const Atomic8* src, } } -inline void Relaxed_Memmove(volatile Atomic8* dst, volatile const Atomic8* src, - size_t bytes) { +inline void Relaxed_Memmove(Atomic8* dst, const Atomic8* src, size_t bytes) { // Use Relaxed_Memcpy if copying forwards is safe. This is the case if there // is no overlap, or {dst} lies before {src}. 
// This single check checks for both: @@ -431,9 +221,8 @@ inline void Relaxed_Memmove(volatile Atomic8* dst, volatile const Atomic8* src, dst -= kAtomicWordSize; src -= kAtomicWordSize; bytes -= kAtomicWordSize; - Relaxed_Store( - reinterpret_cast(dst), - Relaxed_Load(reinterpret_cast(src))); + Relaxed_Store(reinterpret_cast(dst), + Relaxed_Load(reinterpret_cast(src))); } } while (bytes > 0) { @@ -464,8 +253,7 @@ inline int MemcmpNotEqualFundamental(AtomicWord u1, AtomicWord u2) { } } // namespace helper -inline int Relaxed_Memcmp(volatile const Atomic8* s1, - volatile const Atomic8* s2, size_t len) { +inline int Relaxed_Memcmp(const Atomic8* s1, const Atomic8* s2, size_t len) { constexpr size_t kAtomicWordSize = sizeof(AtomicWord); while (len > 0 && !(IsAligned(reinterpret_cast(s1), kAtomicWordSize) && @@ -479,10 +267,8 @@ inline int Relaxed_Memcmp(volatile const Atomic8* s1, if (IsAligned(reinterpret_cast(s1), kAtomicWordSize) && IsAligned(reinterpret_cast(s2), kAtomicWordSize)) { while (len >= kAtomicWordSize) { - AtomicWord u1 = - Relaxed_Load(reinterpret_cast(s1)); - AtomicWord u2 = - Relaxed_Load(reinterpret_cast(s2)); + AtomicWord u1 = Relaxed_Load(reinterpret_cast(s1)); + AtomicWord u2 = Relaxed_Load(reinterpret_cast(s2)); if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2); s1 += kAtomicWordSize; s2 += kAtomicWordSize; @@ -500,7 +286,6 @@ inline int Relaxed_Memcmp(volatile const Atomic8* s1, return 0; } -} // namespace base -} // namespace v8 +} // namespace v8::base #endif // V8_BASE_ATOMICOPS_H_ diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h index 56fb967fb446a7..f53dee47dd9aeb 100644 --- a/deps/v8/src/base/bits.h +++ b/deps/v8/src/base/bits.h @@ -262,14 +262,13 @@ inline constexpr size_t RoundUpToPowerOfTwo(size_t value) { // RoundDownToPowerOfTwo32(value) returns the greatest power of two which is // less than or equal to |value|. If you pass in a |value| that is already a // power of two, it is returned as is. -inline uint32_t RoundDownToPowerOfTwo32(uint32_t value) { +inline constexpr uint32_t RoundDownToPowerOfTwo32(uint32_t value) { if (value > 0x80000000u) return 0x80000000u; uint32_t result = RoundUpToPowerOfTwo32(value); if (result > value) result >>= 1; return result; } - // Precondition: 0 <= shift < 32 inline constexpr uint32_t RotateRight32(uint32_t value, uint32_t shift) { return (value >> shift) | (value << ((32 - shift) & 31)); diff --git a/deps/v8/src/base/container-utils.h b/deps/v8/src/base/container-utils.h index 58857dacda615d..ae9dc07eb4136f 100644 --- a/deps/v8/src/base/container-utils.h +++ b/deps/v8/src/base/container-utils.h @@ -10,15 +10,32 @@ #include #include #include +#include #include namespace v8::base { +namespace detail { +template +struct has_find : std::false_type {}; + +template +struct has_find< + T, U, std::void_t().find(std::declval()))>> + : std::true_type {}; + +template +constexpr bool has_find_v = detail::has_find::value; +} // namespace detail // Returns true iff the {element} is found in the {container}. template bool contains(const C& container, const T& element) { const auto e = std::end(container); - return std::find(std::begin(container), e, element) != e; + if constexpr (detail::has_find_v) { + return container.find(element) != e; + } else { + return std::find(std::begin(container), e, element) != e; + } } // Returns the first index of {element} in {container}. 
Returns std::nullopt if diff --git a/deps/v8/src/base/emulated-virtual-address-subspace.cc b/deps/v8/src/base/emulated-virtual-address-subspace.cc index 091d5bfa93cfca..50fa9190e65b0a 100644 --- a/deps/v8/src/base/emulated-virtual-address-subspace.cc +++ b/deps/v8/src/base/emulated-virtual-address-subspace.cc @@ -174,7 +174,8 @@ std::unique_ptr EmulatedVirtualAddressSubspace::AllocateSubspace( Address hint, size_t size, size_t alignment, PagePermissions max_page_permissions, - std::optional key) { + std::optional key, + PlatformSharedMemoryHandle handle) { UNIMPLEMENTED(); } diff --git a/deps/v8/src/base/emulated-virtual-address-subspace.h b/deps/v8/src/base/emulated-virtual-address-subspace.h index 1b31c2ddddc13a..ddceb7b4e36d23 100644 --- a/deps/v8/src/base/emulated-virtual-address-subspace.h +++ b/deps/v8/src/base/emulated-virtual-address-subspace.h @@ -73,7 +73,8 @@ class V8_BASE_EXPORT EmulatedVirtualAddressSubspace final std::unique_ptr AllocateSubspace( Address hint, size_t size, size_t alignment, PagePermissions max_page_permissions, - std::optional key) override; + std::optional key = std::nullopt, + PlatformSharedMemoryHandle handle = kInvalidSharedMemoryHandle) override; bool RecommitPages(Address address, size_t size, PagePermissions permissions) override; diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc index 54aa99f02d7fbb..ac36b2e585407c 100644 --- a/deps/v8/src/base/logging.cc +++ b/deps/v8/src/base/logging.cc @@ -89,16 +89,33 @@ void FatalOOM(OOMType type, const char* msg) { OS::PrintError("\n\n#\n# Fatal %s out of memory: %s\n#", type_str, msg); if (g_print_stack_trace) v8::base::g_print_stack_trace(); + fflush(stderr); + if (FatalErrorsWithNoSecurityImpactShouldExit()) { + OS::ExitProcess(-1); + } else { + OS::Abort(); + } +} -#ifdef V8_FUZZILLI - // When fuzzing, we generally want to ignore OOM failures. - // It's important that we exit with a non-zero exit status here so that the - // fuzzer treats it as a failed execution. - _exit(1); -#else - OS::Abort(); -#endif // V8_FUZZILLI +void FatalNoSecurityImpact(const char* format, ...) { + OS::PrintError("\n\n#\n# Fatal error with no security impact:\n# "); + + va_list arguments; + va_start(arguments, format); + v8::base::OS::VPrintError(format, arguments); + va_end(arguments); + + OS::PrintError("\n#\n"); + + if (g_print_stack_trace) v8::base::g_print_stack_trace(); + + fflush(stderr); + if (FatalErrorsWithNoSecurityImpactShouldExit()) { + OS::ExitProcess(-1); + } else { + OS::Abort(); + } } // Define specialization to pretty print characters (escaping non-printable diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h index 722c6cdabda434..7d77327183d0a7 100644 --- a/deps/v8/src/base/logging.h +++ b/deps/v8/src/base/logging.h @@ -105,6 +105,16 @@ enum class OOMType { // recognizes as such by fuzzers and other tooling. [[noreturn]] V8_BASE_EXPORT void FatalOOM(OOMType type, const char* msg); +// A variant of Fatal that makes it clear that the failure does not have any +// security impact. This is useful for automatic vulnerability discovery systems +// (e.g. fuzzers) to ignore or discard such crashes. +// +// USE WITH CARE! Using this function means that fuzzers will *not* report +// situations in which the function is reached. Legitimate use cases include +// expected crashes due to misconfigurations (e.g. invalid runtime flags) or +// invalid use of (debug-only) runtime functions exposed to JS.
+[[noreturn]] V8_BASE_EXPORT void FatalNoSecurityImpact(const char* format, ...); + // In official builds, assume all check failures can be debugged given just the // stack trace. #if !defined(DEBUG) && defined(OFFICIAL_BUILD) @@ -211,12 +221,6 @@ auto GetUnderlyingEnumTypeForPrinting(T val) { } // namespace detail -// Define default PrintCheckOperand for non-printable types. -template -std::string PrintCheckOperand(T val) { - return ""; -} - // Define PrintCheckOperand for each T which defines operator<< for ostream, // except types explicitly specialized below. template @@ -265,21 +269,25 @@ std::string PrintCheckOperand(T val) { template requires(!has_output_operator && requires(T t) { - { t.begin() } -> std::forward_iterator; + { std::begin(t) } -> std::forward_iterator; }) -std::string PrintCheckOperand(T container) { +std::string PrintCheckOperand(const T& container) { CheckMessageStream oss; - oss << "{"; - bool first = true; - for (const auto& val : container) { - if (!first) { - oss << ","; - } else { - first = false; + const size_t size = std::size(container); + oss << size << " element" << (size == 1 ? "" : "s"); + if constexpr (requires(const T& t) { PrintCheckOperand(*std::begin(t)); }) { + oss << ": {"; + bool first = true; + for (const auto& val : container) { + if (!first) { + oss << ","; + } else { + first = false; + } + oss << PrintCheckOperand(val); } - oss << PrintCheckOperand(val); + oss << "}"; } - oss << "}"; return oss.str(); } @@ -303,16 +311,27 @@ DEFINE_PRINT_CHECK_OPERAND_CHAR(unsigned char) // takes ownership of the returned string. template V8_NOINLINE std::string* MakeCheckOpString(Lhs lhs, Rhs rhs, char const* msg) { - std::string lhs_str = PrintCheckOperand(lhs); - std::string rhs_str = PrintCheckOperand(rhs); + constexpr bool kLhsIsPrintable = requires { PrintCheckOperand(lhs); }; + constexpr bool kRhsIsPrintable = requires { PrintCheckOperand(rhs); }; + CheckMessageStream ss; ss << msg; - constexpr size_t kMaxInlineLength = 50; - if (lhs_str.size() <= kMaxInlineLength && - rhs_str.size() <= kMaxInlineLength) { - ss << " (" << lhs_str << " vs. " << rhs_str << ")"; - } else { - ss << "\n " << lhs_str << "\n vs.\n " << rhs_str << "\n"; + if constexpr (kLhsIsPrintable || kRhsIsPrintable) { + std::string tmp_lhs_str; + std::string tmp_rhs_str; + if constexpr (kLhsIsPrintable) tmp_lhs_str = PrintCheckOperand(lhs); + if constexpr (kRhsIsPrintable) tmp_rhs_str = PrintCheckOperand(rhs); + std::string_view lhs_str{kLhsIsPrintable ? std::string_view{tmp_lhs_str} + : ""}; + std::string_view rhs_str{kRhsIsPrintable ? std::string_view{tmp_rhs_str} + : ""}; + + constexpr size_t kMaxInlineLength = 50; + if (std::max(lhs_str.size(), rhs_str.size()) <= kMaxInlineLength) { + ss << " (" << lhs_str << " vs. " << rhs_str << ")"; + } else { + ss << "\n " << lhs_str << "\n vs.\n " << rhs_str << "\n"; + } } return new std::string(ss.str()); } diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc index 1af8b12e8d833d..46a734a0151574 100644 --- a/deps/v8/src/base/platform/platform-fuchsia.cc +++ b/deps/v8/src/base/platform/platform-fuchsia.cc @@ -266,7 +266,9 @@ void OS::Initialize(AbortMode abort_mode, const char* const gc_fake_mmap) { // static void* OS::Allocate(void* address, size_t size, size_t alignment, - MemoryPermission access) { + MemoryPermission access, PlatformSharedMemoryHandle handle) { + // File handles aren't supported. 
+ DCHECK_EQ(handle, kInvalidSharedMemoryHandle); PlacementMode placement = address != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere; return CreateAndMapVmo(*zx::vmar::root_self(), g_root_vmar_base, @@ -337,8 +339,8 @@ bool OS::CanReserveAddressSpace() { return true; } // static std::optional OS::CreateAddressSpaceReservation( - void* hint, size_t size, size_t alignment, - MemoryPermission max_permission) { + void* hint, size_t size, size_t alignment, MemoryPermission max_permission, + PlatformSharedMemoryHandle handle) { DCHECK_EQ(0, reinterpret_cast
(hint) % alignment); zx::vmar child; zx_vaddr_t child_addr; diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index b1de382bd1c07d..d3cd1edf441186 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -129,15 +129,6 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(RandomNumberGenerator, static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER; #if !V8_OS_FUCHSIA && !V8_OS_ZOS -#if V8_OS_DARWIN -// kMmapFd is used to pass vm_alloc flags to tag the region with the user -// defined tag 255 This helps identify V8-allocated regions in memory analysis -// tools like vmmap(1). -const int kMmapFd = VM_MAKE_TAG(255); -#else // !V8_OS_DARWIN -const int kMmapFd = -1; -#endif // !V8_OS_DARWIN - #if defined(V8_TARGET_OS_MACOS) && V8_HOST_ARCH_ARM64 // During snapshot generation in cross builds, sysconf() runs on the Intel // host and returns host page size, while the snapshot needs to use the @@ -149,9 +140,16 @@ const int kMmapFdOffset = 0; enum class PageType { kShared, kPrivate }; -int GetFlagsForMemoryPermission(OS::MemoryPermission access, - PageType page_type) { - int flags = MAP_ANONYMOUS; +int GetFlagsForMemoryPermission(OS::MemoryPermission access, PageType page_type, + PlatformSharedMemoryHandle handle, + bool fixed = false) { + int flags = 0; + if (handle == kInvalidSharedMemoryHandle) { + flags |= MAP_ANONYMOUS; + } + if (fixed) { + flags |= MAP_FIXED; + } flags |= (page_type == PageType::kShared) ? MAP_SHARED : MAP_PRIVATE; if (access == OS::MemoryPermission::kNoAccess || access == OS::MemoryPermission::kNoAccessWillJitLater) { @@ -176,10 +174,20 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access, } void* Allocate(void* hint, size_t size, OS::MemoryPermission access, - PageType page_type) { + PageType page_type, + PlatformSharedMemoryHandle handle = kInvalidSharedMemoryHandle, + bool fixed = false) { int prot = GetProtectionFromMemoryPermission(access); - int flags = GetFlagsForMemoryPermission(access, page_type); - void* result = mmap(hint, size, prot, flags, kMmapFd, kMmapFdOffset); + int flags = GetFlagsForMemoryPermission(access, page_type, handle, fixed); +#if V8_OS_DARWIN + // fd is used to pass vm_alloc flags to tag the region with the user + // defined tag 255 This helps identify V8-allocated regions in memory analysis + // tools like vmmap(1). + int fd = VM_MAKE_TAG(255); +#else + int fd = FileDescriptorFromSharedMemoryHandle(handle); +#endif + void* result = mmap(hint, size, prot, flags, fd, kMmapFdOffset); if (result == MAP_FAILED) return nullptr; #if V8_OS_LINUX && V8_ENABLE_PRIVATE_MAPPING_FORK_OPTIMIZATION @@ -469,7 +477,7 @@ void* OS::GetRandomMmapAddr() { #if !V8_OS_ZOS // static void* OS::Allocate(void* hint, size_t size, size_t alignment, - MemoryPermission access) { + MemoryPermission access, PlatformSharedMemoryHandle handle) { size_t page_size = AllocatePageSize(); DCHECK_EQ(0, size % page_size); DCHECK_EQ(0, alignment % page_size); @@ -477,7 +485,8 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment, // Add the maximum misalignment so we are guaranteed an aligned base address. 
size_t request_size = size + (alignment - page_size); request_size = RoundUp(request_size, OS::AllocatePageSize()); - void* result = base::Allocate(hint, request_size, access, PageType::kPrivate); + PageType page_type = PageType::kPrivate; + void* result = base::Allocate(hint, request_size, access, page_type, handle); if (result == nullptr) return nullptr; // Unmap memory allocated before the aligned base address. @@ -499,6 +508,17 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment, } DCHECK_EQ(size, request_size); + + if (aligned_base != base && handle != kInvalidSharedMemoryHandle) { + // We have to remap because the base of the mapping must correspond to the base + // of the underlying file. + uint8_t* new_base = reinterpret_cast(base::Allocate( + aligned_base, size, access, page_type, handle, true /* fixed */)); + if (new_base != aligned_base) { + return nullptr; + } + } + return static_cast(aligned_base); } @@ -687,19 +707,19 @@ bool OS::CanReserveAddressSpace() { return true; } // static std::optional OS::CreateAddressSpaceReservation( - void* hint, size_t size, size_t alignment, - MemoryPermission max_permission) { + void* hint, size_t size, size_t alignment, MemoryPermission max_permission, + PlatformSharedMemoryHandle handle) { // On POSIX, address space reservations are backed by private memory mappings. MemoryPermission permission = MemoryPermission::kNoAccess; if (max_permission == MemoryPermission::kReadWriteExecute) { permission = MemoryPermission::kNoAccessWillJitLater; } - void* reservation = Allocate(hint, size, alignment, permission); + void* reservation = Allocate(hint, size, alignment, permission, handle); if (!reservation && permission == MemoryPermission::kNoAccessWillJitLater) { // Retry without MAP_JIT, for example in case we are running on an old OS X. permission = MemoryPermission::kNoAccess; - reservation = Allocate(hint, size, alignment, permission); + reservation = Allocate(hint, size, alignment, permission, handle); } if (!reservation) return {}; @@ -779,6 +799,7 @@ void OS::Abort() { _exit(-1); case AbortMode::kImmediateCrash: IMMEDIATE_CRASH(); + case AbortMode::kExitIfNoSecurityImpact: case AbortMode::kDefault: break; } diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc index 11402689fe9395..60e255af03ec3d 100644 --- a/deps/v8/src/base/platform/platform-win32.cc +++ b/deps/v8/src/base/platform/platform-win32.cc @@ -995,7 +995,10 @@ void CheckIsOOMError(int error) { // static void* OS::Allocate(void* hint, size_t size, size_t alignment, - MemoryPermission access) { + MemoryPermission access, PlatformSharedMemoryHandle handle) { + // File handles aren't supported. + DCHECK_EQ(handle, kInvalidSharedMemoryHandle); + size_t page_size = AllocatePageSize(); DCHECK_EQ(0, size % page_size); DCHECK_EQ(0, alignment % page_size); @@ -1138,8 +1141,10 @@ bool OS::CanReserveAddressSpace() { // static std::optional OS::CreateAddressSpaceReservation( - void* hint, size_t size, size_t alignment, - MemoryPermission max_permission) { + void* hint, size_t size, size_t alignment, MemoryPermission max_permission, + PlatformSharedMemoryHandle handle) { + // File handles aren't supported.
+ DCHECK_EQ(handle, kInvalidSharedMemoryHandle); CHECK(CanReserveAddressSpace()); size_t page_size = AllocatePageSize(); @@ -1256,6 +1261,7 @@ void OS::Abort() { ExitProcess(-1); case AbortMode::kImmediateCrash: IMMEDIATE_CRASH(); + case AbortMode::kExitIfNoSecurityImpact: case AbortMode::kDefault: break; } diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h index 53d1042e0a380f..d0bb7d6154eda9 100644 --- a/deps/v8/src/base/platform/platform.h +++ b/deps/v8/src/base/platform/platform.h @@ -382,9 +382,9 @@ class V8_BASE_EXPORT OS { static void* GetRandomMmapAddr(); - V8_WARN_UNUSED_RESULT static void* Allocate(void* address, size_t size, - size_t alignment, - MemoryPermission access); + V8_WARN_UNUSED_RESULT static void* Allocate( + void* address, size_t size, size_t alignment, MemoryPermission access, + PlatformSharedMemoryHandle handle = kInvalidSharedMemoryHandle); V8_WARN_UNUSED_RESULT static void* AllocateShared(size_t size, MemoryPermission access); @@ -419,8 +419,10 @@ class V8_BASE_EXPORT OS { V8_WARN_UNUSED_RESULT static bool CanReserveAddressSpace(); V8_WARN_UNUSED_RESULT static std::optional - CreateAddressSpaceReservation(void* hint, size_t size, size_t alignment, - MemoryPermission max_permission); + CreateAddressSpaceReservation( + void* hint, size_t size, size_t alignment, + MemoryPermission max_permission, + PlatformSharedMemoryHandle handle = kInvalidSharedMemoryHandle); static void FreeAddressSpaceReservation(AddressSpaceReservation reservation); diff --git a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc index 85ede6ba33acd4..38bd6af4fa3d49 100644 --- a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc +++ b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc @@ -65,9 +65,10 @@ void LsanVirtualAddressSpace::FreeSharedPages(Address address, size_t size) { std::unique_ptr LsanVirtualAddressSpace::AllocateSubspace( Address hint, size_t size, size_t alignment, PagePermissions max_page_permissions, - std::optional key) { - auto subspace = - vas_->AllocateSubspace(hint, size, alignment, max_page_permissions, key); + std::optional key, + PlatformSharedMemoryHandle handle) { + auto subspace = vas_->AllocateSubspace(hint, size, alignment, + max_page_permissions, key, handle); #if defined(LEAK_SANITIZER) if (subspace) { subspace = std::make_unique(std::move(subspace)); diff --git a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h index 8bef3322a4c4ec..8a35c380257697 100644 --- a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h +++ b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h @@ -70,7 +70,8 @@ class V8_BASE_EXPORT LsanVirtualAddressSpace final std::unique_ptr AllocateSubspace( Address hint, size_t size, size_t alignment, PagePermissions max_page_permissions, - std::optional key) override; + std::optional key = std::nullopt, + PlatformSharedMemoryHandle handle = kInvalidSharedMemoryHandle) override; bool DiscardSystemPages(Address address, size_t size) override { return vas_->DiscardSystemPages(address, size); diff --git a/deps/v8/src/base/sanitizer/msan.h b/deps/v8/src/base/sanitizer/msan.h index 5ae40c3abbe75e..a6282363da1240 100644 --- a/deps/v8/src/base/sanitizer/msan.h +++ b/deps/v8/src/base/sanitizer/msan.h @@ -24,6 +24,9 @@ #define DISABLE_MSAN __attribute__((no_sanitize("memory"))) +#define MSAN_CHECK_MEM_IS_INITIALIZED(start, byte_size) \ + 
__msan_check_mem_is_initialized(start, byte_size) + #else // !V8_USE_MEMORY_SANITIZER #define MSAN_ALLOCATED_UNINITIALIZED_MEMORY(start, size) \ @@ -39,6 +42,12 @@ #define DISABLE_MSAN +#define MSAN_CHECK_MEM_IS_INITIALIZED(start, byte_size) \ + static_assert(std::is_pointer_v, "static type violation"); \ + static_assert(std::is_convertible_v, \ + "static type violation"); \ + USE(start, byte_size) + #endif // V8_USE_MEMORY_SANITIZER #endif // V8_BASE_SANITIZER_MSAN_H_ diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h index 0ff77ff83b9244..23cb27c7652abb 100644 --- a/deps/v8/src/base/small-vector.h +++ b/deps/v8/src/base/small-vector.h @@ -20,6 +20,11 @@ namespace base { // dynamic storage when it overflows. template > class SmallVector { + // TODO(mliedtke): Remove kHasTrivialElement and replace usages with the + // proper conditions. + static constexpr bool kHasTrivialElement = + is_trivially_copyable::value && is_trivially_destructible::value; + public: static constexpr size_t kInlineSize = kSize; using value_type = T; @@ -32,8 +37,13 @@ class SmallVector { SmallVector() = default; explicit SmallVector(const Allocator& allocator) : allocator_(allocator) {} + // Constructs a SmallVector with `size` elements. These elements will be + // default-initialized(!), differently to e.g. `std::vector`. If + // value-initialization is desired, use the constructor overload with an + // explicit `initial_value` instead. explicit V8_INLINE SmallVector(size_t size, const Allocator& allocator = Allocator()) + requires std::default_initializable : allocator_(allocator) { resize(size); } @@ -249,22 +259,35 @@ class SmallVector { DCHECK_LE(erase_start, erase_end); DCHECK_LE(erase_end, end_); T* new_end = std::move(erase_end, end_, erase_start); - std::destroy_n(new_end, std::distance(new_end, end_)); + std::destroy(new_end, end_); end_ = new_end; return erase_start; } T* erase(T* pos) { return erase(pos, pos + 1); } - void resize(size_t new_size) { + // Resizes the SmallVector to the provided `new_size`. If `new_size` is larger + // than the current size, the new elements will not be default-initialized, + // (meaning the objects will only be allocated, not constructed.) + // This is only valid if `T` is an implicit lifetime type. + void resize_no_init(size_t new_size) + requires kHasTrivialElement + { + if (new_size > capacity()) Grow(new_size); + end_ = begin_ + new_size; + } + + // Resizes the SmallVector to the provided `new_size`. If `new_size` is larger + // than the current size, the new elements will be default-initialized. + void resize(size_t new_size) + requires std::default_initializable + { if (new_size > capacity()) Grow(new_size); T* new_end = begin_ + new_size; - if constexpr (!kHasTrivialElement) { - if (new_end > end_) { - std::uninitialized_default_construct(end_, new_end); - } else { - std::destroy_n(new_end, end_ - new_end); - } + if (new_end > end_) { + std::uninitialized_default_construct(end_, new_end); + } else { + std::destroy(new_end, end_); } end_ = new_end; } @@ -275,7 +298,7 @@ class SmallVector { if (new_end > end_) { std::uninitialized_fill(end_, new_end, initial_value); } else { - std::destroy_n(new_end, end_ - new_end); + std::destroy(new_end, end_); } end_ = new_end; } @@ -286,7 +309,7 @@ class SmallVector { // Clear without reverting back to inline storage. 
void clear() { - std::destroy_n(begin_, end_ - begin_); + std::destroy(begin_, end_); end_ = begin_; } @@ -318,7 +341,7 @@ class SmallVector { } V8_NOINLINE V8_PRESERVE_MOST void FreeStorage() { - std::destroy_n(begin_, end_ - begin_); + std::destroy(begin_, end_); if (is_big()) allocator_.deallocate(begin_, end_of_storage_ - begin_); } @@ -326,7 +349,7 @@ class SmallVector { // internal use only. void reset_to_inline_storage() { if constexpr (!kHasTrivialElement) { - if (!is_big()) std::destroy_n(begin_, end_ - begin_); + if (!is_big()) std::destroy(begin_, end_); } begin_ = inline_storage_begin(); end_ = begin_; @@ -352,9 +375,6 @@ class SmallVector { T* end_ = begin_; T* end_of_storage_ = begin_ + kInlineSize; alignas(T) char inline_storage_[sizeof(T) * kInlineSize]; - - static constexpr bool kHasTrivialElement = - is_trivially_copyable::value && is_trivially_destructible::value; }; } // namespace base diff --git a/deps/v8/src/base/string-format.h b/deps/v8/src/base/string-format.h index 9532c1c91a38b3..3a3cd3fb8bc552 100644 --- a/deps/v8/src/base/string-format.h +++ b/deps/v8/src/base/string-format.h @@ -185,6 +185,37 @@ class FormattedString { // explicitly declare template parameters anyway. FormattedString()->FormattedString<>; +class JSONEscaped { + public: + template + explicit JSONEscaped(const T& value) { + std::ostringstream s; + s << value; + str_ = s.str(); + } + explicit JSONEscaped(std::string str) : str_(std::move(str)) {} + explicit JSONEscaped(const std::ostringstream& os) : str_(os.str()) {} + + friend std::ostream& operator<<(std::ostream& os, const JSONEscaped& e) { + for (char c : e.str_) PipeCharacter(os, c); + return os; + } + + private: + static std::ostream& PipeCharacter(std::ostream& os, char c) { + if (c == '"') return os << "\\\""; + if (c == '\\') return os << "\\\\"; + if (c == '\b') return os << "\\b"; + if (c == '\f') return os << "\\f"; + if (c == '\n') return os << "\\n"; + if (c == '\r') return os << "\\r"; + if (c == '\t') return os << "\\t"; + return os << c; + } + + std::string str_; +}; + } // namespace v8::base #endif // V8_BASE_STRING_FORMAT_H_ diff --git a/deps/v8/src/base/virtual-address-space.cc b/deps/v8/src/base/virtual-address-space.cc index 4450c71851cd2c..ac38257e7b4877 100644 --- a/deps/v8/src/base/virtual-address-space.cc +++ b/deps/v8/src/base/virtual-address-space.cc @@ -148,7 +148,8 @@ void VirtualAddressSpace::FreeSharedPages(Address address, size_t size) { std::unique_ptr VirtualAddressSpace::AllocateSubspace( Address hint, size_t size, size_t alignment, PagePermissions max_page_permissions, - std::optional key) { + std::optional key, + PlatformSharedMemoryHandle handle) { DCHECK(IsAligned(alignment, allocation_granularity())); DCHECK(IsAligned(hint, alignment)); DCHECK(IsAligned(size, allocation_granularity())); @@ -156,7 +157,7 @@ std::unique_ptr VirtualAddressSpace::AllocateSubspace( std::optional reservation = OS::CreateAddressSpaceReservation( reinterpret_cast(hint), size, alignment, - static_cast(max_page_permissions)); + static_cast(max_page_permissions), handle); if (!reservation.has_value()) return std::unique_ptr(); return std::unique_ptr(new VirtualAddressSubspace( @@ -377,7 +378,10 @@ std::unique_ptr VirtualAddressSubspace::AllocateSubspace( Address hint, size_t size, size_t alignment, PagePermissions max_page_permissions, - std::optional key) { + std::optional key, + PlatformSharedMemoryHandle handle) { + // File backed mapping isn't supported for subspaces. 
+ DCHECK_EQ(handle, kInvalidSharedMemoryHandle); #if V8_HAS_PKU_SUPPORT // We don't allow subspaces with different keys as that could be unexpected. // If we ever want to support this, we should probably require specifying diff --git a/deps/v8/src/base/virtual-address-space.h b/deps/v8/src/base/virtual-address-space.h index 281ab1da4cef6e..24f3fedee45428 100644 --- a/deps/v8/src/base/virtual-address-space.h +++ b/deps/v8/src/base/virtual-address-space.h @@ -84,7 +84,8 @@ class V8_BASE_EXPORT VirtualAddressSpace : public VirtualAddressSpaceBase { std::unique_ptr AllocateSubspace( Address hint, size_t size, size_t alignment, PagePermissions max_page_permissions, - std::optional key = std::nullopt) override; + std::optional key = std::nullopt, + PlatformSharedMemoryHandle handle = kInvalidSharedMemoryHandle) override; bool RecommitPages(Address address, size_t size, PagePermissions access) override; @@ -135,7 +136,8 @@ class V8_BASE_EXPORT VirtualAddressSubspace : public VirtualAddressSpaceBase { std::unique_ptr AllocateSubspace( Address hint, size_t size, size_t alignment, PagePermissions max_page_permissions, - std::optional key = std::nullopt) override; + std::optional key = std::nullopt, + PlatformSharedMemoryHandle handle = kInvalidSharedMemoryHandle) override; bool RecommitPages(Address address, size_t size, PagePermissions permissions) override; diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc index 29600e3017d0f5..cd3d4b5de672ef 100644 --- a/deps/v8/src/baseline/baseline-compiler.cc +++ b/deps/v8/src/baseline/baseline-compiler.cc @@ -530,6 +530,12 @@ void BaselineCompiler::VisitSingleBytecode() { } } +#ifdef DEBUG + // We've now executed the bytecode, so any remaining effects (e.g. tracing) + // are skippable. + effect_state_.safe_to_skip = true; +#endif + #ifdef V8_TRACE_UNOPTIMIZED TraceBytecode(Runtime::kTraceUnoptimizedBytecodeExit); #endif @@ -639,6 +645,18 @@ constexpr static bool BuiltinMayDeopt(Builtin id) { return true; } } +constexpr static bool RuntimeFunctionMayDeopt(Runtime::FunctionId function) { + switch (function) { +#ifdef V8_TRACE_UNOPTIMIZED + case Runtime::kTraceUnoptimizedBytecodeEntry: + case Runtime::kTraceUnoptimizedBytecodeExit: + return false; +#endif + default: + return true; + } +} + #endif // DEBUG || V8_ENABLE_CET_SHADOW_STACK template @@ -672,13 +690,17 @@ template void BaselineCompiler::CallRuntime(Runtime::FunctionId function, Args... 
args) { #ifdef DEBUG effect_state_.CheckEffect(); - effect_state_.MayDeopt(); + if (RuntimeFunctionMayDeopt(function)) { + effect_state_.MayDeopt(); + } #endif __ LoadContext(kContextRegister); int nargs = __ Push(args...); __ CallRuntime(function, nargs); #ifdef V8_ENABLE_CET_SHADOW_STACK - __ MaybeEmitPlaceHolderForDeopt(); + if (RuntimeFunctionMayDeopt(function)) { + __ MaybeEmitPlaceHolderForDeopt(); + } #endif // V8_ENABLE_CET_SHADOW_STACK } @@ -2467,10 +2489,18 @@ void BaselineCompiler::VisitResumeGenerator() { } void BaselineCompiler::VisitForOfNext() { - SaveAccumulatorScope accumulator_scope(this, &basm_); - CallBuiltin(RegisterOperand(0), // object - RegisterOperand(1)); // next - StoreRegisterPair(2, kReturnRegister0, kReturnRegister1); + BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_); + Register object = scratch_scope.AcquireScratch(); + Register next = scratch_scope.AcquireScratch(); + __ LoadRegister(object, RegisterOperand(0)); + __ LoadRegister(next, RegisterOperand(1)); + // Pass the output register slot as an argument, so that the builtin + // is responsible for writing into the slots. + Register out_reg_address = scratch_scope.AcquireScratch(); + basm_.RegisterFrameAddress(RegisterOperand(2), out_reg_address); + CallBuiltin(object, // object + next, // next + out_reg_address); // out_reg } void BaselineCompiler::VisitGetIterator() { diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index 40b5b6ac23f7a9..96f86508dce4ef 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -2858,6 +2858,12 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ ldr(vector, FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); + + // Increment the total invocation count of the function. + __ ldr(scratch, FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); + __ add(scratch, scratch, Operand(Smi::FromInt(1))); + __ str(scratch, FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); + __ push(kWasmImplicitArgRegister); __ push(vector); __ Ret(); @@ -3384,6 +3390,43 @@ void Builtins::Generate_WasmFXResume(MacroAssembler* masm) { __ Jump(lr); } +void Builtins::Generate_WasmFXSuspend(MacroAssembler* masm) { + __ EnterFrame(StackFrame::WASM_STACK_EXIT); + Register tag = WasmFXSuspendDescriptor::GetRegisterParameter(0); + Register cont = WasmFXSuspendDescriptor::GetRegisterParameter(1); + Label resume; + __ Push(cont, kContextRegister); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(6); + __ str(tag, MemOperand(sp, 0)); // arg 4 + __ str(cont, MemOperand(sp, kSystemPointerSize)); // arg 5 + __ Move(kCArgRegs[0], ExternalReference::isolate_address()); + __ Move(kCArgRegs[1], sp); + __ Move(kCArgRegs[2], fp); + __ GetLabelAddress(kCArgRegs[3], &resume); + __ CallCFunction(ExternalReference::wasm_suspend_wasmfx_stack(), 6); + } + Register target_stack = r1; + __ Move(target_stack, kReturnRegister0); + cont = kReturnRegister0; + __ Pop(cont, kContextRegister); + + Label ok; + __ cmp(target_stack, Operand(0)); + __ b(ne, &ok); + // No handler found. 
+ __ CallRuntime(Runtime::kThrowWasmSuspendError); + + __ bind(&ok); + DCHECK_EQ(cont, kReturnRegister0); + LoadJumpBuffer(masm, target_stack, true, r3); + __ Trap(); + __ bind(&resume); + __ LeaveFrame(StackFrame::WASM_STACK_EXIT); + __ Ret(); +} + void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) { Register active_stack = r0; __ LoadRootRelative(active_stack, IsolateData::active_stack_offset()); diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index efbef17689fe82..73cb0b4e13391a 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -3366,6 +3366,17 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); + // Increment the total invocation count of the function. + __ LoadTaggedField(scratch, + FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); + if (SmiValuesAre31Bits()) { + __ Add(scratch.W(), scratch.W(), Immediate(Smi::FromInt(1))); + } else { + DCHECK(scratch.IsX()); + __ Add(scratch.X(), scratch.X(), Immediate(Smi::FromInt(1))); + } + __ StoreTaggedField( + scratch, FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); __ Push(vector, xzr); __ Ret(); @@ -3925,6 +3936,43 @@ void Builtins::Generate_WasmFXResume(MacroAssembler* masm) { __ Ret(lr); } +void Builtins::Generate_WasmFXSuspend(MacroAssembler* masm) { + __ EnterFrame(StackFrame::WASM_STACK_EXIT); + Register tag = WasmFXSuspendDescriptor::GetRegisterParameter(0); + Register cont = WasmFXSuspendDescriptor::GetRegisterParameter(1); + Label resume; + __ Push(cont, kContextRegister); + { + FrameScope scope(masm, StackFrame::MANUAL); + DCHECK_NE(kCArgRegs[4], cont); + __ Mov(kCArgRegs[4], tag); + __ Mov(kCArgRegs[5], cont); + __ Mov(kCArgRegs[0], ExternalReference::isolate_address()); + __ Mov(kCArgRegs[1], sp); + __ Mov(kCArgRegs[2], fp); + __ Adr(kCArgRegs[3], &resume); + __ CallCFunction(ExternalReference::wasm_suspend_wasmfx_stack(), 6); + } + Register target_stack = x1; + __ Move(target_stack, kReturnRegister0); + cont = kReturnRegister0; + __ Pop(kContextRegister, cont); + + Label ok; + __ cmp(target_stack, Operand(0)); + __ B(ne, &ok); + // No handler found. + __ CallRuntime(Runtime::kThrowWasmSuspendError); + + __ bind(&ok); + DCHECK_EQ(cont, kReturnRegister0); + LoadJumpBuffer(masm, target_stack, true, x3); + __ Trap(); + __ Bind(&resume, BranchTargetIdentifier::kBtiJump); + __ LeaveFrame(StackFrame::WASM_STACK_EXIT); + __ Ret(); +} + void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) { Register active_stack = x0; __ LoadRootRelative(active_stack, IsolateData::active_stack_offset()); diff --git a/deps/v8/src/builtins/array-reduce-right.tq b/deps/v8/src/builtins/array-reduce-right.tq index 7a6c82268813d1..0cda51eda4f135 100644 --- a/deps/v8/src/builtins/array-reduce-right.tq +++ b/deps/v8/src/builtins/array-reduce-right.tq @@ -55,7 +55,7 @@ transitioning javascript builtin ArrayReduceRightLoopLazyDeoptContinuation( const numberK = Cast(initialK) otherwise unreachable; const numberLength = Cast(length) otherwise unreachable; - // The accumulator is the result from the callback call which just occured. + // The accumulator is the result from the callback call which just occurred. 
const r = ArrayReduceRightLoopContinuation( jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength); return r; diff --git a/deps/v8/src/builtins/array-reduce.tq b/deps/v8/src/builtins/array-reduce.tq index a67347805c8982..374cdeb9d954c4 100644 --- a/deps/v8/src/builtins/array-reduce.tq +++ b/deps/v8/src/builtins/array-reduce.tq @@ -54,7 +54,7 @@ transitioning javascript builtin ArrayReduceLoopLazyDeoptContinuation( const numberK = Cast(initialK) otherwise unreachable; const numberLength = Cast(length) otherwise unreachable; - // The accumulator is the result from the callback call which just occured. + // The accumulator is the result from the callback call which just occurred. const r = ArrayReduceLoopContinuation( jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength); return r; diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index 1fb7c7b21e489a..b0cc66266519e1 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -498,6 +498,7 @@ extern enum MessageTemplate { kWasmTrapDivUnrepresentable, kWasmTrapRemByZero, kWasmTrapFloatUnrepresentable, + kWasmTrapNullFunc, kWasmTrapFuncSigMismatch, kWasmTrapDataSegmentOutOfBounds, kWasmTrapElementSegmentOutOfBounds, @@ -509,6 +510,7 @@ extern enum MessageTemplate { kWasmTrapArrayOutOfBounds, kWasmTrapArrayTooLarge, kWasmTrapStringOffsetOutOfBounds, + kWasmTrapResume, kWasmObjectsAreOpaque, kWeakRefsRegisterTargetAndHoldingsMustNotBeSame, kInvalidWeakRefsRegisterTarget, @@ -807,8 +809,6 @@ extern macro GetInterestingProperty(Context, JSReceiver, Name): JSAny labels NotFound; extern transitioning builtin SetProperty( implicit context: Context)(JSAny, JSAny, JSAny): JSAny; -extern transitioning builtin SetPropertyIgnoreAttributes( - implicit context: Context)(JSObject, String, JSAny, Smi): JSAny; extern transitioning builtin CreateDataProperty( implicit context: Context)(JSAny, JSAny, JSAny): JSAny; extern transitioning builtin DeleteProperty( @@ -936,6 +936,9 @@ extern macro LoadBufferPointer(RawPtr, constexpr int32): RawPtr; extern macro LoadBufferSmi(RawPtr, constexpr int32): Smi; extern macro LoadBufferIntptr(RawPtr, constexpr int32): intptr; +extern macro LoadFullTagged(RawPtr): Object; +extern macro StoreFullTaggedNoWriteBarrier(RawPtr, Object): void; + extern runtime StringEqual(Context, String, String): Oddball; extern builtin StringLessThan(String, String): Boolean; extern builtin StringCompare(String, String): Smi; @@ -2228,7 +2231,7 @@ operator '==' macro HashFieldTypeEquals( const kNoHashSentinel: constexpr int32 generates 'PropertyArray::kNoHashSentinel'; -extern macro LoadNameHash(Name): uint32; +extern macro LoadNameHashAssumeComputed(Name): uint32; extern transitioning builtin ToName(implicit context: Context)(JSAny): AnyName; diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc index 822d5a332efad2..1336d4f74e5173 100644 --- a/deps/v8/src/builtins/builtins-api.cc +++ b/deps/v8/src/builtins/builtins-api.cc @@ -29,7 +29,8 @@ bool IsCompatibleReceiver(Isolate* isolate, Tagged info, Tagged recv_type = info->signature(); // No signature, so function can be called with any receiver. if (!IsFunctionTemplateInfo(recv_type)) return true; - // A Proxy cannot have been created from the signature template. + // A Proxy or Wasm object cannot have been created from the signature + // template. 
if (!IsJSObject(receiver)) return false; Tagged js_obj_receiver = Cast(receiver); @@ -80,7 +81,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle HandleApiCallHelper( js_receiver = indirect_handle(Cast(receiver), isolate); if (!fun_data->accept_any_receiver() && IsAccessCheckNeeded(*js_receiver)) { - // Proxies never need access checks. + // Proxies and Wasm objects never need access checks. DCHECK(IsJSObject(*js_receiver)); DirectHandle js_object = Cast(js_receiver); if (!isolate->MayAccess(isolate->native_context(), js_object)) { diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc index 5d6351f0f8b8cd..82654f718f085c 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.cc +++ b/deps/v8/src/builtins/builtins-collections-gen.cc @@ -1195,7 +1195,7 @@ template void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForStringKey( TNode table, TNode key_tagged, TVariable* result, Label* entry_found, Label* not_found) { - const TNode hash = LoadNameHash(key_tagged); + const TNode hash = LoadNameHashAssumeComputed(key_tagged); *result = Signed(ChangeUint32ToWord(hash)); FindOrderedHashTableEntry( table, hash, @@ -1554,38 +1554,61 @@ CollectionsBuiltinsAssembler::NextKeyValueIndexTuple( return TorqueStructKeyValueIndexTuple{key, value, next_index}; } +TNode CollectionsBuiltinsAssembler::LoadTable( + TNode receiver) { + CSA_DCHECK(this, HasInstanceType(receiver, JS_MAP_TYPE)); + return LoadObjectField(receiver, JSMap::kTableOffset); +} + +TNode CollectionsBuiltinsAssembler::TableGetIfExists( + const TNode context, TNode receiver, + TNode table, const TNode key, Label* if_found, + Label* if_not_found) { + const TNode index = + CAST(CallBuiltin(Builtin::kFindOrderedHashMapEntry, context, table, key)); + + Label if_found_internal(this); + Branch(SmiGreaterThanOrEqual(index, SmiConstant(0)), &if_found_internal, + if_not_found); + + BIND(&if_found_internal); + TNode res = + CAST(LoadValueFromOrderedHashMapEntry(table, SmiUntag(index))); + Goto(if_found); + + return res; +} + TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) { - const auto receiver = Parameter(Descriptor::kReceiver); - const auto key = Parameter(Descriptor::kKey); + const auto orig_receiver = Parameter(Descriptor::kReceiver); + const auto key = Parameter(Descriptor::kKey); const auto context = Parameter(Descriptor::kContext); - ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.get"); - - const TNode table = - LoadObjectField(CAST(receiver), JSMap::kTableOffset); - TNode index = - CAST(CallBuiltin(Builtin::kFindOrderedHashMapEntry, context, table, key)); + ThrowIfNotInstanceType(context, orig_receiver, JS_MAP_TYPE, + "Map.prototype.get"); + const TNode receiver = CAST(orig_receiver); Label if_found(this), if_not_found(this); - Branch(SmiGreaterThanOrEqual(index, SmiConstant(0)), &if_found, - &if_not_found); + TNode res = TableGetIfExists(context, receiver, LoadTable(receiver), + key, &if_found, &if_not_found); BIND(&if_found); - Return(LoadValueFromOrderedHashMapEntry(CAST(table), SmiUntag(index))); + Return(res); BIND(&if_not_found); Return(UndefinedConstant()); } TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) { - const auto receiver = Parameter(Descriptor::kReceiver); + const auto orig_receiver = Parameter(Descriptor::kReceiver); const auto key = Parameter(Descriptor::kKey); const auto context = Parameter(Descriptor::kContext); - ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.has"); + ThrowIfNotInstanceType(context, 
orig_receiver, JS_MAP_TYPE, + "Map.prototype.has"); + const TNode receiver = CAST(orig_receiver); - const TNode table = - CAST(LoadObjectField(CAST(receiver), JSMap::kTableOffset)); + const TNode table = LoadTable(receiver); Label if_found(this), if_not_found(this); Branch(TableHasKey(context, table, key), &if_found, &if_not_found); @@ -1628,8 +1651,8 @@ template TNode CollectionsBuiltinsAssembler::AddToOrderedHashTable( const TNode table, TVariable* key, const GrowCollection& grow, - const StoreAtEntry& store_at_new_entry, - const StoreAtEntry& store_at_existing_entry) { + const ApplyAtEntry& store_at_new_entry, + const ApplyAtEntry& store_at_existing_entry) { TVARIABLE(CollectionType, table_var, table); TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0)); Label entry_found(this), not_found(this), done(this); @@ -1704,45 +1727,123 @@ TNode CollectionsBuiltinsAssembler::AddToOrderedHashTable( return table_var.value(); } +void CollectionsBuiltinsAssembler::TableSet(const TNode context, + TNode receiver, + TNode table, + TVariable* key, + TNode value) { + GrowCollection grow = [this, context, receiver]() { + CallRuntime(Runtime::kMapGrow, context, receiver); + return LoadTable(receiver); + }; + + ApplyAtEntry store_at_new_entry = + [this, &key, value](const TNode tbl, + const TNode entry_start) { + UnsafeStoreKeyValueInOrderedHashMapEntry(tbl, key->value(), value, + entry_start); + }; + + ApplyAtEntry store_at_existing_entry = + [this, value](const TNode tbl, + const TNode entry_start) { + UnsafeStoreValueInOrderedHashMapEntry(tbl, value, entry_start); + }; + + AddToOrderedHashTable(table, key, grow, store_at_new_entry, + store_at_existing_entry); +} + TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) { - const auto receiver = Parameter(Descriptor::kReceiver); + const auto orig_receiver = Parameter(Descriptor::kReceiver); auto original_key = Parameter(Descriptor::kKey); - const auto value = Parameter(Descriptor::kValue); + const auto value = Parameter(Descriptor::kValue); const auto context = Parameter(Descriptor::kContext); TVARIABLE(JSAny, key, original_key); - ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.set"); + ThrowIfNotInstanceType(context, orig_receiver, JS_MAP_TYPE, + "Map.prototype.set"); + const TNode receiver = CAST(orig_receiver); + + TableSet(context, receiver, LoadTable(receiver), &key, value); + + Return(receiver); +} + +TF_BUILTIN(MapPrototypeGetOrInsert, CollectionsBuiltinsAssembler) { + const auto orig_receiver = Parameter(Descriptor::kReceiver); + auto original_key = Parameter(Descriptor::kKey); + const auto value = Parameter(Descriptor::kValue); + const auto context = Parameter(Descriptor::kContext); + + TVARIABLE(JSAny, key, original_key); + ThrowIfNotInstanceType(context, orig_receiver, JS_MAP_TYPE, + "Map.prototype.getOrInsert"); + const TNode receiver = CAST(orig_receiver); + + TVARIABLE(JSAny, res); GrowCollection grow = [this, context, receiver]() { CallRuntime(Runtime::kMapGrow, context, receiver); - return LoadObjectField(CAST(receiver), JSMap::kTableOffset); + return LoadTable(receiver); }; - StoreAtEntry store_at_new_entry = - [this, &key, value](const TNode table, - const TNode entry_start) { - UnsafeStoreKeyValueInOrderedHashMapEntry(table, key.value(), value, + ApplyAtEntry store_at_new_entry = + [this, &key, value, &res](const TNode tbl, + const TNode entry_start) { + UnsafeStoreKeyValueInOrderedHashMapEntry(tbl, key.value(), value, entry_start); + res = value; }; - StoreAtEntry store_at_existing_entry = - 
[this, value](const TNode table, - const TNode entry_start) { - UnsafeStoreValueInOrderedHashMapEntry(table, value, entry_start); + ApplyAtEntry load_at_existing_entry = + [this, value, &res](const TNode tbl, + const TNode entry_start) { + res = CAST(LoadValueFromOrderedHashMapEntry(tbl, entry_start)); }; - const TNode table = - LoadObjectField(CAST(receiver), JSMap::kTableOffset); - AddToOrderedHashTable(table, &key, grow, store_at_new_entry, - store_at_existing_entry); - Return(receiver); + AddToOrderedHashTable(LoadTable(receiver), &key, grow, store_at_new_entry, + load_at_existing_entry); + + Return(res.value()); +} + +TF_BUILTIN(MapPrototypeGetOrInsertComputed, CollectionsBuiltinsAssembler) { + const auto orig_receiver = Parameter(Descriptor::kReceiver); + auto original_key = Parameter(Descriptor::kKey); + const auto callback = Parameter(Descriptor::kCallbackfn); + const auto context = Parameter(Descriptor::kContext); + + TVARIABLE(JSAny, key, original_key); + ThrowIfNotInstanceType(context, orig_receiver, JS_MAP_TYPE, + "Map.prototype.getOrInsertComputed"); + const TNode receiver = CAST(orig_receiver); + ThrowIfNotCallable(context, callback, "Map.prototype.getOrInsertComputed"); + + Label if_found(this), if_not_found(this); + TNode res = TableGetIfExists(context, receiver, LoadTable(receiver), + key.value(), &if_found, &if_not_found); + + BIND(&if_found); + Return(res); + + BIND(&if_not_found); + key = NormalizeNumberKey(key.value()); + TNode value = + Call(context, callback, UndefinedConstant(), key.value()); + // NOTE: The Map may have been modified during execution of _callback_. + // TODO(olivf): Add a fastcase similar to GetOrInsert for when it doesn't. + + TableSet(context, receiver, LoadTable(receiver), &key, value); + + Return(value); } template void CollectionsBuiltinsAssembler::StoreOrderedHashTableNewEntry( const TNode table, const TNode hash, const TNode number_of_buckets, const TNode occupancy, - const StoreAtEntry& store_at_new_entry) { + const ApplyAtEntry& store_at_new_entry) { const TNode bucket = WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1))); TNode bucket_entry = CAST(UnsafeLoadFixedArrayElement( @@ -1779,7 +1880,7 @@ template void CollectionsBuiltinsAssembler::AddNewToOrderedHashTable( const TNode table, const TNode normalised_key, const TNode number_of_buckets, const TNode occupancy, - const StoreAtEntry& store_at_new_entry) { + const ApplyAtEntry& store_at_new_entry) { Label if_key_smi(this), if_key_string(this), if_key_heap_number(this), if_key_bigint(this), if_key_other(this), call_store(this); TVARIABLE(IntPtrT, hash, IntPtrConstant(0)); @@ -1810,7 +1911,8 @@ void CollectionsBuiltinsAssembler::AddNewToOrderedHashTable( { CSA_DCHECK(this, IsInternalizedStringInstanceType(key_instance_type)); - hash = Signed(ChangeUint32ToWord(LoadNameHash(CAST(normalised_key)))); + hash = Signed( + ChangeUint32ToWord(LoadNameHashAssumeComputed(CAST(normalised_key)))); Goto(&call_store); } @@ -1921,13 +2023,13 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) { return LoadObjectField(CAST(receiver), JSSet::kTableOffset); }; - StoreAtEntry store_at_new_entry = + ApplyAtEntry store_at_new_entry = [this, &key](const TNode table, const TNode entry_start) { UnsafeStoreKeyInOrderedHashSetEntry(table, key.value(), entry_start); }; - StoreAtEntry store_at_existing_entry = + ApplyAtEntry store_at_existing_entry = [](const TNode, const TNode) { // If the entry was found, there is nothing to do. 
}; @@ -1952,13 +2054,13 @@ TNode CollectionsBuiltinsAssembler::AddToSetTable( return new_table; }; - StoreAtEntry store_at_new_entry = + ApplyAtEntry store_at_new_entry = [this, &key](const TNode table, const TNode entry_start) { UnsafeStoreKeyInOrderedHashSetEntry(table, key.value(), entry_start); }; - StoreAtEntry store_at_existing_entry = + ApplyAtEntry store_at_existing_entry = [](const TNode, const TNode) { // If the entry was found, there is nothing to do. }; @@ -2549,7 +2651,7 @@ const TNode CollectionsBuiltinsAssembler::AddValueToKeyedGroup( return new_groups; }; - StoreAtEntry store_at_new_entry = + ApplyAtEntry store_at_new_entry = [this, &key, value](const TNode table, const TNode entry_start) { TNode array = AllocateArrayList(SmiConstant(1)); @@ -2559,7 +2661,7 @@ const TNode CollectionsBuiltinsAssembler::AddValueToKeyedGroup( entry_start); }; - StoreAtEntry store_at_existing_entry = + ApplyAtEntry store_at_existing_entry = [this, &key, value](const TNode table, const TNode entry_start) { TNode array = @@ -2602,7 +2704,7 @@ TNode WeakCollectionsBuiltinsAssembler::GetHash( CSA_DCHECK(this, Word32BinaryNot( Word32And(LoadSymbolFlags(CAST(key)), Symbol::IsInPublicSymbolTableBit::kMask))); - var_hash = Signed(ChangeUint32ToWord(LoadNameHash(CAST(key), nullptr))); + var_hash = Signed(ChangeUint32ToWord(LoadNameHashAssumeComputed(CAST(key)))); Goto(&return_result); Bind(&return_result); return var_hash.value(); @@ -2867,39 +2969,55 @@ TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) { Return(SmiConstant(-1)); } -TF_BUILTIN(WeakMapGet, WeakCollectionsBuiltinsAssembler) { - const auto receiver = Parameter(Descriptor::kReceiver); - const auto key = Parameter(Descriptor::kKey); +TNode WeakCollectionsBuiltinsAssembler::TableGetIfExists( + const TNode context, TNode receiver, + TNode table, const TNode key, Label* if_found, + Label* if_not_found) { + const TNode index = + CAST(CallBuiltin(Builtin::kWeakMapLookupHashIndex, context, table, key)); + + GotoIf(TaggedEqual(index, SmiConstant(-1)), if_not_found); + + TNode res = CAST(LoadFixedArrayElement(table, SmiUntag(index))); + Goto(if_found); + + return res; +} + +TF_BUILTIN(WeakMapPrototypeGet, WeakCollectionsBuiltinsAssembler) { + const auto orig_receiver = Parameter(Descriptor::kReceiver); + const auto key = Parameter(Descriptor::kKey); const auto context = Parameter(Descriptor::kContext); Label return_undefined(this); - ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE, + ThrowIfNotInstanceType(context, orig_receiver, JS_WEAK_MAP_TYPE, "WeakMap.prototype.get"); + TNode receiver = CAST(orig_receiver); - const TNode table = LoadTable(CAST(receiver)); - const TNode index = - CAST(CallBuiltin(Builtin::kWeakMapLookupHashIndex, context, table, key)); - - GotoIf(TaggedEqual(index, SmiConstant(-1)), &return_undefined); + Label if_found(this), if_not_found(this); + TNode res = TableGetIfExists(context, receiver, LoadTable(receiver), + key, &if_found, &if_not_found); - Return(LoadFixedArrayElement(table, SmiUntag(index))); + BIND(&if_found); + Return(res); - BIND(&return_undefined); + BIND(&if_not_found); Return(UndefinedConstant()); } TF_BUILTIN(WeakMapPrototypeHas, WeakCollectionsBuiltinsAssembler) { - const auto receiver = Parameter(Descriptor::kReceiver); - const auto key = Parameter(Descriptor::kKey); + const auto orig_receiver = Parameter(Descriptor::kReceiver); + const auto key = Parameter(Descriptor::kKey); const auto context = Parameter(Descriptor::kContext); Label return_false(this); - 
ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE, + ThrowIfNotInstanceType(context, orig_receiver, JS_WEAK_MAP_TYPE, "WeakMap.prototype.has"); + TNode receiver = CAST(orig_receiver); - const TNode table = LoadTable(CAST(receiver)); + const TNode table = LoadTable(receiver); const TNode index = CallBuiltin(Builtin::kWeakMapLookupHashIndex, context, table, key); @@ -2942,17 +3060,13 @@ TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) { SmiTag(hash))); } -// Helper that sets the key and value to the backing store (EphemeronHashTable) -// of a WeakMap or WeakSet. -TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) { - auto context = Parameter(Descriptor::kContext); - auto collection = Parameter(Descriptor::kCollection); - auto key = Parameter(Descriptor::kKey); - auto value = Parameter(Descriptor::kValue); - +void WeakCollectionsBuiltinsAssembler::AddToEphemeronHashTable( + const TNode context, const TNode collection, + const TNode key, const TNode value, + const ApplyAtEntry& existing_entry) { CSA_DCHECK(this, Word32Or(IsJSReceiver(key), IsSymbol(key))); - Label call_runtime(this), if_no_hash(this), if_not_found(this); + Label call_runtime(this), if_no_hash(this), if_not_found(this), done(this); TNode table = LoadTable(collection); TNode capacity = LoadTableCapacity(table); @@ -2960,9 +3074,8 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) { TVARIABLE(IntPtrT, var_hash, GetHash(key, &if_no_hash)); TNode key_index = FindKeyIndexForKey(table, key, var_hash.value(), capacity, &if_not_found); - - StoreFixedArrayElement(table, ValueIndexFromKeyIndex(key_index), value); - Return(collection); + existing_entry(table, ValueIndexFromKeyIndex(key_index), value); + Goto(&done); BIND(&if_no_hash); { @@ -2989,14 +3102,36 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) { TNode insertion_key_index = FindKeyIndexForInsertion(table, var_hash.value(), capacity); AddEntry(table, insertion_key_index, key, value, number_of_elements); - Return(collection); + Goto(&done); } BIND(&call_runtime); { CallRuntime(Runtime::kWeakCollectionSet, context, collection, key, value, SmiTag(var_hash.value())); - Return(collection); + Goto(&done); } + BIND(&done); +} + +// Helper that sets the key and value to the backing store (EphemeronHashTable) +// of a WeakMap or WeakSet. 
+TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) { + auto context = Parameter(Descriptor::kContext); + auto collection = Parameter(Descriptor::kCollection); + auto key = Parameter(Descriptor::kKey); + auto value = Parameter(Descriptor::kValue); + + CSA_DCHECK(this, Word32Or(IsJSReceiver(key), IsSymbol(key))); + + auto store_at_existing_entry = [this](const TNode table, + const TNode entry_start, + const TNode value) { + StoreFixedArrayElement(table, entry_start, value); + }; + AddToEphemeronHashTable(context, collection, key, value, + store_at_existing_entry); + + Return(collection); } TF_BUILTIN(WeakMapPrototypeDelete, CodeStubAssembler) { @@ -3032,6 +3167,69 @@ TF_BUILTIN(WeakMapPrototypeSet, WeakCollectionsBuiltinsAssembler) { ThrowTypeError(context, MessageTemplate::kInvalidWeakMapKey, key); } +TF_BUILTIN(WeakMapPrototypeGetOrInsert, WeakCollectionsBuiltinsAssembler) { + auto context = Parameter(Descriptor::kContext); + auto orig_receiver = Parameter(Descriptor::kReceiver); + auto key = Parameter(Descriptor::kKey); + auto orig_value = Parameter(Descriptor::kValue); + + ThrowIfNotInstanceType(context, orig_receiver, JS_WEAK_MAP_TYPE, + "WeakMap.prototype.getOrInsert"); + const TNode receiver = CAST(orig_receiver); + + Label throw_invalid_key(this); + GotoIfCannotBeHeldWeakly(key, &throw_invalid_key); + + TVARIABLE(JSAny, value, orig_value); + auto existing_entry = [this, &value](const TNode table, + const TNode entry_start, + const TNode _) { + value = CAST(LoadFixedArrayElement(table, entry_start)); + }; + + AddToEphemeronHashTable(context, receiver, CAST(key), orig_value, + existing_entry); + Return(value.value()); + + BIND(&throw_invalid_key); + ThrowTypeError(context, MessageTemplate::kInvalidWeakMapKey, key); +} + +TF_BUILTIN(WeakMapPrototypeGetOrInsertComputed, + WeakCollectionsBuiltinsAssembler) { + auto context = Parameter(Descriptor::kContext); + auto orig_receiver = Parameter(Descriptor::kReceiver); + auto key = Parameter(Descriptor::kKey); + auto callback = Parameter(Descriptor::kCallbackfn); + + ThrowIfNotInstanceType(context, orig_receiver, JS_WEAK_MAP_TYPE, + "WeakMap.prototype.getOrInsertComputed"); + const TNode receiver = CAST(orig_receiver); + ThrowIfNotCallable(context, callback, + "WeakMap.prototype.getOrInsertComputed"); + + Label throw_invalid_key(this); + GotoIfCannotBeHeldWeakly(key, &throw_invalid_key); + + Label if_found(this), if_not_found(this); + TNode res = TableGetIfExists(context, receiver, LoadTable(receiver), + CAST(key), &if_found, &if_not_found); + + BIND(&if_found); + Return(res); + + BIND(&if_not_found); + TNode value = Call(context, callback, UndefinedConstant(), key); + // NOTE: The WeakMap may have been modified during execution of _callback_. + // TODO(olivf): Add a fastcase similar to GetOrInsert for when it doesn't. 
+ + CallBuiltin(Builtin::kWeakCollectionSet, context, receiver, key, value); + Return(value); + + BIND(&throw_invalid_key); + ThrowTypeError(context, MessageTemplate::kInvalidWeakMapKey, key); +} + TF_BUILTIN(WeakSetPrototypeAdd, WeakCollectionsBuiltinsAssembler) { auto context = Parameter(Descriptor::kContext); auto receiver = Parameter(Descriptor::kReceiver); @@ -3065,16 +3263,17 @@ TF_BUILTIN(WeakSetPrototypeDelete, CodeStubAssembler) { } TF_BUILTIN(WeakSetPrototypeHas, WeakCollectionsBuiltinsAssembler) { - const auto receiver = Parameter(Descriptor::kReceiver); - const auto key = Parameter(Descriptor::kKey); + const auto orig_receiver = Parameter(Descriptor::kReceiver); + const auto key = Parameter(Descriptor::kKey); const auto context = Parameter(Descriptor::kContext); Label return_false(this); - ThrowIfNotInstanceType(context, receiver, JS_WEAK_SET_TYPE, + ThrowIfNotInstanceType(context, orig_receiver, JS_WEAK_SET_TYPE, "WeakSet.prototype.has"); + TNode receiver = CAST(orig_receiver); - const TNode table = LoadTable(CAST(receiver)); + const TNode table = LoadTable(receiver); const TNode index = CallBuiltin(Builtin::kWeakMapLookupHashIndex, context, table, key); diff --git a/deps/v8/src/builtins/builtins-collections-gen.h b/deps/v8/src/builtins/builtins-collections-gen.h index eca5d860c0524d..80b53a6e11ea99 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.h +++ b/deps/v8/src/builtins/builtins-collections-gen.h @@ -198,6 +198,16 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { TNode TableHasKey(const TNode context, TNode table, TNode key); + TNode LoadTable(TNode receiver); + TNode TableGetIfExists(const TNode context, + TNode receiver, + TNode table, + const TNode key, Label* if_found, + Label* if_not_found); + void TableSet(const TNode context, TNode receiver, + TNode table, TVariable* key, + TNode value); + // Adds {value} to a FixedArray keyed by {key} in {groups}. // // Utility used by Object.groupBy and Map.groupBy. 
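For reference, a rough behavioral sketch of what the new MapPrototypeGetOrInsert, MapPrototypeGetOrInsertComputed and WeakMapPrototypeGetOrInsert* builtins above expose to JavaScript (gated behind --js-upsert later in this patch): return the existing value when the key is present, otherwise insert and return the given or computed value. The TypeScript declarations below are assumptions added purely for illustration; these methods are not in the standard library typings, and the snippet is not part of the patch.

// Sketch only; assumes an engine with --js-upsert enabled.
declare global {
  interface Map<K, V> {
    getOrInsert(key: K, value: V): V;
    getOrInsertComputed(key: K, callbackfn: (key: K) => V): V;
  }
}

const counts = new Map<string, number>();
counts.getOrInsert("hits", 0);        // key absent: inserts 0 and returns it
counts.getOrInsert("hits", 42);       // key present: returns the existing 0, ignores 42
// The callback runs only when the key is absent; as the NOTE above says, it may
// itself observe or modify the map before the insertion happens.
counts.getOrInsertComputed("misses", (k) => k.length);

export {};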
@@ -358,14 +368,14 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { template using GrowCollection = std::function()>; template - using StoreAtEntry = std::function table, + using ApplyAtEntry = std::function table, const TNode entry_start)>; template TNode AddToOrderedHashTable( const TNode table, TVariable* key, const GrowCollection& grow, - const StoreAtEntry& store_at_new_entry, - const StoreAtEntry& store_at_existing_entry); + const ApplyAtEntry& store_at_new_entry, + const ApplyAtEntry& store_at_existing_entry); template void TryLookupOrderedHashTableIndex(const TNode table, @@ -379,13 +389,13 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { void AddNewToOrderedHashTable( const TNode table, const TNode normalised_key, const TNode number_of_buckets, const TNode occupancy, - const StoreAtEntry& store_at_new_entry); + const ApplyAtEntry& store_at_new_entry); void AddNewToOrderedHashSet(const TNode table, const TNode key, const TNode number_of_buckets, const TNode occupancy) { - StoreAtEntry store_at_new_entry = + ApplyAtEntry store_at_new_entry = [this, key](const TNode table, const TNode entry_start) { UnsafeStoreKeyInOrderedHashSetEntry(table, key, entry_start); @@ -402,7 +412,7 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { void StoreOrderedHashTableNewEntry( const TNode table, const TNode hash, const TNode number_of_buckets, const TNode occupancy, - const StoreAtEntry& store_at_new_entry); + const ApplyAtEntry& store_at_new_entry); // Store payload (key, value, or both) in {table} at {entry}. Does not connect // the bucket chain and update the bucket head. @@ -548,6 +558,12 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler { TNode LoadTable(TNode collection); TNode LoadTableCapacity(TNode table); + TNode TableGetIfExists(const TNode context, + TNode receiver, + TNode table, + const TNode key, Label* if_found, + Label* if_not_found); + void RemoveEntry(TNode table, TNode key_index, TNode number_of_elements); TNode ShouldRehash(TNode number_of_elements, @@ -556,6 +572,15 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler { TNode number_of_elements); TNode ValueIndexFromKeyIndex(TNode key_index); + using ApplyAtEntry = std::function table, + const TNode entry_start, + const TNode value)>; + void AddToEphemeronHashTable(const TNode context, + const TNode collection, + const TNode key, + const TNode value, + const ApplyAtEntry& existing_entry); + void GetEntriesIfFastCollectionOrIterable( Variant variant, TNode initial_entries, TNode context, TVariable* var_entries_table, diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index ba81205ce6c6f1..e0289080929ca6 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -866,6 +866,10 @@ constexpr int kGearboxGenericBuiltinIdOffset = -2; TFJ(MapPrototypeGet, kJSArgcReceiverSlots + 1, kReceiver, kKey) \ TFJ(MapPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \ CPP(MapPrototypeClear, JSParameterCount(0)) \ + TFJ(MapPrototypeGetOrInsert, kJSArgcReceiverSlots + 2, kReceiver, kKey, \ + kValue) \ + TFJ(MapPrototypeGetOrInsertComputed, kJSArgcReceiverSlots + 2, kReceiver, \ + kKey, kCallbackfn) \ /* ES #sec-map.prototype.entries */ \ TFJ(MapPrototypeEntries, kJSArgcReceiverSlots, kReceiver) \ /* ES #sec-get-map.prototype.size */ \ @@ -1385,6 +1389,7 @@ constexpr int kGearboxGenericBuiltinIdOffset = -2; IF_WASM(ASM, 
WasmSuspend, WasmSuspend) \ IF_WASM(ASM, WasmResume, JSTrampoline) \ IF_WASM(ASM, WasmFXResume, WasmFXResume) \ + IF_WASM(ASM, WasmFXSuspend, WasmFXSuspend) \ IF_WASM(ASM, WasmFXReturn, WasmFXReturn) \ IF_WASM(ASM, WasmReject, JSTrampoline) \ IF_WASM(ASM, WasmTrapHandlerLandingPad, WasmDummy) \ @@ -1401,9 +1406,13 @@ constexpr int kGearboxGenericBuiltinIdOffset = -2; /* WeakMap */ \ TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \ TFS(WeakMapLookupHashIndex, NeedsContext::kYes, kTable, kKey) \ - TFJ(WeakMapGet, kJSArgcReceiverSlots + 1, kReceiver, kKey) \ + TFJ(WeakMapPrototypeGet, kJSArgcReceiverSlots + 1, kReceiver, kKey) \ TFJ(WeakMapPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \ TFJ(WeakMapPrototypeSet, kJSArgcReceiverSlots + 2, kReceiver, kKey, kValue) \ + TFJ(WeakMapPrototypeGetOrInsert, kJSArgcReceiverSlots + 2, kReceiver, kKey, \ + kValue) \ + TFJ(WeakMapPrototypeGetOrInsertComputed, kJSArgcReceiverSlots + 2, \ + kReceiver, kKey, kCallbackfn) \ TFJ(WeakMapPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kKey) \ \ /* WeakSet */ \ @@ -2162,6 +2171,8 @@ constexpr int kGearboxGenericBuiltinIdOffset = -2; CPP(LocalePrototypeTimeZones, JSParameterCount(0)) \ /* ecma402 #sec-Intl.Locale.prototype.toString */ \ CPP(LocalePrototypeToString, kDontAdaptArgumentsSentinel) \ + /* ecma402 #sec-Intl.Locale.prototype.variants */ \ + CPP(LocalePrototypeVariants, JSParameterCount(0)) \ /* ecma402 #sec-Intl.Locale.prototype.weekInfo */ \ CPP(LocalePrototypeWeekInfo, JSParameterCount(0)) \ /* ecma402 #sec-intl.numberformat */ \ diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc index 467017a01bf613..7d61749e4aff32 100644 --- a/deps/v8/src/builtins/builtins-function.cc +++ b/deps/v8/src/builtins/builtins-function.cc @@ -280,8 +280,7 @@ namespace { bool IsSloppyNormalJSFunction(Tagged receiver) { if (!IsJSFunction(receiver)) return false; Tagged function = Cast(receiver); - return function->shared()->kind() == FunctionKind::kNormalFunction && - is_sloppy(function->shared()->language_mode()); + return function->shared()->IsSloppyNormalJSFunction(); } } // namespace diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index a771f93143347b..21d49e8535374b 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -110,6 +110,9 @@ TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) { BIND(&tailcall_to_shared); // Tail call into code object on the SharedFunctionInfo. + // TODO(https://crbug.com/451355210, ishell): consider removing this + // duplicate implementation in favour of returning code object from above + // runtime calls once non-leaptering code is removed. TNode code = GetSharedFunctionInfoCode(shared); // TailCallJSCode will take care of parameter count validation between the diff --git a/deps/v8/src/builtins/builtins-internal.cc b/deps/v8/src/builtins/builtins-internal.cc index 305ab81acb972b..99ecc8bf98616d 100644 --- a/deps/v8/src/builtins/builtins-internal.cc +++ b/deps/v8/src/builtins/builtins-internal.cc @@ -12,7 +12,8 @@ namespace v8 { namespace internal { BUILTIN(Illegal) { - UNREACHABLE(); + // Make it distinguishable from other UNREACHABLE() calls for convenience. 
+ FATAL("Called Illegal builtin"); } BUILTIN(IllegalInvocationThrower) { diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc index 6bb764dbdc7c79..6c3f8fbef44435 100644 --- a/deps/v8/src/builtins/builtins-intl.cc +++ b/deps/v8/src/builtins/builtins-intl.cc @@ -919,6 +919,13 @@ BUILTIN(LocalePrototypeRegion) { return *JSLocale::Region(isolate, locale); } +BUILTIN(LocalePrototypeVariants) { + HandleScope scope(isolate); + CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.variants"); + + return *JSLocale::Variants(isolate, locale); +} + BUILTIN(LocalePrototypeBaseName) { HandleScope scope(isolate); CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.baseName"); diff --git a/deps/v8/src/builtins/builtins-number-tsa.cc b/deps/v8/src/builtins/builtins-number-tsa.cc index 4d2ad5275bdf3b..b9c5daffb69817 100644 --- a/deps/v8/src/builtins/builtins-number-tsa.cc +++ b/deps/v8/src/builtins/builtins-number-tsa.cc @@ -175,7 +175,8 @@ class NumberBuiltinsAssemblerTS void ThrowRangeError(V context, MessageTemplate message) { V template_index = SmiConstant(Smi::FromInt(static_cast(message))); - CallRuntime_ThrowRangeError(isolate(), context, template_index); + CallRuntime(context, + {.template_index = template_index}); } V AddWithFeedback(V context, V lhs, V rhs, diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc index 80aac224ce811e..d88b7198345d2a 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.cc +++ b/deps/v8/src/builtins/builtins-regexp-gen.cc @@ -1141,9 +1141,8 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) { subject_string); StoreObjectField(match_info, offsetof(RegExpMatchInfo, last_input_), subject_string); - UnsafeStoreArrayElement(match_info, 0, match_from, - UNSAFE_SKIP_WRITE_BARRIER); - UnsafeStoreArrayElement(match_info, 1, match_to, UNSAFE_SKIP_WRITE_BARRIER); + UnsafeStoreArrayElement(match_info, 0, match_from, SKIP_WRITE_BARRIER); + UnsafeStoreArrayElement(match_info, 1, match_to, SKIP_WRITE_BARRIER); Return(match_info); } diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc index 62380890e1f9c2..a49c717e7e2ab7 100644 --- a/deps/v8/src/builtins/builtins-string-gen.cc +++ b/deps/v8/src/builtins/builtins-string-gen.cc @@ -1134,24 +1134,10 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol( const TNode maybe_string, Handle symbol, DescriptorIndexNameValue additional_property_to_check, const NodeFunction0& regexp_call, const NodeFunction1& generic_call) { - Label out(this), no_protector(this), object_is_heapobject(this); - Label get_property_lookup(this); - - // The protector guarantees that that the Number and String wrapper - // prototypes do not contain Symbol.{matchAll|replace|split} (aka. - // @@matchAll, @@replace @@split). - GotoIf(IsNumberStringNotRegexpLikeProtectorCellInvalid(), &no_protector); - // Smi is safe thanks to the protector. - GotoIf(TaggedIsSmi(object), &out); - // String is safe thanks to the protector. - GotoIf(IsString(CAST(object)), &out); - // HeapNumber is safe thanks to the protector. - Branch(IsHeapNumber(CAST(object)), &out, &object_is_heapobject); + Label out(this); - BIND(&no_protector); - // Smis have to go through the GetProperty lookup in case Number.prototype or - // Object.prototype was modified. 
- Branch(TaggedIsSmi(object), &get_property_lookup, &object_is_heapobject); + GotoIf(TaggedIsSmi(object), &out); + GotoIfNot(IsJSReceiver(CAST(object)), &out); // Take the fast path for RegExps. // There's two conditions: {object} needs to be a fast regexp, and @@ -1160,7 +1146,6 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol( { Label stub_call(this), slow_lookup(this); - BIND(&object_is_heapobject); TNode heap_object = CAST(object); GotoIf(TaggedIsSmi(maybe_string), &slow_lookup); @@ -1183,8 +1168,6 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol( regexp_call(); BIND(&slow_lookup); - // Special case null and undefined to skip the property lookup. - Branch(IsNullOrUndefined(heap_object), &out, &get_property_lookup); } // Fall back to a slow lookup of {heap_object[symbol]}. @@ -1194,8 +1177,6 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol( // * an exception is thrown if the value is not undefined, null, or callable. // We handle the former by jumping to {out} for null values as well, while // the latter is already handled by the Call({maybe_func}) operation. - - BIND(&get_property_lookup); const TNode maybe_func = GetProperty(context, object, symbol); GotoIf(IsUndefined(maybe_func), &out); GotoIf(IsNull(maybe_func), &out); @@ -1268,22 +1249,24 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) { RequireObjectCoercible(context, receiver, "String.prototype.replace"); - // Redirect to replacer method if {search[@@replace]} is not undefined. + // Redirect to replacer method if {search} is an Object and + // {search[@@replace]} is not undefined. { Label next(this); + auto if_regexp_call = [=, this] { + Return(CallBuiltin(Builtin::kRegExpReplace, context, search, receiver, + replace)); + }; + auto if_generic_call = [=, this](TNode fn) { + Return(Call(context, fn, search, receiver, replace)); + }; MaybeCallFunctionAtSymbol( context, search, receiver, isolate()->factory()->replace_symbol(), DescriptorIndexNameValue{ JSRegExp::kSymbolReplaceFunctionDescriptorIndex, RootIndex::kreplace_symbol, Context::REGEXP_REPLACE_FUNCTION_INDEX}, - [=, this]() { - Return(CallBuiltin(Builtin::kRegExpReplace, context, search, receiver, - replace)); - }, - [=, this](TNode fn) { - Return(Call(context, fn, search, receiver, replace)); - }); + if_regexp_call, if_generic_call); Goto(&next); BIND(&next); @@ -1434,9 +1417,10 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) { { Label fast(this), slow(this, Label::kDeferred), throw_exception(this, Label::kDeferred), - throw_flags_exception(this, Label::kDeferred), next(this); + throw_flags_exception(this, Label::kDeferred), maybe_call_matcher(this), + next(this); - // 2. If regexp is neither undefined nor null, then + // 2. If regexp is an Object, then // a. Let isRegExp be ? IsRegExp(regexp). // b. If isRegExp is true, then // i. Let flags be ? Get(regexp, "flags"). @@ -1444,6 +1428,7 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) { // iii. If ? ToString(flags) does not contain "g", throw a // TypeError exception. 
GotoIf(TaggedIsSmi(maybe_regexp), &next); + GotoIfNot(IsJSReceiver(CAST(maybe_regexp)), &next); TNode heap_maybe_regexp = CAST(maybe_regexp); regexp_asm.BranchIfFastRegExpForMatch(context, heap_maybe_regexp, &fast, &slow); @@ -1452,12 +1437,13 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) { { TNode is_global = regexp_asm.FastFlagGetter(CAST(heap_maybe_regexp), JSRegExp::kGlobal); - Branch(is_global, &next, &throw_exception); + Branch(is_global, &maybe_call_matcher, &throw_exception); } BIND(&slow); { - GotoIfNot(regexp_asm.IsRegExp(native_context, heap_maybe_regexp), &next); + GotoIfNot(regexp_asm.IsRegExp(native_context, heap_maybe_regexp), + &maybe_call_matcher); TNode flags = GetProperty(context, heap_maybe_regexp, isolate()->factory()->flags_string()); @@ -1470,7 +1456,8 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) { TNode global_ix = CAST(CallBuiltin(Builtin::kStringIndexOf, context, flags_string, global_char_string, SmiConstant(0))); - Branch(SmiEqual(global_ix, SmiConstant(-1)), &throw_exception, &next); + Branch(SmiEqual(global_ix, SmiConstant(-1)), &throw_exception, + &maybe_call_matcher); } BIND(&throw_exception); @@ -1481,28 +1468,35 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) { ThrowTypeError(context, MessageTemplate::kStringMatchAllNullOrUndefinedFlags); + // a. Let matcher be ? GetMethod(regexp, %Symbol.matchAll%). + // b. If matcher is not undefined, then + // i. Return ? Call(matcher, regexp, « O »). + BIND(&maybe_call_matcher); + { + auto if_regexp_call = [&] { + // MaybeCallFunctionAtSymbol guarantees fast path is chosen only if + // maybe_regexp is a fast regexp and receiver is a string. + TNode s = CAST(receiver); + + Return(RegExpPrototypeMatchAllImpl(context, native_context, + maybe_regexp, s)); + }; + auto if_generic_call = [=, this](TNode fn) { + Return(Call(context, fn, maybe_regexp, receiver)); + }; + MaybeCallFunctionAtSymbol( + context, maybe_regexp, receiver, + isolate()->factory()->match_all_symbol(), + DescriptorIndexNameValue{ + JSRegExp::kSymbolMatchAllFunctionDescriptorIndex, + RootIndex::kmatch_all_symbol, + Context::REGEXP_MATCH_ALL_FUNCTION_INDEX}, + if_regexp_call, if_generic_call); + Goto(&next); + } + BIND(&next); } - // a. Let matcher be ? GetMethod(regexp, @@matchAll). - // b. If matcher is not undefined, then - // i. Return ? Call(matcher, regexp, « O »). - auto if_regexp_call = [&] { - // MaybeCallFunctionAtSymbol guarantees fast path is chosen only if - // maybe_regexp is a fast regexp and receiver is a string. - TNode s = CAST(receiver); - - Return( - RegExpPrototypeMatchAllImpl(context, native_context, maybe_regexp, s)); - }; - auto if_generic_call = [=, this](TNode fn) { - Return(Call(context, fn, maybe_regexp, receiver)); - }; - MaybeCallFunctionAtSymbol( - context, maybe_regexp, receiver, isolate()->factory()->match_all_symbol(), - DescriptorIndexNameValue{JSRegExp::kSymbolMatchAllFunctionDescriptorIndex, - RootIndex::kmatch_all_symbol, - Context::REGEXP_MATCH_ALL_FUNCTION_INDEX}, - if_regexp_call, if_generic_call); // 3. Let S be ? ToString(O). TNode s = ToString_Inline(context, receiver); @@ -1511,7 +1505,7 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) { TNode rx = regexp_asm.RegExpCreate(context, native_context, maybe_regexp, StringConstant("g")); - // 5. Return ? Invoke(rx, @@matchAll, « S »). + // 5. Return ? Invoke(rx, %Symbol.matchAll%, « S »). 
TNode match_all_func = GetProperty(context, rx, isolate()->factory()->match_all_symbol()); Return(Call(context, match_all_func, rx, s)); @@ -1611,20 +1605,28 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) { RequireObjectCoercible(context, receiver, "String.prototype.split"); - // Redirect to splitter method if {separator[@@split]} is not undefined. + // Redirect to splitter method if {separator} is an Object and + // {separator[@@split]} is not undefined. + { + Label next(this); - MaybeCallFunctionAtSymbol( - context, separator, receiver, isolate()->factory()->split_symbol(), - DescriptorIndexNameValue{JSRegExp::kSymbolSplitFunctionDescriptorIndex, - RootIndex::ksplit_symbol, - Context::REGEXP_SPLIT_FUNCTION_INDEX}, - [&]() { - args.PopAndReturn(CallBuiltin(Builtin::kRegExpSplit, context, - separator, receiver, limit)); - }, - [&](TNode fn) { - args.PopAndReturn(Call(context, fn, separator, receiver, limit)); - }); + auto if_regexp_call = [&] { + args.PopAndReturn(CallBuiltin(Builtin::kRegExpSplit, context, + separator, receiver, limit)); + }; + auto if_generic_call = [&](TNode fn) { + args.PopAndReturn(Call(context, fn, separator, receiver, limit)); + }; + MaybeCallFunctionAtSymbol( + context, separator, receiver, isolate()->factory()->split_symbol(), + DescriptorIndexNameValue{JSRegExp::kSymbolSplitFunctionDescriptorIndex, + RootIndex::ksplit_symbol, + Context::REGEXP_SPLIT_FUNCTION_INDEX}, + if_regexp_call, if_generic_call); + Goto(&next); + + BIND(&next); + } // String and integer conversions. diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h index acb1dcaf163e2c..7a45660abbb381 100644 --- a/deps/v8/src/builtins/builtins-string-gen.h +++ b/deps/v8/src/builtins/builtins-string-gen.h @@ -175,14 +175,14 @@ class StringBuiltinsAssembler : public CodeStubAssembler { // Implements boilerplate logic for {match, split, replace, search} of the // form: // - // if (!IS_NULL_OR_UNDEFINED(object)) { + // if (IS_OBJECT(object)) { // var maybe_function = object[symbol]; // if (!IS_UNDEFINED(maybe_function)) { // return %_Call(maybe_function, ...); // } // } // - // Contains fast paths for Smi and RegExp objects. + // Contains fast paths for RegExp objects. // Important: {regexp_call} may not contain any code that can call into JS. using NodeFunction0 = std::function; using NodeFunction1 = std::function fn)>; diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc index 4e76791b23f7d2..a3c834e7a19e54 100644 --- a/deps/v8/src/builtins/builtins.cc +++ b/deps/v8/src/builtins/builtins.cc @@ -14,6 +14,7 @@ #include "src/codegen/macro-assembler.h" #include "src/diagnostics/code-tracer.h" #include "src/execution/isolate.h" +#include "src/heap/combined-heap.h" #include "src/interpreter/bytecodes.h" #include "src/logging/code-events.h" // For CodeCreateEvent. #include "src/logging/log.h" // For V8FileLogger. @@ -584,6 +585,496 @@ bool Builtins::AllowDynamicFunction( return isolate->MayAccess(responsible_context, target_global_proxy); } +// static +Builtins::JSBuiltinStateFlags Builtins::GetJSBuiltinState(Builtin builtin) { +#ifdef DEBUG +#define CHECK_FEATURE_FLAG_IS_CONSISTENT(IS_ENABLED) \ + static bool is_feature_enabled = IS_ENABLED; \ + DCHECK_EQ(is_feature_enabled, IS_ENABLED); +#else +#define CHECK_FEATURE_FLAG_IS_CONSISTENT(IS_ENABLED) +#endif // DEBUG + +// Helper macro for returning optional builtin's state depending on whether +// the respective feature is enabled or not. 
+// In debug mode it also verifies that the state of the feature hasn't changed +// since previous check. This might happen in unit tests if they flip feature +// flags back and forth before Isolate deinitialization. +#define RETURN_FLAG_DEPENDENT_BUILTIN_STATE(IS_FEATURE_ENABLED) \ + { \ + CHECK_FEATURE_FLAG_IS_CONSISTENT(IS_FEATURE_ENABLED); \ + return (IS_FEATURE_ENABLED) ? JSBuiltinStateFlag::kEnabledFlagDependent \ + : JSBuiltinStateFlag::kDisabledFlagDependent; \ + } + +#define RETURN_FLAG_DEPENDENT_LAZY_BUILTIN_STATE(IS_FEATURE_ENABLED) \ + { \ + CHECK_FEATURE_FLAG_IS_CONSISTENT(IS_FEATURE_ENABLED); \ + return (IS_FEATURE_ENABLED) \ + ? JSBuiltinStateFlag::kEnabledFlagDependentLazy \ + : JSBuiltinStateFlag::kDisabledFlagDependentLazy; \ + } + + switch (builtin) { + // Helper builtins with JS calling convention which are not supposed to be + // called directly from user JS code. + case Builtin::kConstructFunction: + case Builtin::kConstructBoundFunction: + case Builtin::kConstructedNonConstructable: + case Builtin::kConstructProxy: + case Builtin::kHandleApiConstruct: + case Builtin::kArrayConcat: + case Builtin::kArrayPop: + case Builtin::kArrayPush: + case Builtin::kArrayShift: + case Builtin::kArrayUnshift: + case Builtin::kFunctionPrototypeBind: + case Builtin::kInterpreterEntryTrampolineForProfiling: + // Tiering builtins are set directly into the dispatch table and never + // via Code object. + case Builtin::kStartMaglevOptimizeJob: + case Builtin::kOptimizeMaglevEager: + case Builtin::kStartTurbofanOptimizeJob: + case Builtin::kOptimizeTurbofanEager: + case Builtin::kFunctionLogNextExecution: + case Builtin::kMarkReoptimizeLazyDeoptimized: + case Builtin::kMarkLazyDeoptimized: + // All *DeoptContinuation builtins. + case Builtin::kArrayEveryLoopEagerDeoptContinuation: + case Builtin::kArrayEveryLoopLazyDeoptContinuation: + case Builtin::kArrayFilterLoopEagerDeoptContinuation: + case Builtin::kArrayFilterLoopLazyDeoptContinuation: + case Builtin::kArrayFindLoopEagerDeoptContinuation: + case Builtin::kArrayFindLoopLazyDeoptContinuation: + case Builtin::kArrayFindLoopAfterCallbackLazyDeoptContinuation: + case Builtin::kArrayFindIndexLoopEagerDeoptContinuation: + case Builtin::kArrayFindIndexLoopLazyDeoptContinuation: + case Builtin::kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation: + case Builtin::kArrayForEachLoopEagerDeoptContinuation: + case Builtin::kArrayForEachLoopLazyDeoptContinuation: + case Builtin::kArrayMapPreLoopLazyDeoptContinuation: + case Builtin::kArrayMapLoopEagerDeoptContinuation: + case Builtin::kArrayMapLoopLazyDeoptContinuation: + case Builtin::kArrayReduceRightPreLoopEagerDeoptContinuation: + case Builtin::kArrayReduceRightLoopEagerDeoptContinuation: + case Builtin::kArrayReduceRightLoopLazyDeoptContinuation: + case Builtin::kArrayReducePreLoopEagerDeoptContinuation: + case Builtin::kArrayReduceLoopEagerDeoptContinuation: + case Builtin::kArrayReduceLoopLazyDeoptContinuation: + case Builtin::kArraySomeLoopEagerDeoptContinuation: + case Builtin::kArraySomeLoopLazyDeoptContinuation: + case Builtin::kStringCreateLazyDeoptContinuation: + case Builtin::kGenericLazyDeoptContinuation: + case Builtin::kPromiseConstructorLazyDeoptContinuation: + return JSBuiltinStateFlag::kDisabledJSBuiltin; + + // These builtins with JS calling convention are not JS language builtins + // but are allowed to be installed into JSFunctions. 
+ case Builtin::kCompileLazy: + case Builtin::kDebugBreakTrampoline: + case Builtin::kHandleApiCallOrConstruct: + case Builtin::kInstantiateAsmJs: + case Builtin::kInterpreterEntryTrampoline: + return JSBuiltinStateFlag::kJSTrampoline; + + // These are core JS builtins which are instantiated lazily. + case Builtin::kConsoleAssert: + case Builtin::kArrayFromAsyncIterableOnFulfilled: + case Builtin::kArrayFromAsyncIterableOnRejected: + case Builtin::kArrayFromAsyncArrayLikeOnFulfilled: + case Builtin::kArrayFromAsyncArrayLikeOnRejected: + case Builtin::kAsyncFromSyncIteratorCloseSyncAndRethrow: + case Builtin::kAsyncFunctionAwaitRejectClosure: + case Builtin::kAsyncFunctionAwaitResolveClosure: + case Builtin::kAsyncDisposableStackOnFulfilled: + case Builtin::kAsyncDisposableStackOnRejected: + case Builtin::kAsyncDisposeFromSyncDispose: + case Builtin::kAsyncGeneratorAwaitResolveClosure: + case Builtin::kAsyncGeneratorAwaitRejectClosure: + case Builtin::kAsyncGeneratorYieldWithAwaitResolveClosure: + case Builtin::kAsyncGeneratorReturnClosedResolveClosure: + case Builtin::kAsyncGeneratorReturnClosedRejectClosure: + case Builtin::kAsyncGeneratorReturnResolveClosure: + case Builtin::kAsyncIteratorPrototypeAsyncDisposeResolveClosure: + case Builtin::kAsyncIteratorValueUnwrap: + case Builtin::kCallAsyncModuleFulfilled: + case Builtin::kCallAsyncModuleRejected: + case Builtin::kPromiseCapabilityDefaultReject: + case Builtin::kPromiseCapabilityDefaultResolve: + case Builtin::kPromiseGetCapabilitiesExecutor: + case Builtin::kPromiseAllResolveElementClosure: + case Builtin::kPromiseAllSettledResolveElementClosure: + case Builtin::kPromiseAllSettledRejectElementClosure: + case Builtin::kPromiseAnyRejectElementClosure: + case Builtin::kPromiseValueThunkFinally: + case Builtin::kPromiseThrowerFinally: + case Builtin::kPromiseCatchFinally: + case Builtin::kPromiseThenFinally: + case Builtin::kProxyRevoke: + return JSBuiltinStateFlag::kCoreJSLazy; + +#if V8_ENABLE_WEBASSEMBLY + // These builtins with JS calling convention are not JS language builtins + // but are allowed to be installed into JSFunctions. + case Builtin::kJSToWasmWrapper: + case Builtin::kJSToJSWrapper: + case Builtin::kJSToJSWrapperInvalidSig: + case Builtin::kWasmPromising: +#if V8_ENABLE_DRUMBRAKE + case Builtin::kGenericJSToWasmInterpreterWrapper: +#endif + case Builtin::kWasmStressSwitch: + return JSBuiltinStateFlag::kJSTrampoline; + + // These are core JS builtins which are instantiated lazily. + case Builtin::kWasmConstructorWrapper: + case Builtin::kWasmMethodWrapper: + case Builtin::kWasmResume: + case Builtin::kWasmReject: + // Well known import functions. 
+ case Builtin::kWebAssemblyStringCast: + case Builtin::kWebAssemblyStringTest: + case Builtin::kWebAssemblyStringFromWtf16Array: + case Builtin::kWebAssemblyStringFromUtf8Array: + case Builtin::kWebAssemblyStringIntoUtf8Array: + case Builtin::kWebAssemblyStringToUtf8Array: + case Builtin::kWebAssemblyStringToWtf16Array: + case Builtin::kWebAssemblyStringFromCharCode: + case Builtin::kWebAssemblyStringFromCodePoint: + case Builtin::kWebAssemblyStringCodePointAt: + case Builtin::kWebAssemblyStringCharCodeAt: + case Builtin::kWebAssemblyStringLength: + case Builtin::kWebAssemblyStringMeasureUtf8: + case Builtin::kWebAssemblyStringConcat: + case Builtin::kWebAssemblyStringSubstring: + case Builtin::kWebAssemblyStringEquals: + case Builtin::kWebAssemblyStringCompare: + case Builtin::kWebAssemblyConfigureAllPrototypes: + return JSBuiltinStateFlag::kCoreJSLazy; +#endif // V8_ENABLE_WEBASSEMBLY + +#ifdef V8_INTL_SUPPORT + // Some Intl builtins are lazily instantiated. + case Builtin::kCollatorInternalCompare: + case Builtin::kDateTimeFormatInternalFormat: + case Builtin::kNumberFormatInternalFormatNumber: + case Builtin::kV8BreakIteratorInternalAdoptText: + case Builtin::kV8BreakIteratorInternalBreakType: + case Builtin::kV8BreakIteratorInternalCurrent: + case Builtin::kV8BreakIteratorInternalFirst: + case Builtin::kV8BreakIteratorInternalNext: + return JSBuiltinStateFlag::kCoreJSLazy; + + // --harmony_remove_intl_locale_info_getters + case Builtin::kLocalePrototypeCalendars: + case Builtin::kLocalePrototypeCollations: + case Builtin::kLocalePrototypeHourCycles: + case Builtin::kLocalePrototypeNumberingSystems: + case Builtin::kLocalePrototypeTextInfo: + case Builtin::kLocalePrototypeTimeZones: + case Builtin::kLocalePrototypeWeekInfo: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE( + !v8_flags.harmony_remove_intl_locale_info_getters); + +#endif // V8_INTL_SUPPORT + +#ifdef V8_TEMPORAL_SUPPORT +#define CASE(Name, ...) case Builtin::k##Name: + BUILTIN_LIST_TEMPORAL(CASE, CASE) // CPP, TFJ +#undef CASE + RETURN_FLAG_DEPENDENT_LAZY_BUILTIN_STATE(v8_flags.harmony_temporal); +#endif // V8_TEMPORAL_SUPPORT + + // + // Various feature-dependent builtins. 
+ // + +#if V8_ENABLE_WEBASSEMBLY + case Builtin::kWebAssemblyFunctionPrototypeBind: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE( + wasm::WasmEnabledFeatures::FromFlags().has_type_reflection()); +#endif // V8_ENABLE_WEBASSEMBLY + + // --enable-experimental-regexp-engine + case Builtin::kRegExpPrototypeLinearGetter: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE( + v8_flags.enable_experimental_regexp_engine); + + // --js-source-phase-imports + case Builtin::kIllegalInvocationThrower: + case Builtin::kAbstractModuleSourceToStringTag: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE(v8_flags.js_source_phase_imports); + + // --harmony-shadow-realm + case Builtin::kShadowRealmConstructor: + case Builtin::kShadowRealmPrototypeEvaluate: + case Builtin::kShadowRealmPrototypeImportValue: + case Builtin::kShadowRealmImportValueRejected: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE(v8_flags.harmony_shadow_realm); + case Builtin::kShadowRealmImportValueFulfilled: + RETURN_FLAG_DEPENDENT_LAZY_BUILTIN_STATE(v8_flags.harmony_shadow_realm); + + // --harmony-struct + case Builtin::kSharedSpaceJSObjectHasInstance: + case Builtin::kSharedStructTypeConstructor: + case Builtin::kSharedStructTypeIsSharedStruct: + case Builtin::kSharedArrayConstructor: + case Builtin::kSharedArrayIsSharedArray: + case Builtin::kAtomicsMutexConstructor: + case Builtin::kAtomicsMutexLock: + case Builtin::kAtomicsMutexLockWithTimeout: + case Builtin::kAtomicsMutexTryLock: + case Builtin::kAtomicsMutexIsMutex: + case Builtin::kAtomicsMutexLockAsync: + case Builtin::kAtomicsConditionConstructor: + case Builtin::kAtomicsConditionWait: + case Builtin::kAtomicsConditionNotify: + case Builtin::kAtomicsConditionIsCondition: + case Builtin::kAtomicsConditionWaitAsync: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE(v8_flags.harmony_struct); + case Builtin::kSharedStructConstructor: + case Builtin::kAtomicsMutexAsyncUnlockResolveHandler: + case Builtin::kAtomicsMutexAsyncUnlockRejectHandler: + case Builtin::kAtomicsConditionAcquireLock: + RETURN_FLAG_DEPENDENT_LAZY_BUILTIN_STATE(v8_flags.harmony_struct); + + // --js-promise-try + case Builtin::kPromiseTry: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE(v8_flags.js_promise_try); + + // --js-atomics-pause + case Builtin::kAtomicsPause: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE(v8_flags.js_atomics_pause); + + // --js-error-iserror + case Builtin::kErrorIsError: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE(v8_flags.js_error_iserror); + + // --js-regexp-escape + case Builtin::kRegExpEscape: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE(v8_flags.js_regexp_escape); + + // --js-explicit-resource-management + case Builtin::kSuppressedErrorConstructor: + case Builtin::kDisposableStackConstructor: + case Builtin::kDisposableStackPrototypeUse: + case Builtin::kDisposableStackPrototypeDispose: + case Builtin::kDisposableStackPrototypeAdopt: + case Builtin::kDisposableStackPrototypeDefer: + case Builtin::kDisposableStackPrototypeMove: + case Builtin::kDisposableStackPrototypeGetDisposed: + case Builtin::kAsyncDisposableStackConstructor: + case Builtin::kAsyncDisposableStackPrototypeUse: + case Builtin::kAsyncDisposableStackPrototypeDisposeAsync: + case Builtin::kAsyncDisposableStackPrototypeAdopt: + case Builtin::kAsyncDisposableStackPrototypeDefer: + case Builtin::kAsyncDisposableStackPrototypeMove: + case Builtin::kAsyncDisposableStackPrototypeGetDisposed: + case Builtin::kIteratorPrototypeDispose: + case Builtin::kAsyncIteratorPrototypeAsyncDispose: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE( + v8_flags.js_explicit_resource_management); + + // --js-float16array + case 
Builtin::kMathF16round: + case Builtin::kDataViewPrototypeGetFloat16: + case Builtin::kDataViewPrototypeSetFloat16: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE(v8_flags.js_float16array); + + // --js-base-64 + case Builtin::kUint8ArrayFromBase64: + case Builtin::kUint8ArrayFromHex: + case Builtin::kUint8ArrayPrototypeToBase64: + case Builtin::kUint8ArrayPrototypeSetFromBase64: + case Builtin::kUint8ArrayPrototypeToHex: + case Builtin::kUint8ArrayPrototypeSetFromHex: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE(v8_flags.js_base_64); + + // --js-upsert + case Builtin::kMapPrototypeGetOrInsert: + case Builtin::kMapPrototypeGetOrInsertComputed: + case Builtin::kWeakMapPrototypeGetOrInsert: + case Builtin::kWeakMapPrototypeGetOrInsertComputed: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE(v8_flags.js_upsert); + +#ifdef V8_INTL_SUPPORT + // --js-intl-locale-variants + case Builtin::kLocalePrototypeVariants: + RETURN_FLAG_DEPENDENT_BUILTIN_STATE(v8_flags.js_intl_locale_variants); +#endif // V8_INTL_SUPPORT + + default: { + // Treat all other JS builtins as mandatory core JS language builtins. + // This will allow us to detect optional builtins (because mandatory JS + // builtins must be installed somewhere by default) and we allowlist only + // them in this switch. + return HasJSLinkage(builtin) ? JSBuiltinStateFlag::kCoreJSMandatory + : JSBuiltinStateFlag::kDisabledNonJSBuiltin; + } + } + UNREACHABLE(); + +#undef CHECK_FEATURE_FLAG_IS_CONSISTENT +#undef RETURN_FLAG_DEPENDENT_BUILTIN_STATE +#undef RETURN_FLAG_DEPENDENT_LAZY_BUILTIN_STATE +} + +#ifdef DEBUG + +void Builtins::VerifyGetJSBuiltinState(bool allow_non_initial_state) { + CombinedHeapObjectIterator iterator(isolate_->heap()); + + // JS builtins installed in JSFunctions. + std::vector used_js_builtins( + static_cast(Builtins::kBuiltinCount)); + bool js_functions_exist = false; + + // Step 1: iterate the heap and record builtins that are installed in + // JSFunctions. + for (Tagged obj = iterator.Next(); !obj.is_null(); + obj = iterator.Next()) { + if (IsAnyHole(obj)) continue; + + Tagged func; + if (!TryCast(obj, &func)) continue; + js_functions_exist = true; + + Builtin builtin = func->code(isolate_)->builtin_id(); + size_t builtin_id = static_cast(builtin); + if (builtin_id < Builtins::kBuiltinCount) { + used_js_builtins[builtin_id] = true; + } + } + if (!js_functions_exist) { + // If there are no JSFunctions in the heap then the isolate instance + // must have not been initialized yet and thus checking builtins usages + // doesn't make sense. + return; + } + + // Step 2: make sure the results match the GetJSBuiltinState() predicate. + size_t bad_builtins_count = 0; + for (Builtin i = Builtins::kFirst; i <= Builtins::kLast; ++i) { + JSBuiltinStateFlags state = GetJSBuiltinState(i); + + bool is_enabled = state & JSBuiltinStateFlag::kEnabled; + bool is_flag_dependent = state & JSBuiltinStateFlag::kFlagDependent; + bool is_lazy = state & JSBuiltinStateFlag::kLazy; + bool is_core_JS = state & JSBuiltinStateFlag::kCoreJS; + bool has_JS_linkage = !(state & JSBuiltinStateFlag::kNonJSLinkage); + + // Some sanity checks. + CHECK_IMPLIES(is_core_JS, is_enabled && !is_flag_dependent); + + const char* error = nullptr; // No errors yet. + bool used = used_js_builtins[static_cast(i)]; + + if (has_JS_linkage != HasJSLinkage(i)) { + if (has_JS_linkage) { + error = "non-JS builtin doesn't have kNonJSLinkage flag"; + } else { + // Incorrectly set kNonJSLinkage flag. 
+ error = "JS builtin has kNonJSLinkage flag"; + } + + } else if (is_enabled && !has_JS_linkage) { + // Builtins with non-JS linkage are not allowed to be installed into + // JSFunctions. Maybe the builtin was incorrectly attributed? + error = "builtin with non-JS linkage must be disabled"; + + } else if (used) { + // The builtin is installed into some JSFunction. + if (!is_enabled) { + // Only enabled builtins are allowed to be used. Possible reasons: + // - is this builtin belongs to a feature behind a flag? + // - is this a core JS language builtin which is supposed to be + // instantiated lazily? + error = "using disabled builtin"; + + } else if (is_lazy && !allow_non_initial_state) { + // This check is triggered by %VerifyGetJSBuiltinState(false); call + // and it expects and verifies that none of lazy functions are + // instantiated. Possible reasons it fails: + // - is the builtin was marked as lazy by mistake? + // - is %VerifyGetJSBuiltinState(false); called after instantiations + // of lazy builtins was triggered by the user code? + error = "unexpected usage of lazy builtin"; + } + + } else if (is_enabled && !allow_non_initial_state) { + // If builtin is not used and the builtins state is expected to be in + // initial state (i.e. user code hasn't monkey-patched things) then + // we could perform some additional checks. + + if (is_flag_dependent && !is_lazy) { + // This builtin might have been marked as enabled by mistake: + // - is the feature flag used correct? + // - is this builtin instantiated lazily? + error = "non-lazy optional builtin is not used while it was enabled"; + + } else if (is_core_JS && !is_lazy) { + // This builtin might have been marked as mandatory JS language feature + // by mistake: + // - is this builtin instantiated lazily? + // - is the builtin a new JS trampoline? + error = "mandatory JS builtin is not used while it was enabled"; + } + } + + if (error) { + if (bad_builtins_count == 0) { + PrintF( + "#\n# Builtins::GetJSBuiltinState() predicate is wrong for " + "the following builtins:\n#\n"); + } + bad_builtins_count++; + PrintF(" case Builtin::k%s: // %s\n", Builtins::name(i), error); + } + } + // If you see this check failing then you must have added a builtin with + // JS calling convention that's not installed into any JSFunction with + // the default set of V8 flags. + // The reasons might be: + // a) the builtin has JS calling convention but it's not supposed to be + // installed into any JSFunction (for example, ConstructFunction or + // various continuation builtins), + // b) the builtin is supposed to be installed into JSFunction but it belongs + // to an experimental, incomplete or otherwise disabled feature + // controlled by a certain runtime flag (for example, + // ShadowRealmConstructor), + // c) the builtin is supposed to be installed into JSFunction lazily and + // it belongs to an experimental, incomplete or otherwise disabled feature + // controlled by a certain runtime flag (for example, + // SharedStructConstructor), + // d) the builtin belongs to a JavaScript language feature but respective + // JSFunction instances are created lazily (for example, Temporal + // builtins). + // e) the builtin belongs a core V8 machinery (such as CompileLazy, + // HandleApiCallOrConstruct or similar). 
+  //
+  // To fix this you should make Builtins::GetJSBuiltinState() return the
+  // following value for the builtin depending on the case mentioned above:
+  //  a) return JSBuiltinStateFlag::kDisabledJSBuiltin;
+  //  b) RETURN_FLAG_DEPENDENT_BUILTIN_STATE(v8_flags.the_feature_flag);
+  //  c) RETURN_FLAG_DEPENDENT_LAZY_BUILTIN_STATE(v8_flags.the_feature_flag);
+  //  d) return JSBuiltinStateFlag::kCoreJSLazy;
+  //  e) return JSBuiltinStateFlag::kJSTrampoline;
+  //
+  // If you are adding builtins for an experimental or otherwise disabled
+  // feature make sure you add a regression test for that flag too.
+  // For example, see:
+  //  - test/mjsunit/harmony/shadowrealm-builtins.js
+  //  - test/mjsunit/shared-memory/builtins.js
+  //
+  // This check might also fail for mandatory builtins if the JS code deleted
+  // a mandatory builtin function. If that's what the test is expected to do
+  // then disable the verification: --no-verify-get-js-builtin-state.
+  CHECK_WITH_MSG(bad_builtins_count == 0,
+                 "Builtins::GetJSBuiltinState() predicate requires updating");
+}
+
+#endif  // DEBUG
+
 Builtin ExampleBuiltinForTorqueFunctionPointerType(
     size_t function_pointer_type_id) {
   switch (function_pointer_type_id) {
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index 71fbc8a9401e2b..e06627d9308808 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -308,7 +308,8 @@ class Builtins {
   V8_EXPORT_PRIVATE Tagged<Code> code(Builtin builtin);
   V8_EXPORT_PRIVATE Handle<Code> code_handle(Builtin builtin);
 
-  static CallInterfaceDescriptor CallInterfaceDescriptorFor(Builtin builtin);
+  V8_EXPORT_PRIVATE static CallInterfaceDescriptor CallInterfaceDescriptorFor(
+      Builtin builtin);
   V8_EXPORT_PRIVATE static Callable CallableFor(Isolate* isolate,
                                                 Builtin builtin);
   V8_EXPORT_PRIVATE static bool HasJSLinkage(Builtin builtin);
@@ -475,6 +476,94 @@ class Builtins {
   // Public for ia32-specific helper.
   enum class ForwardWhichFrame { kCurrentFrame, kParentFrame };
 
+  // Flags describing properties of a builtin with a JS calling convention.
+  // Builtins with non-JS linkage have the kDisabledNonJSBuiltin flags.
+  //
+  // This machinery is used by the V8 Sandbox to ensure that builtins that
+  // belong to disabled features can't be made callable (i.e. can't be
+  // installed into a JSFunction).
+  enum class JSBuiltinStateFlag {
+    kNone = 0,
+
+    // Builtin is allowed to be installed as a Code object into a JSFunction:
+    //  - it has a JS calling convention,
+    //  - it's an entry point builtin and not an arbitrary helper builtin that
+    //    has a JS calling convention for convenience.
+    kEnabled = 1 << 0,
+
+    // Builtin represents some core V8 functionality which might or might not
+    // be currently used (for example, CompileLazy).
+    kCoreV8 = 1 << 1,
+
+    // The builtin belongs to a JS language feature that's always available,
+    // i.e. the builtin must be installed in some JSFunction by default.
+    // For example, ArrayPrototypePush or StringPrototypeConcat.
+    kCoreJS = 1 << 2,
+
+    // The builtin belongs to a feature that's enabled by a runtime flag,
+    // unlike core JS language builtins which are always available.
+    kFlagDependent = 1 << 3,
+
+    // The builtin might belong either to a core JS language feature or to
+    // a feature behind a runtime flag, but in either case it is installed
+    // into JSFunctions lazily. This means that there might be no JSFunction
+    // instances with this builtin in the heap even though the corresponding
+    // JS feature is enabled.
+ // For example, V8 lazily creates JSFunctions with + // AsyncGeneratorAwaitResolveClosure and AsyncGeneratorAwaitRejectClosure + // builtins as a part of AsyncGeneratorAwait logic, see + // AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(). + kLazy = 1 << 4, + + // Builtin has non-JS linkage. State of this flag must be equivalent to + // (!HasJSLinkage(..)) predicate. + kNonJSLinkage = 1 << 5, + + // Common combinations. + kJSTrampoline = kEnabled | kCoreV8, + kCoreJSMandatory = kEnabled | kCoreJS, + kCoreJSLazy = kEnabled | kCoreJS | kLazy, + + kDisabledJSBuiltin = kNone, + kDisabledNonJSBuiltin = kNonJSLinkage, + kEnabledFlagDependent = kEnabled | kFlagDependent, + kDisabledFlagDependent = kFlagDependent, + kEnabledFlagDependentLazy = kEnabled | kFlagDependent | kLazy, + kDisabledFlagDependentLazy = kFlagDependent | kLazy, + }; + using JSBuiltinStateFlags = base::Flags; + static JSBuiltinStateFlags GetJSBuiltinState(Builtin builtin); + + // Returns true for disabled builtins with JS linkage (all non-JS builtins + // are considered enabled). + static bool IsDisabled(Builtin builtin) { + auto flags = Builtins::GetJSBuiltinState(builtin); + DCHECK_EQ(Builtins::HasJSLinkage(builtin), + !(flags & Builtins::JSBuiltinStateFlag::kNonJSLinkage)); + if (!(flags & Builtins::JSBuiltinStateFlag::kNonJSLinkage) && + !(flags & Builtins::JSBuiltinStateFlag::kEnabled)) { + return true; + } + return false; + } + +#ifdef DEBUG + // Verify correctness of GetJSBuiltinState() which has to be maintained + // manually. The idea is to iterate the heap to figure out which builtins + // with JS calling convention are never installed into any JSFunction and + // make sure that GetJSBuiltinState() works properly for such builtins. + // + // |allow_non_initial_state| defines whether the strict verification is + // not applicable because user code might have changed the heap state. + // For example, user code could have + // - deleted mandatory builtin: delete String.prototype.concat; + // - triggered instantiation of some lazy builtins. + // + // In any case, even in this mode there must be no usages of disabled + // builtins. + void VerifyGetJSBuiltinState(bool allow_non_initial_state); +#endif + private: static void Generate_CallFunction(MacroAssembler* masm, ConvertReceiverMode mode); @@ -556,6 +645,8 @@ class Builtins { friend class SetupIsolateDelegate; }; +DEFINE_OPERATORS_FOR_FLAGS(Builtins::JSBuiltinStateFlags) + V8_INLINE constexpr bool IsInterpreterTrampolineBuiltin(Builtin builtin_id) { // Check for kNoBuiltinId first to abort early when the current // InstructionStream object is not a builtin. diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc index 6a762e9dd88a8f..483a6570fd2d2e 100644 --- a/deps/v8/src/builtins/ia32/builtins-ia32.cc +++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc @@ -3180,6 +3180,9 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ JumpIfSmi(tmp, &allocate_vector); // Vector exists. Finish setting up the stack frame. + // Increment the total invocation count of the function. + __ add(FieldOperand(tmp, OFFSET_OF_DATA_START(FixedArray)), + Immediate(Smi::FromInt(1))); __ Push(tmp); // Feedback vector. __ mov(tmp, instance_data_slot); // Calling PC. 
__ Push(tmp); @@ -3229,6 +3232,9 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { // [ WASM_LIFTOFF_SETUP ] [ WASM ] <-- marker_slot // [ saved ebp ] [ saved ebp ] __ mov(marker_slot, Immediate(StackFrame::TypeToMarker(StackFrame::WASM))); + // Increment the total invocation count of the function. + __ add(FieldOperand(tmp, OFFSET_OF_DATA_START(FixedArray)), + Immediate(Smi::FromInt(1))); __ Push(tmp); // Feedback vector. __ mov(tmp, instance_data_slot); // Calling PC. __ Push(tmp); @@ -3939,6 +3945,46 @@ void Builtins::Generate_WasmFXResume(MacroAssembler* masm) { __ ret(0); } +void Builtins::Generate_WasmFXSuspend(MacroAssembler* masm) { + __ EnterFrame(StackFrame::WASM_STACK_EXIT); + Register tag = WasmFXSuspendDescriptor::GetRegisterParameter(0); + Register cont = WasmFXSuspendDescriptor::GetRegisterParameter(1); + Label resume; + __ Push(cont); + __ Push(kContextRegister); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(6, edi); + __ Move(Operand(esp, 0 * kSystemPointerSize), + Immediate(ExternalReference::isolate_address())); + __ mov(MemOperand(esp, 1 * kSystemPointerSize), esp); + __ mov(MemOperand(esp, 2 * kSystemPointerSize), ebp); + __ LoadLabelAddress(ecx, &resume); + __ mov(MemOperand(esp, 3 * kSystemPointerSize), ecx); + __ mov(MemOperand(esp, 4 * kSystemPointerSize), tag); + __ mov(MemOperand(esp, 5 * kSystemPointerSize), cont); + __ CallCFunction(ExternalReference::wasm_suspend_wasmfx_stack(), 6); + } + Register target_stack = edi; + __ mov(target_stack, kReturnRegister0); + __ Pop(kContextRegister); + cont = kReturnRegister0; + __ Pop(cont); + + Label ok; + __ cmp(target_stack, Immediate(0)); + __ j(not_equal, &ok); + // No handler found. + __ CallRuntime(Runtime::kThrowWasmSuspendError); + + __ bind(&ok); + LoadJumpBuffer(masm, target_stack, true); + __ Trap(); + __ bind(&resume); + __ LeaveFrame(StackFrame::WASM_STACK_EXIT); + __ ret(0); +} + void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) { Register active_stack = ecx; __ LoadRootRelative(active_stack, IsolateData::active_stack_offset()); diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq index a7812458307e79..a9458219c1ec1b 100644 --- a/deps/v8/src/builtins/iterator.tq +++ b/deps/v8/src/builtins/iterator.tq @@ -112,11 +112,29 @@ struct ForOfNextResult { extern transitioning macro ForOfNextHelper(Context, Object, Object): ForOfNextResult; -transitioning builtin ForOfNextBaseline( +transitioning builtin ForOfNext( context: Context, iterator: JSAny, next: JSFunction): ForOfNextResult { return ForOfNextHelper(context, iterator, next); } +transitioning builtin ForOfNextBaseline( + context: Context, iterator: JSAny, next: JSFunction, + resultSlot: RawPtr): Undefined { + const result: ForOfNextResult = ForOfNext(context, iterator, next); + + // The result slot address points to the first interpreter register slot. + const firstResultSlot = resultSlot; + // The second interpreter register slot is one word before, since stacks grow + // toward zero. 
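+  // I.e. result.value is written into the first interpreter register and
+  // result.done into the register at the next lower address.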
+ const secondResultSlot = + resultSlot + torque_internal::TimesSizeOf(-1); + + StoreFullTaggedNoWriteBarrier(firstResultSlot, result.value); + StoreFullTaggedNoWriteBarrier(secondResultSlot, result.done); + + return Undefined; +} + @export transitioning macro GetIteratorRecordAfterCreateAsyncFromSyncIterator( context: Context, asyncIterator: IteratorRecord): IteratorRecord { diff --git a/deps/v8/src/builtins/js-to-wasm.tq b/deps/v8/src/builtins/js-to-wasm.tq index 47c7203ed959c6..646658fb7e9809 100644 --- a/deps/v8/src/builtins/js-to-wasm.tq +++ b/deps/v8/src/builtins/js-to-wasm.tq @@ -529,9 +529,10 @@ macro JSToWasmWrapperHelper( } } - const sig = functionData.sig; + const internalFunction = functionData.internal; + const sig = internalFunction.sig; const implicitArg: WasmImportData|WasmTrustedInstanceData = - functionData.internal.implicit_arg; + internalFunction.implicit_arg; const paramCount = *GetRefAt(sig, kWasmParameterCountOffset); diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc index 421491c185bb67..fe4e9daf445a1b 100644 --- a/deps/v8/src/builtins/loong64/builtins-loong64.cc +++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc @@ -3000,6 +3000,18 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); + + // Increment the total invocation count of the function. + __ LoadTaggedField(scratch, + FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); + if (SmiValuesAre31Bits()) { + __ Add_w(scratch, scratch, Operand(Smi::FromInt(1))); + } else { + __ Add_d(scratch, scratch, Operand(Smi::FromInt(1))); + } + __ StoreTaggedField( + scratch, FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); + __ Push(vector); __ Ret(); @@ -3562,6 +3574,47 @@ void Builtins::Generate_WasmFXResume(MacroAssembler* masm) { __ Ret(); } +void Builtins::Generate_WasmFXSuspend(MacroAssembler* masm) { + __ EnterFrame(StackFrame::WASM_STACK_EXIT); + Register tag = WasmFXSuspendDescriptor::GetRegisterParameter(0); + Register cont = WasmFXSuspendDescriptor::GetRegisterParameter(1); + Label resume; + __ Push(cont, kContextRegister); + { + FrameScope scope(masm, StackFrame::MANUAL); + DCHECK_NE(kCArgRegs[4], cont); + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ PrepareCallCFunction(6, scratch); + } + __ Move(kCArgRegs[4], tag); + __ Move(kCArgRegs[5], cont); + __ li(kCArgRegs[0], ExternalReference::isolate_address()); + __ Move(kCArgRegs[1], sp); + __ Move(kCArgRegs[2], fp); + __ LoadLabelRelative(kCArgRegs[3], &resume); + __ CallCFunction(ExternalReference::wasm_suspend_wasmfx_stack(), 6); + } + Register target_stack = a1; + __ Move(target_stack, kReturnRegister0); + cont = kReturnRegister0; + __ Pop(cont, kContextRegister); + + Label ok; + __ Branch(&ok, ne, target_stack, Operand(zero_reg)); + // No handler found. 
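+  // The C call returned a null target stack, so raise a WasmSuspendError
+  // instead of switching to another stack.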
+ __ CallRuntime(Runtime::kThrowWasmSuspendError); + + __ bind(&ok); + DCHECK_EQ(cont, kReturnRegister0); + LoadJumpBuffer(masm, target_stack, true, a3); + __ Trap(); + __ bind(&resume); + __ LeaveFrame(StackFrame::WASM_STACK_EXIT); + __ Ret(); +} + void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) { Register active_stack = a0; __ LoadRootRelative(active_stack, IsolateData::active_stack_offset()); diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index 207b3283a51f29..93d525be2a9a49 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -2892,6 +2892,16 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ Ld(vector, FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); + + // Increment the total invocation count of the function. + __ Ld(scratch, FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); + if (SmiValuesAre31Bits()) { + __ Addu(scratch, scratch, Operand(Smi::FromInt(1))); + } else { + __ Daddu(scratch, scratch, Operand(Smi::FromInt(1))); + } + __ Sd(scratch, FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); + __ Push(vector); __ Ret(); @@ -3076,6 +3086,8 @@ void Builtins::Generate_WasmReject(MacroAssembler* masm) { void Builtins::Generate_WasmFXResume(MacroAssembler* masm) { __ Trap(); } +void Builtins::Generate_WasmFXSuspend(MacroAssembler* masm) { __ Trap(); } + void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) { __ Trap(); } void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { diff --git a/deps/v8/src/builtins/number-builtins-reducer-inl.h b/deps/v8/src/builtins/number-builtins-reducer-inl.h index 6acc62fed58a1f..994d4df4f817ca 100644 --- a/deps/v8/src/builtins/number-builtins-reducer-inl.h +++ b/deps/v8/src/builtins/number-builtins-reducer-inl.h @@ -45,8 +45,10 @@ class NumberBuiltinsReducer : public Next { // Feedback has been set already in `TaggedToWord32OrBigIntImpl`. TSA_DCHECK(this, __ FeedbackHas(BinaryOperationFeedback::kBigInt)); } - GOTO(done, __ CallRuntime_BigIntUnaryOp(isolate_, context, bigint_value, - ::Operation::kBitwiseNot)); + GOTO(done, __ template CallRuntime( + context, {.x = bigint_value, + .opcode = __ SmiConstant( + Smi::FromEnum(::Operation::kBitwiseNot))})); } BIND(done, result); diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index e13608c59a1919..fe0856261e423a 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -3082,6 +3082,13 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { scratch); __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); + // Increment the total invocation count of the function. 
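+  // The count is stored as a Smi in the first element of the feedback
+  // vector's data, so it can be bumped with a tagged add.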
+ __ LoadTaggedField( + scratch, FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray)), r0); + __ Move(r0, Smi::FromInt(1)); + __ AddS64(scratch, scratch, r0); + __ StoreTaggedField( + scratch, FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); __ push(kWasmImplicitArgRegister); __ push(vector); __ Ret(); @@ -3614,6 +3621,43 @@ void Builtins::Generate_WasmFXResume(MacroAssembler* masm) { __ blr(); } +void Builtins::Generate_WasmFXSuspend(MacroAssembler* masm) { + __ EnterFrame(StackFrame::WASM_STACK_EXIT); + Register tag = WasmFXSuspendDescriptor::GetRegisterParameter(0); + Register cont = WasmFXSuspendDescriptor::GetRegisterParameter(1); + Label resume; + __ Push(cont, kContextRegister); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(6, r0); + __ Move(kCArgRegs[4], tag); + __ Move(kCArgRegs[5], cont); + __ Move(kCArgRegs[0], ExternalReference::isolate_address()); + __ Move(kCArgRegs[1], sp); + __ Move(kCArgRegs[2], fp); + __ GetLabelAddress(kCArgRegs[3], &resume); + __ CallCFunction(ExternalReference::wasm_suspend_wasmfx_stack(), 6); + } + Register target_stack = r4; + __ Move(target_stack, kReturnRegister0); + cont = kReturnRegister0; + __ Pop(cont, kContextRegister); + + Label ok; + __ CmpU64(target_stack, Operand(0), r0); + __ bne(&ok); + // No handler found. + __ CallRuntime(Runtime::kThrowWasmSuspendError); + + __ bind(&ok); + DCHECK_EQ(cont, kReturnRegister0); + LoadJumpBuffer(masm, target_stack, true, r6); + __ Trap(); + __ bind(&resume); + __ LeaveFrame(StackFrame::WASM_STACK_EXIT); + __ Ret(); +} + void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) { Register active_stack = r3; __ LoadRootRelative(active_stack, IsolateData::active_stack_offset()); diff --git a/deps/v8/src/builtins/riscv/builtins-riscv.cc b/deps/v8/src/builtins/riscv/builtins-riscv.cc index 2213858ea58e8d..be6ada8a428624 100644 --- a/deps/v8/src/builtins/riscv/builtins-riscv.cc +++ b/deps/v8/src/builtins/riscv/builtins-riscv.cc @@ -3269,6 +3269,18 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); + + // Increment the total invocation count of the function. 
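+  // Smi::FromInt(1) is added to the tagged value directly; the width of the
+  // add matches the Smi representation (32 bits for 31-bit Smis, a full word
+  // otherwise).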
+ __ LoadTaggedField(scratch, + FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); + if (SmiValuesAre31Bits()) { + __ Add32(scratch, scratch, Operand(Smi::FromInt(1))); + } else { + __ AddWord(scratch, scratch, Operand(Smi::FromInt(1))); + } + __ StoreTaggedField( + scratch, FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); + __ Push(vector); __ Ret(); @@ -3380,7 +3392,7 @@ void SwitchToTheCentralStackIfNeeded(MacroAssembler* masm, Register argc_input, Register argv_input) { using ER = ExternalReference; - __ li(kSwitchFlagRegister, 0); + __ mv(kSwitchFlagRegister, zero_reg); __ mv(kOldSPRegister, sp); // Using x2-x4 as temporary registers, because they will be rewritten @@ -4096,6 +4108,44 @@ void Builtins::Generate_WasmFXResume(MacroAssembler* masm) { __ Ret(); } +void Builtins::Generate_WasmFXSuspend(MacroAssembler* masm) { + __ EnterFrame(StackFrame::WASM_STACK_EXIT); + auto regs = RegisterAllocator::WithAllocatableGeneralRegisters(); + DEFINE_REG(scratch); + Register tag = WasmFXSuspendDescriptor::GetRegisterParameter(0); + Register cont = WasmFXSuspendDescriptor::GetRegisterParameter(1); + Label resume; + __ Push(cont, kContextRegister); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(6, scratch); + __ Move(kCArgRegs[4], tag); + __ Move(kCArgRegs[5], cont); + __ li(kCArgRegs[0], ExternalReference::isolate_address()); + __ Move(kCArgRegs[1], sp); + __ Move(kCArgRegs[2], fp); + __ LoadAddress(kCArgRegs[3], &resume); + __ CallCFunction(ExternalReference::wasm_suspend_wasmfx_stack(), 6); + } + Register target_stack = kReturnRegister1; + __ Move(target_stack, kReturnRegister0); + cont = kReturnRegister0; + __ Pop(cont, kContextRegister); + + Label ok; + __ Branch(&ok, ne, target_stack, Operand(zero_reg)); + // No handler found. + __ CallRuntime(Runtime::kThrowWasmSuspendError); + + __ bind(&ok); + DCHECK_EQ(cont, kReturnRegister0); + LoadJumpBuffer(masm, target_stack, true, scratch); + __ Trap(); + __ bind(&resume); + __ LeaveFrame(StackFrame::WASM_STACK_EXIT); + __ Ret(); +} + void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) { auto regs = RegisterAllocator::WithAllocatableGeneralRegisters(); DEFINE_PINNED(active_stack, a0); @@ -4834,7 +4884,8 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, __ SubWord(sp, sp, kNumberOfRegisters * kSystemPointerSize); for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) { if ((saved_regs.bits() & (1 << i)) != 0) { - __ StoreWord(ToRegister(i), MemOperand(sp, kSystemPointerSize * i)); + __ StoreWord(Register::from_code(i), + MemOperand(sp, kSystemPointerSize * i)); } } @@ -4991,7 +5042,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, int offset = (i * kSystemPointerSize) + FrameDescription::registers_offset(); if ((restored_regs.bits() & (1 << i)) != 0) { - __ LoadWord(ToRegister(i), MemOperand(t3, offset)); + __ LoadWord(Register::from_code(i), MemOperand(t3, offset)); } } diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc index c7c42ccb95261a..5bf84f63b6bed8 100644 --- a/deps/v8/src/builtins/s390/builtins-s390.cc +++ b/deps/v8/src/builtins/s390/builtins-s390.cc @@ -3096,6 +3096,13 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); + // Increment the total invocation count of the function. 
+ __ LoadTaggedField(scratch, + FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); + __ Move(r1, Smi::FromInt(1)); + __ AddS64(scratch, scratch, r1); + __ StoreTaggedField( + scratch, FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray))); __ push(kWasmImplicitArgRegister); __ push(vector); __ Ret(); @@ -3616,6 +3623,44 @@ void Builtins::Generate_WasmFXResume(MacroAssembler* masm) { __ Ret(); } +void Builtins::Generate_WasmFXSuspend(MacroAssembler* masm) { + __ EnterFrame(StackFrame::WASM_STACK_EXIT); + Register tag = WasmFXSuspendDescriptor::GetRegisterParameter(0); + Register cont = WasmFXSuspendDescriptor::GetRegisterParameter(1); + Label resume; + __ Push(cont, kContextRegister); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(6, r0); + __ StoreU64(cont, MemOperand(sp, kStackFrameExtraParamSlot * + kSystemPointerSize)); // arg 5 + __ Move(kCArgRegs[4], tag); + __ Move(kCArgRegs[0], ExternalReference::isolate_address()); + __ Move(kCArgRegs[1], sp); + __ Move(kCArgRegs[2], fp); + __ GetLabelAddress(kCArgRegs[3], &resume); + __ CallCFunction(ExternalReference::wasm_suspend_wasmfx_stack(), 6); + } + Register target_stack = r3; + __ Move(target_stack, kReturnRegister0); + cont = kReturnRegister0; + __ Pop(cont, kContextRegister); + + Label ok; + __ CmpU64(target_stack, Operand(0)); + __ bne(&ok); + // No handler found. + __ CallRuntime(Runtime::kThrowWasmSuspendError); + + __ bind(&ok); + DCHECK_EQ(cont, kReturnRegister0); + LoadJumpBuffer(masm, target_stack, true, r5); + __ Trap(); + __ bind(&resume); + __ LeaveFrame(StackFrame::WASM_STACK_EXIT); + __ Ret(); +} + void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) { Register active_stack = r2; __ LoadRootRelative(active_stack, IsolateData::active_stack_offset()); diff --git a/deps/v8/src/builtins/string-match-search.tq b/deps/v8/src/builtins/string-match-search.tq index 54983581bc857d..b86b9e2660c1ab 100644 --- a/deps/v8/src/builtins/string-match-search.tq +++ b/deps/v8/src/builtins/string-match-search.tq @@ -46,10 +46,10 @@ transitioning macro StringMatchSearch( return functor.CallFast(UnsafeCast(heapRegexp), string); } label Slow deferred { - // 2. If regexp is neither undefined nor null, then - if (regexp != Undefined && regexp != Null) { + // 2. If regexp is an Object, then + if (Is(regexp)) { try { - // a. Let fn be ? GetMethod(regexp, @@match/@@search). + // a. Let fn be ? GetMethod(regexp, %Symbol.match%/%Symbol.search%). // b. If fn is not undefined, then const fn = GetMethod(regexp, functor.FnSymbol()) otherwise FnSymbolIsNullOrUndefined; @@ -64,7 +64,7 @@ transitioning macro StringMatchSearch( // 4. Let rx be ? RegExpCreate(regexp, undefined). const rx = regexp::RegExpCreate(context, regexp, kEmptyString); - // 5. Return ? Invoke(rx, @@match/@@search, « string »). + // 5. Return ? Invoke(rx, %Symbol.match%/%Symbol.search%, « string »). const fn = GetProperty(rx, functor.FnSymbol()); return Call(context, fn, rx, string); } diff --git a/deps/v8/src/builtins/string-replaceall.tq b/deps/v8/src/builtins/string-replaceall.tq index 7aca3b06f469cb..250aa88cdc788b 100644 --- a/deps/v8/src/builtins/string-replaceall.tq +++ b/deps/v8/src/builtins/string-replaceall.tq @@ -38,8 +38,8 @@ transitioning javascript builtin StringPrototypeReplaceAll( // 1. Let O be ? RequireObjectCoercible(this value). RequireObjectCoercible(receiver, 'String.prototype.replaceAll'); - // 2. If searchValue is neither undefined nor null, then - if (searchValue != Undefined && searchValue != Null) { + // 2. 
If searchValue is an Object, then + if (Is(searchValue)) { // a. Let isRegExp be ? IsRegExp(searchString). // b. If isRegExp is true, then // i. Let flags be ? Get(searchValue, "flags"). @@ -52,7 +52,7 @@ transitioning javascript builtin StringPrototypeReplaceAll( // TODO(joshualitt): We could easily add fast paths for string // searchValues and potential FastRegExps. - // c. Let replacer be ? GetMethod(searchValue, @@replace). + // c. Let replacer be ? GetMethod(searchValue, %Symbol.replace%). // d. If replacer is not undefined, then // i. Return ? Call(replacer, searchValue, « O, replaceValue »). try { diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq index a16d9e5ec5d675..fca963da6699fd 100644 --- a/deps/v8/src/builtins/wasm.tq +++ b/deps/v8/src/builtins/wasm.tq @@ -33,6 +33,7 @@ extern runtime WasmThrow(Context, Object, FixedArray): JSAny; extern runtime WasmReThrow(Context, Object): JSAny; extern runtime WasmTriggerTierUp(Context, WasmTrustedInstanceData): JSAny; extern runtime WasmStackGuard(Context, Smi): JSAny; +extern runtime WasmStackGuardLoop(Context): JSAny; extern runtime ThrowWasmStackOverflow(Context): JSAny; extern runtime WasmTraceGlobal(NoContext, Smi): JSAny; extern runtime WasmTraceMemory(NoContext, Smi): JSAny; @@ -88,6 +89,7 @@ extern runtime AllocateInSharedHeap(Context, Smi, Smi): HeapObject; extern runtime WasmConfigureAllPrototypesOpt( Context, Object, JSAny, WasmTrustedInstanceData): Object; +extern runtime WasmTypeAssertionFailed(NoContext): never; } extern operator '.wasm_exported_function_data' macro @@ -488,6 +490,10 @@ builtin WasmStackGuard(): JSAny { tail runtime::WasmStackGuard(LoadContextFromFrame(), SmiConstant(0)); } +builtin WasmStackGuardLoop(): JSAny { + tail runtime::WasmStackGuardLoop(LoadContextFromFrame()); +} + builtin WasmStackOverflow(): JSAny { tail runtime::ThrowWasmStackOverflow(LoadContextFromFrame()); } @@ -902,6 +908,10 @@ builtin ThrowWasmTrapFuncSigMismatch(): JSAny { tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapFuncSigMismatch)); } +builtin ThrowWasmTrapNullFunc(): JSAny { + tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapNullFunc)); +} + builtin ThrowWasmTrapDataSegmentOutOfBounds(): JSAny { tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapDataSegmentOutOfBounds)); } @@ -939,6 +949,14 @@ builtin ThrowWasmTrapStringOffsetOutOfBounds(): JSAny { tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapStringOffsetOutOfBounds)); } +builtin WasmTypeAssertionFailed(): JSAny { + runtime::WasmTypeAssertionFailed(kNoContext); +} + +builtin ThrowWasmTrapResume(): JSAny { + tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapResume)); +} + macro GetRefAt(base: From, offset: intptr): &T { return torque_internal::unsafe::NewOffHeapReference( %RawDownCast>(base + offset)); diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index dd0a2b1c6acb26..888ce4f7f8df92 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -5,11 +5,9 @@ #if V8_TARGET_ARCH_X64 #include "src/api/api-arguments.h" -#include "src/base/bits-iterator.h" #include "src/base/iterator.h" #include "src/builtins/builtins-descriptors.h" #include "src/builtins/builtins-inl.h" -#include "src/codegen/code-factory.h" #include "src/codegen/interface-descriptors-inl.h" // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. 
#include "src/codegen/macro-assembler-inl.h" @@ -21,10 +19,8 @@ #include "src/execution/frames.h" #include "src/heap/heap-inl.h" #include "src/logging/counters.h" -#include "src/objects/cell.h" #include "src/objects/code.h" #include "src/objects/debug-objects.h" -#include "src/objects/foreign.h" #include "src/objects/heap-number.h" #include "src/objects/js-generator.h" #include "src/objects/objects-inl.h" @@ -3309,6 +3305,9 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Label allocate_vector, done; __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); + // Increment the total invocation count of the function. + __ SmiAddConstant(FieldOperand(vector, OFFSET_OF_DATA_START(FixedArray)), + Smi::FromInt(1)); __ Push(kWasmImplicitArgRegister); __ Push(vector); __ Push(calling_pc); @@ -3450,15 +3449,16 @@ void LoadTargetJumpBuffer(MacroAssembler* masm, Register target_stack) { // Updates the stack limit and central stack info, and validates the switch. void SwitchStacks(MacroAssembler* masm, ExternalReference fn, - Register old_stack, Label* saved_pc, Register maybe_suspender, + Register target_stack, Label* saved_pc, + Register maybe_suspender, const std::initializer_list keep) { for (auto reg : keep) { __ Push(reg); } { FrameScope scope(masm, StackFrame::MANUAL); - DCHECK(old_stack.is_valid()); - __ Move(kCArgRegs[1], old_stack); + DCHECK(target_stack.is_valid()); + __ Move(kCArgRegs[1], target_stack); bool is_return = fn == ExternalReference::wasm_return_stack(); DCHECK_IMPLIES(is_return, maybe_suspender == no_reg); int num_args = is_return ? 2 : maybe_suspender.is_valid() ? 6 : 5; @@ -4044,6 +4044,50 @@ void Builtins::Generate_WasmFXResume(MacroAssembler* masm) { __ ret(0); } +void Builtins::Generate_WasmFXSuspend(MacroAssembler* masm) { + __ EnterFrame(StackFrame::WASM_STACK_EXIT); + Register tag = WasmFXSuspendDescriptor::GetRegisterParameter(0); + Register cont = WasmFXSuspendDescriptor::GetRegisterParameter(1); + Label resume; + __ Push(cont); + __ Push(kContextRegister); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(6); +#ifdef V8_TARGET_OS_WIN + __ movq(MemOperand(rsp, 4 * kSystemPointerSize), tag); + __ movq(MemOperand(rsp, 5 * kSystemPointerSize), cont); +#else + DCHECK_NE(kCArgRegs[4], cont); + __ Move(kCArgRegs[4], tag); + __ Move(kCArgRegs[5], cont); +#endif + __ Move(kCArgRegs[0], ExternalReference::isolate_address()); + __ Move(kCArgRegs[1], rsp); + __ Move(kCArgRegs[2], rbp); + __ leaq(kCArgRegs[3], MemOperand(&resume, 0)); + __ CallCFunction(ExternalReference::wasm_suspend_wasmfx_stack(), 6); + } + Register target_stack = rbx; + __ Move(target_stack, kReturnRegister0); + __ Pop(kContextRegister); + cont = kReturnRegister0; + __ Pop(cont); + + Label ok; + __ JumpIf(not_equal, target_stack, 0, &ok); + // No handler found. 
+ __ CallRuntime(Runtime::kThrowWasmSuspendError); + + __ bind(&ok); + LoadJumpBuffer(masm, target_stack, true); + __ Trap(); + __ bind(&resume); + __ endbr64(); + __ LeaveFrame(StackFrame::WASM_STACK_EXIT); + __ ret(0); +} + void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) { Register active_stack = rax; __ LoadRootRelative(active_stack, IsolateData::active_stack_offset()); diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc index 0c76db99fd8174..cd2cfa8badbb31 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc @@ -253,9 +253,14 @@ bool RelocInfo::IsCodedSpecially() { bool RelocInfo::IsInConstantPool() { Instruction* instr = reinterpret_cast(pc_); - DCHECK_IMPLIES(instr->IsLdrLiteralW(), COMPRESS_POINTERS_BOOL); - return instr->IsLdrLiteralX() || - (COMPRESS_POINTERS_BOOL && instr->IsLdrLiteralW()); + if (instr->IsLdrLiteralX()) return true; + if (!instr->IsLdrLiteralW()) return false; +#ifdef DEBUG + uint32_t value = *reinterpret_cast(instr->ImmPCOffsetTarget()); + DCHECK(COMPRESS_POINTERS_BOOL || + JSDispatchTable::MaybeValidJSDispatchHandle(value)); +#endif // DEBUG + return true; } uint32_t RelocInfo::wasm_call_tag() const { @@ -4963,7 +4968,7 @@ void Assembler::CheckVeneerPool(bool force_emit, bool require_jump, return; } - DCHECK(pc_offset() < unresolved_branches_first_limit()); + CHECK_LT(pc_offset(), unresolved_branches_first_limit()); // Some short sequence of instruction mustn't be broken up by veneer pool // emission, such sequences are protected by calls to BlockVeneerPoolFor and diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.h b/deps/v8/src/codegen/arm64/instructions-arm64.h index 3d84c916f6f8fa..7fd9b9d990c144 100644 --- a/deps/v8/src/codegen/arm64/instructions-arm64.h +++ b/deps/v8/src/codegen/arm64/instructions-arm64.h @@ -527,7 +527,6 @@ class Instruction { void SetBranchImmTarget(Instruction* target, WritableJitAllocation* jit_allocation = nullptr) { DCHECK(IsAligned(DistanceTo(target), kInstrSize)); - DCHECK(IsValidImmPCOffset(branch_type, DistanceTo(target))); int offset = static_cast(DistanceTo(target) >> kInstrSizeLog2); Instr branch_imm = 0; uint32_t imm_mask = 0; diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index 8e1cc864aa97b8..551fdfbce8dfb0 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -2670,25 +2670,19 @@ void MacroAssembler::JumpJSFunction(Register function_object, void MacroAssembler::ResolveWasmCodePointer(Register target, uint64_t signature_hash) { ASM_CODE_COMMENT(this); - ExternalReference global_jump_table = - ExternalReference::wasm_code_pointer_table(); UseScratchRegisterScope temps(this); Register scratch = temps.AcquireX(); - Mov(scratch, global_jump_table); + Mov(scratch, ExternalReference::wasm_code_pointer_table()); #ifdef V8_ENABLE_SANDBOX - // Mask `target` to be within [0, WasmCodePointerTable::kMaxWasmCodePointers). - static_assert(wasm::WasmCodePointerTable::kMaxWasmCodePointers < - (kMaxUInt32 / sizeof(wasm::WasmCodePointerTableEntry))); - static_assert(base::bits::IsPowerOfTwo( - wasm::WasmCodePointerTable::kMaxWasmCodePointers)); - And(target.W(), target.W(), - wasm::WasmCodePointerTable::kMaxWasmCodePointers - 1); - - // Shift to multiply by `sizeof(WasmCodePointerTableEntry)`. 
- Add(target, scratch, - Operand(target, LSL, - base::bits::WhichPowerOfTwo( - sizeof(wasm::WasmCodePointerTableEntry)))); + static constexpr int kNumRelevantBits = base::bits::WhichPowerOfTwo( + wasm::WasmCodePointerTable::kMaxWasmCodePointers); + static constexpr int kLeftShift = + base::bits::WhichPowerOfTwo(sizeof(wasm::WasmCodePointerTableEntry)); + + // Keep `kNumRelevantBits` bits, shifted by `kLeftShift`. + Ubfiz(target.W(), target.W(), kLeftShift, kNumRelevantBits); + + Add(target, scratch, target); Ldr(scratch, MemOperand(target, wasm::WasmCodePointerTable::kOffsetOfSignatureHash)); @@ -2723,27 +2717,20 @@ void MacroAssembler::CallWasmCodePointer(Register target, } void MacroAssembler::CallWasmCodePointerNoSignatureCheck(Register target) { - ExternalReference global_jump_table = - ExternalReference::wasm_code_pointer_table(); + ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register scratch = temps.AcquireX(); - Mov(scratch, global_jump_table); - - // Mask `target` to be within [0, WasmCodePointerTable::kMaxWasmCodePointers). - static_assert(wasm::WasmCodePointerTable::kMaxWasmCodePointers < - (kMaxUInt32 / sizeof(wasm::WasmCodePointerTableEntry))); - static_assert(base::bits::IsPowerOfTwo( - wasm::WasmCodePointerTable::kMaxWasmCodePointers)); - And(target.W(), target.W(), - wasm::WasmCodePointerTable::kMaxWasmCodePointers - 1); - - // Shift to multiply by `sizeof(WasmCodePointerTableEntry)`. - Add(target, scratch, - Operand(target, LSL, - base::bits::WhichPowerOfTwo( - sizeof(wasm::WasmCodePointerTableEntry)))); + Mov(scratch, ExternalReference::wasm_code_pointer_table()); - Ldr(target, MemOperand(target)); + static constexpr int kNumRelevantBits = base::bits::WhichPowerOfTwo( + wasm::WasmCodePointerTable::kMaxWasmCodePointers); + static constexpr int kLeftShift = + base::bits::WhichPowerOfTwo(sizeof(wasm::WasmCodePointerTableEntry)); + + // Keep `kNumRelevantBits` bits, shifted by `kLeftShift`. + Ubfiz(target.W(), target.W(), kLeftShift, kNumRelevantBits); + + Ldr(target, MemOperand(scratch, target)); Call(target); } @@ -2978,13 +2965,11 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count, Bind(®ular_invoke); } -void MacroAssembler::CallDebugOnFunctionCall( - Register fun, Register new_target, - Register expected_parameter_count_or_dispatch_handle, - Register actual_parameter_count) { +void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, + Register dispatch_handle, + Register actual_parameter_count) { ASM_CODE_COMMENT(this); - DCHECK(!AreAliased(x5, fun, new_target, - expected_parameter_count_or_dispatch_handle, + DCHECK(!AreAliased(x5, fun, new_target, dispatch_handle, actual_parameter_count)); // Load receiver to pass it later to DebugOnFunctionCall hook. Peek(x5, ReceiverOperand()); @@ -2994,18 +2979,17 @@ void MacroAssembler::CallDebugOnFunctionCall( if (!new_target.is_valid()) new_target = padreg; // Save values on stack. - SmiTag(expected_parameter_count_or_dispatch_handle); + // We must not Smi-tag the dispatch handle, because its top bits are + // meaningful; and we also don't need to, because its low bits are zero. + static_assert(kJSDispatchHandleShift >= 1); SmiTag(actual_parameter_count); - Push(expected_parameter_count_or_dispatch_handle, actual_parameter_count, - new_target, fun); + Push(dispatch_handle, actual_parameter_count, new_target, fun); Push(fun, x5); CallRuntime(Runtime::kDebugOnFunctionCall); // Restore values from stack. 
- Pop(fun, new_target, actual_parameter_count, - expected_parameter_count_or_dispatch_handle); + Pop(fun, new_target, actual_parameter_count, dispatch_handle); SmiUntag(actual_parameter_count); - SmiUntag(expected_parameter_count_or_dispatch_handle); } #ifdef V8_ENABLE_LEAPTIERING diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h index 227714b15d3b04..e6761ad670da24 100644 --- a/deps/v8/src/codegen/assembler.h +++ b/deps/v8/src/codegen/assembler.h @@ -359,8 +359,11 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { bool is_constant_pool_available() const { if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { // We need to disable constant pool here for embeded builtins - // because the metadata section is not adjacent to instructions - return constant_pool_available_ && !options().isolate_independent_code; + // because the metadata section is not adjacent to instructions. + // We also need to disable it on Wasm as the constant pool register is not + // yet handled during stack switching. + return constant_pool_available_ && !options().isolate_independent_code && + !options().is_wasm; } else { // Embedded constant pool not supported on this architecture. UNREACHABLE(); diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h index 462b49fc21c281..44831813ecbae0 100644 --- a/deps/v8/src/codegen/bailout-reason.h +++ b/deps/v8/src/codegen/bailout-reason.h @@ -122,7 +122,8 @@ namespace internal { V(kFastCallFallbackInvalid, "Fast call fallback returned incorrect type") \ V(k32BitValueInRegisterIsNotSignExtended, \ "32 bit value in register is not sign-extended") \ - V(kUnexpectedSandboxMode, "The sandboxing mode is not as expected") + V(kUnexpectedSandboxMode, "The sandboxing mode is not as expected") \ + V(kLastReason, "") #define TERMINAL_BAILOUT_MESSAGES_LIST(V) \ V(kFunctionTooBig, "Function is too big to be optimized") \ diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc index fceedaa5400340..6a533de4906298 100644 --- a/deps/v8/src/codegen/code-stub-assembler.cc +++ b/deps/v8/src/codegen/code-stub-assembler.cc @@ -2690,11 +2690,10 @@ TNode CodeStubAssembler::LoadNameHashAssumeComputed(TNode name) { TNode CodeStubAssembler::LoadNameHash(TNode name, Label* if_hash_not_computed) { + DCHECK_NOT_NULL(if_hash_not_computed); TNode raw_hash_field = LoadNameRawHashField(name); - if (if_hash_not_computed != nullptr) { - GotoIf(IsSetWord32(raw_hash_field, Name::kHashNotComputedMask), - if_hash_not_computed); - } + GotoIf(IsSetWord32(raw_hash_field, Name::kHashNotComputedMask), + if_hash_not_computed); return DecodeWord32(raw_hash_field); } @@ -7950,6 +7949,7 @@ TNode CodeStubAssembler::IsPromiseSpeciesProtectorCellInvalid() { return TaggedEqual(cell_value, invalid); } +// TODO(petamoriken): delete this unused function TNode CodeStubAssembler::IsNumberStringNotRegexpLikeProtectorCellInvalid() { TNode invalid = SmiConstant(Protectors::kProtectorInvalid); @@ -11672,7 +11672,8 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty( var_value = CallGetterIfAccessor( value_or_accessor, object, var_details.value(), context, - object, next_key, &slow_load, kCallJSGetterUseCachedName); + object, kExpectingJSReceiver, next_key, &slow_load, + kCallJSGetterUseCachedName); Goto(&value_ready); BIND(&slow_load); @@ -12168,15 +12169,11 @@ template void CodeStubAssembler::LoadPropertyFromDictionary( TNode dictionary, TNode name_index, TVariable* var_details, TVariable* var_value); -// |value| is the property 
backing store's contents, which is either a value or -// an accessor pair, as specified by |details|. |holder| is a JSReceiver or a -// PropertyCell. Returns either the original value, or the result of the getter -// call. TNode CodeStubAssembler::CallGetterIfAccessor( - TNode value, TNode> holder, + TNode value, std::optional> holder, TNode details, TNode context, TNode receiver, - TNode name, Label* if_bailout, GetOwnPropertyMode mode, - ExpectedReceiverMode expected_receiver_mode) { + ExpectedReceiverMode expected_receiver_mode, TNode name, + Label* if_bailout, GetOwnPropertyMode mode) { TVARIABLE(Object, var_value, value); Label done(this), if_accessor_info(this, Label::kDeferred); @@ -12217,44 +12214,51 @@ TNode CodeStubAssembler::CallGetterIfAccessor( BIND(&if_function_template_info); { - Label use_cached_property(this); - TNode cached_property_name = LoadObjectField( - getter, FunctionTemplateInfo::kCachedPropertyNameOffset); - - Label* has_cached_property = mode == kCallJSGetterUseCachedName - ? &use_cached_property - : if_bailout; - GotoIfNot(IsTheHole(cached_property_name), has_cached_property); - - TNode js_receiver; - switch (expected_receiver_mode) { - case kExpectingJSReceiver: - js_receiver = CAST(receiver); - break; - case kExpectingAnyReceiver: - // TODO(ishell): in case the function template info has a signature - // and receiver is not a JSReceiver the signature check in - // CallFunctionTemplate builtin will fail anyway, so we can short - // cut it here and throw kIllegalInvocation immediately. - js_receiver = ToObject_Inline(context, receiver); - break; - } - TNode holder_receiver = CAST(holder); - TNode creation_context = - GetCreationContext(holder_receiver, if_bailout); - TNode caller_context = context; - var_value = CallBuiltin( - Builtin::kCallFunctionTemplate_Generic, creation_context, getter, - Int32Constant(i::JSParameterCount(0)), caller_context, js_receiver); - Goto(&done); + if (holder.has_value()) { + Label use_cached_property(this); + TNode cached_property_name = LoadObjectField( + getter, FunctionTemplateInfo::kCachedPropertyNameOffset); + + Label* has_cached_property = mode == kCallJSGetterUseCachedName + ? &use_cached_property + : if_bailout; + GotoIfNot(IsTheHole(cached_property_name), has_cached_property); + + TNode js_receiver; + switch (expected_receiver_mode) { + case kExpectingJSReceiver: + js_receiver = CAST(receiver); + break; + case kExpectingAnyReceiver: + // TODO(ishell): in case the function template info has a + // signature and receiver is not a JSReceiver the signature check + // in CallFunctionTemplate builtin will fail anyway, so we can + // short cut it here and throw kIllegalInvocation immediately. 
+ js_receiver = ToObject_Inline(context, receiver); + break; + } + TNode holder_receiver = *holder; + TNode creation_context = + GetCreationContext(holder_receiver, if_bailout); + TNode caller_context = context; + var_value = CallBuiltin(Builtin::kCallFunctionTemplate_Generic, + creation_context, getter, + Int32Constant(i::JSParameterCount(0)), + caller_context, js_receiver); + Goto(&done); - if (mode == kCallJSGetterUseCachedName) { - Bind(&use_cached_property); + if (mode == kCallJSGetterUseCachedName) { + Bind(&use_cached_property); - var_value = - GetProperty(context, holder_receiver, cached_property_name); + var_value = + GetProperty(context, holder_receiver, cached_property_name); - Goto(&done); + Goto(&done); + } + } else { + // |holder| must be available in order to handle lazy AccessorPair + // case (we need it for computing the function's context). + Unreachable(); } } } else { @@ -12266,56 +12270,61 @@ TNode CodeStubAssembler::CallGetterIfAccessor( // AccessorInfo case. BIND(&if_accessor_info); { - TNode accessor_info = CAST(value); - Label if_array(this), if_function(this), if_wrapper(this); - - // Dispatch based on {holder} instance type. - TNode holder_map = LoadMap(holder); - TNode holder_instance_type = LoadMapInstanceType(holder_map); - GotoIf(IsJSArrayInstanceType(holder_instance_type), &if_array); - GotoIf(IsJSFunctionInstanceType(holder_instance_type), &if_function); - Branch(IsJSPrimitiveWrapperInstanceType(holder_instance_type), &if_wrapper, - if_bailout); - - // JSArray AccessorInfo case. - BIND(&if_array); - { - // We only deal with the "length" accessor on JSArray. - GotoIfNot(IsLengthString( - LoadObjectField(accessor_info, AccessorInfo::kNameOffset)), - if_bailout); - TNode array = CAST(holder); - var_value = LoadJSArrayLength(array); - Goto(&done); - } - - // JSFunction AccessorInfo case. - BIND(&if_function); - { - // We only deal with the "prototype" accessor on JSFunction here. - GotoIfNot(IsPrototypeString( - LoadObjectField(accessor_info, AccessorInfo::kNameOffset)), - if_bailout); + if (holder.has_value()) { + TNode accessor_info = CAST(value); + Label if_array(this), if_function(this), if_wrapper(this); + // Dispatch based on {holder} instance type. + TNode holder_map = LoadMap(*holder); + TNode holder_instance_type = LoadMapInstanceType(holder_map); + GotoIf(IsJSArrayInstanceType(holder_instance_type), &if_array); + GotoIf(IsJSFunctionInstanceType(holder_instance_type), &if_function); + Branch(IsJSPrimitiveWrapperInstanceType(holder_instance_type), + &if_wrapper, if_bailout); + + // JSArray AccessorInfo case. + BIND(&if_array); + { + // We only deal with the "length" accessor on JSArray. + GotoIfNot(IsLengthString(LoadObjectField(accessor_info, + AccessorInfo::kNameOffset)), + if_bailout); + TNode array = CAST(*holder); + var_value = LoadJSArrayLength(array); + Goto(&done); + } - TNode function = CAST(holder); - GotoIfPrototypeRequiresRuntimeLookup(function, holder_map, if_bailout); - var_value = LoadJSFunctionPrototype(function, if_bailout); - Goto(&done); - } + // JSFunction AccessorInfo case. + BIND(&if_function); + { + // We only deal with the "prototype" accessor on JSFunction here. + GotoIfNot(IsPrototypeString(LoadObjectField(accessor_info, + AccessorInfo::kNameOffset)), + if_bailout); + + TNode function = CAST(*holder); + GotoIfPrototypeRequiresRuntimeLookup(function, holder_map, if_bailout); + var_value = LoadJSFunctionPrototype(function, if_bailout); + Goto(&done); + } - // JSPrimitiveWrapper AccessorInfo case. 
- BIND(&if_wrapper); - { - // We only deal with the "length" accessor on JSPrimitiveWrapper string - // wrappers. - GotoIfNot(IsLengthString( - LoadObjectField(accessor_info, AccessorInfo::kNameOffset)), - if_bailout); - TNode holder_value = LoadJSPrimitiveWrapperValue(CAST(holder)); - GotoIfNot(TaggedIsNotSmi(holder_value), if_bailout); - GotoIfNot(IsString(CAST(holder_value)), if_bailout); - var_value = LoadStringLengthAsSmi(CAST(holder_value)); - Goto(&done); + // JSPrimitiveWrapper AccessorInfo case. + BIND(&if_wrapper); + { + // We only deal with the "length" accessor on JSPrimitiveWrapper string + // wrappers. + GotoIfNot(IsLengthString(LoadObjectField(accessor_info, + AccessorInfo::kNameOffset)), + if_bailout); + TNode holder_value = LoadJSPrimitiveWrapperValue(CAST(*holder)); + GotoIfNot(TaggedIsNotSmi(holder_value), if_bailout); + GotoIfNot(IsString(CAST(holder_value)), if_bailout); + var_value = LoadStringLengthAsSmi(CAST(holder_value)); + Goto(&done); + } + } else { + // |holder| must be available in order to handle AccessorInfo case (we + // need to pass it to the callback). + Unreachable(); } } @@ -12400,7 +12409,7 @@ void CodeStubAssembler::TryGetOwnProperty( } TNode value = CallGetterIfAccessor( var_value->value(), object, var_details->value(), context, receiver, - unique_name, if_bailout, mode, expected_receiver_mode); + expected_receiver_mode, unique_name, if_bailout, mode); *var_value = value; Goto(if_found_value); } @@ -17394,7 +17403,9 @@ TNode CodeStubAssembler::GetResultValueForHole(TNode value) { std::pair, TNode> CodeStubAssembler::CallIteratorNext( TNode iterator, TNode next_method, TNode context) { Label callable(this), not_callable(this, Label::kDeferred); - Branch(IsCallable(CAST(next_method)), &callable, ¬_callable); + GotoIf(TaggedIsSmi(next_method), ¬_callable); + Branch(IsCallable(UncheckedCast(next_method)), &callable, + ¬_callable); BIND(¬_callable); { CallRuntime(Runtime::kThrowCalledNonCallable, context, next_method); @@ -17439,6 +17450,9 @@ ForOfNextResult CodeStubAssembler::ForOfNextHelper(TNode context, TNode is_array_iterator = IsJSArrayIterator(CAST(object)); GotoIfNot(is_array_iterator, &slow_path); + // Check that the array iterator prototype chain is intact. + GotoIf(IsArrayIteratorProtectorCellInvalid(), &slow_path); + // Fast path for JSArrayIterator. 
{ TNode array_iterator = CAST(object); @@ -18180,7 +18194,8 @@ TNode CodeStubAssembler::NeedsAnyPromiseHooks(TNode flags) { } TNode CodeStubAssembler::LoadBuiltin(TNode builtin_id) { - CSA_DCHECK(this, SmiBelow(builtin_id, SmiConstant(Builtins::kBuiltinCount))); + CSA_SBXCHECK(this, + SmiBelow(builtin_id, SmiConstant(Builtins::kBuiltinCount))); TNode offset = ElementOffsetFromIndex(SmiToBInt(builtin_id), SYSTEM_POINTER_ELEMENTS); @@ -18250,6 +18265,7 @@ void CodeStubAssembler::LoadSharedFunctionInfoTrustedDataAndDispatch( #endif } +// LINT.IfChange(GetSharedFunctionInfoCode) TNode CodeStubAssembler::GetSharedFunctionInfoCode( TNode shared_info, TVariable* data_type_out, Label* if_compile_lazy) { @@ -18396,6 +18412,7 @@ TNode CodeStubAssembler::GetSharedFunctionInfoCode( BIND(&done); return sfi_code.value(); } +// LINT.ThenChange(/src/objects/shared-function-info.cc:GetSharedFunctionInfoCode) TNode CodeStubAssembler::LoadCodeInstructionStart( TNode code, CodeEntrypointTag tag) { diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h index 0ff1bd3f53ae80..00e7b9ea56fdb5 100644 --- a/deps/v8/src/codegen/code-stub-assembler.h +++ b/deps/v8/src/codegen/code-stub-assembler.h @@ -1461,10 +1461,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsDictionaryMap(TNode map); // Load the Name::hash() value of a name as an uint32 value. - // If {if_hash_not_computed} label is specified then it also checks if - // hash is actually computed. - TNode LoadNameHash(TNode name, - Label* if_hash_not_computed = nullptr); + // The caller is responsible to deal with hashes that are not computed via + // |if_hash_not_computed|. If the hash is guaranteed to be computed, use + // LoadNameHashAssumeComputed instead. + TNode LoadNameHash(TNode name, Label* if_hash_not_computed); + // Load the Name::hash() value of a name as uint32 value. Use only if the hash + // is guaranteed to be computed, otherwise use LoadNameHash and handle the + // non-computed case manually. TNode LoadNameHashAssumeComputed(TNode name); // Load the Name::RawHash() value of a name as an uint32 value. Follows @@ -4643,12 +4646,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler const ForEachKeyValueFunction& body, Label* bailout); + // |value| is the property backing store's contents, which is either a value + // or an accessor pair, as specified by |details|. |holder| is a JSReceiver + // or empty std::nullopt if holder is not available. + // Returns either the original value, or the result of the getter call. 
TNode CallGetterIfAccessor( - TNode value, TNode> holder, + TNode value, std::optional> holder, TNode details, TNode context, TNode receiver, - TNode name, Label* if_bailout, - GetOwnPropertyMode mode = kCallJSGetterDontUseCachedName, - ExpectedReceiverMode expected_receiver_mode = kExpectingJSReceiver); + ExpectedReceiverMode expected_receiver_mode, TNode name, + Label* if_bailout, + GetOwnPropertyMode mode = kCallJSGetterDontUseCachedName); TNode TryToIntptr(TNode key, Label* if_not_intptr, TVariable* var_instance_type = nullptr); diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc index a22093e1c011de..e594c4ca179ccc 100644 --- a/deps/v8/src/codegen/compiler.cc +++ b/deps/v8/src/codegen/compiler.cc @@ -54,6 +54,8 @@ #include "src/objects/feedback-cell-inl.h" #include "src/objects/js-function-inl.h" #include "src/objects/js-function.h" +#include "src/objects/literal-objects-inl.h" +#include "src/objects/literal-objects.h" #include "src/objects/map.h" #include "src/objects/object-list-macros.h" #include "src/objects/objects-body-descriptors-inl.h" @@ -957,12 +959,12 @@ bool FinalizeDeferredUnoptimizedCompilationJobs( } // A wrapper to access the optimized code cache slots on the feedback vector. -class OptimizedCodeCache : public AllStatic { +class OptimizedOSRCodeCache : public AllStatic { public: static V8_WARN_UNUSED_RESULT MaybeHandle Get( Isolate* isolate, DirectHandle function, BytecodeOffset osr_offset, CodeKind code_kind) { - DCHECK_IMPLIES(V8_ENABLE_LEAPTIERING_BOOL, IsOSR(osr_offset)); + CHECK(IsOSR(osr_offset)); if (!CodeKindIsStoredInOptimizedCodeCache(code_kind)) return {}; if (!function->has_feedback_vector()) return {}; @@ -972,38 +974,22 @@ class OptimizedCodeCache : public AllStatic { Tagged code; Tagged feedback_vector = function->feedback_vector(); - if (IsOSR(osr_offset)) { - Handle bytecode(shared->GetBytecodeArray(isolate), - isolate); - interpreter::BytecodeArrayIterator it(bytecode, osr_offset.ToInt()); - // Bytecode may be different, so make sure we're at a valid OSR entry. - SBXCHECK(it.CurrentBytecodeIsValidOSREntry()); - std::optional> maybe_code = - feedback_vector->GetOptimizedOsrCode(isolate, bytecode, - it.GetSlotOperand(2)); - if (maybe_code.has_value()) code = maybe_code.value(); - } else { -#ifdef V8_ENABLE_LEAPTIERING - UNREACHABLE(); -#else - feedback_vector->EvictOptimizedCodeMarkedForDeoptimization( - isolate, shared, "OptimizedCodeCache::Get"); - code = feedback_vector->optimized_code(isolate); -#endif // V8_ENABLE_LEAPTIERING - } - // Normal tierup should never request a code-kind we already have. In case - // of OSR it can happen that we OSR from ignition to turbofan. This is - // explicitly allowed here by reusing any larger-kinded than requested - // code. - DCHECK_IMPLIES(!code.is_null() && code->kind() > code_kind, - IsOSR(osr_offset)); + Handle bytecode(shared->GetBytecodeArray(isolate), isolate); + interpreter::BytecodeArrayIterator it(bytecode, osr_offset.ToInt()); + // Bytecode may be different, so make sure we're at a valid OSR entry. 
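+    // (SBXCHECK rather than DCHECK so that this validation also runs in
+    // release builds when the sandbox is enabled.)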
+ SBXCHECK(it.CurrentBytecodeIsValidOSREntry()); + std::optional> maybe_code = + feedback_vector->GetOptimizedOsrCode(isolate, bytecode, + it.GetSlotOperand(2)); + if (maybe_code.has_value()) code = maybe_code.value(); + if (code.is_null() || code->kind() < code_kind) return {}; DCHECK(!code->marked_for_deoptimization()); DCHECK(shared->is_compiled()); DCHECK(CodeKindIsStoredInOptimizedCodeCache(code->kind())); - DCHECK_IMPLIES(IsOSR(osr_offset), CodeKindCanOSR(code->kind())); + DCHECK(CodeKindCanOSR(code->kind())); CompilerTracer::TraceOptimizedCodeCacheHit(isolate, function, osr_offset, code_kind); @@ -1013,44 +999,20 @@ class OptimizedCodeCache : public AllStatic { static void Insert(Isolate* isolate, Tagged function, BytecodeOffset osr_offset, Tagged code, bool is_function_context_specializing) { - DCHECK_IMPLIES(V8_ENABLE_LEAPTIERING_BOOL, IsOSR(osr_offset)); + CHECK(IsOSR(osr_offset)); const CodeKind kind = code->kind(); if (!CodeKindIsStoredInOptimizedCodeCache(kind)) return; Tagged feedback_vector = function->feedback_vector(); - if (IsOSR(osr_offset)) { - DCHECK(CodeKindCanOSR(kind)); - DCHECK(!is_function_context_specializing); - Tagged shared = function->shared(); - Handle bytecode(shared->GetBytecodeArray(isolate), - isolate); - interpreter::BytecodeArrayIterator it(bytecode, osr_offset.ToInt()); - // Bytecode may be different, so make sure we're at a valid OSR entry. - SBXCHECK(it.CurrentBytecodeIsValidOSREntry()); - feedback_vector->SetOptimizedOsrCode(isolate, it.GetSlotOperand(2), code); - return; - } - -#ifdef V8_ENABLE_LEAPTIERING - UNREACHABLE(); -#else - DCHECK(!IsOSR(osr_offset)); - - if (is_function_context_specializing) { - // Function context specialization folds-in the function context, so no - // sharing can occur. Make sure the optimized code cache is cleared. - // Only do so if the specialized code's kind matches the cached code kind. - if (feedback_vector->has_optimized_code() && - feedback_vector->optimized_code(isolate)->kind() == code->kind()) { - feedback_vector->ClearOptimizedCode(); - } - return; - } - - function->shared()->set_function_context_independent_compiled(true); - feedback_vector->SetOptimizedCode(isolate, code); -#endif // V8_ENABLE_LEAPTIERING + DCHECK(CodeKindCanOSR(kind)); + DCHECK(!is_function_context_specializing); + Tagged shared = function->shared(); + Handle bytecode(shared->GetBytecodeArray(isolate), isolate); + interpreter::BytecodeArrayIterator it(bytecode, osr_offset.ToInt()); + // Bytecode may be different, so make sure we're at a valid OSR entry. + SBXCHECK(it.CurrentBytecodeIsValidOSREntry()); + feedback_vector->SetOptimizedOsrCode(isolate, it.GetSlotOperand(2), code); } }; @@ -1102,8 +1064,8 @@ bool CompileTurbofan_NotConcurrent(Isolate* isolate, // Success! job->RecordCompilationStats(ConcurrencyMode::kSynchronous, isolate); DCHECK(!isolate->has_exception()); - if (!V8_ENABLE_LEAPTIERING_BOOL || job->compilation_info()->is_osr()) { - OptimizedCodeCache::Insert( + if (job->compilation_info()->is_osr()) { + OptimizedOSRCodeCache::Insert( isolate, *compilation_info->closure(), compilation_info->osr_offset(), *compilation_info->code(), compilation_info->function_context_specializing()); @@ -1353,8 +1315,7 @@ MaybeHandle GetOrCompileOptimized( // Always reset the OSR urgency to ensure we reset it on function entry. 
int invocation_count = function->feedback_vector()->invocation_count(kRelaxedLoad); - if (!(V8_UNLIKELY(v8_flags.testing_d8_test_runner || - v8_flags.allow_natives_syntax) && + if (!(V8_UNLIKELY(v8_flags.allow_natives_syntax) && ManualOptimizationTable::IsMarkedForManualOptimization(isolate, *function)) && invocation_count < v8_flags.minimum_invocations_before_optimization) { @@ -1380,11 +1341,10 @@ MaybeHandle GetOrCompileOptimized( // turbo_filter. if (!ShouldOptimize(code_kind, shared)) return {}; - if (!V8_ENABLE_LEAPTIERING_BOOL || IsOSR(osr_offset)) { + if (IsOSR(osr_offset)) { Handle cached_code; - if (OptimizedCodeCache::Get(isolate, function, osr_offset, code_kind) + if (OptimizedOSRCodeCache::Get(isolate, function, osr_offset, code_kind) .ToHandle(&cached_code)) { - DCHECK_IMPLIES(!IsOSR(osr_offset), cached_code->kind() <= code_kind); return cached_code; } @@ -1834,6 +1794,8 @@ class MergeAssumptionChecker final : public ObjectVisitor { if (IsSharedFunctionInfo(obj)) { CHECK((current_object_kind_ == kConstantPool && !is_weak) || (current_object_kind_ == kScriptInfosList && is_weak) || + (current_object_kind_ == kObjectBoilerplateDescription && + !is_weak) || (IsScript(host) && current.address() == host.address() + @@ -1850,6 +1812,11 @@ class MergeAssumptionChecker final : public ObjectVisitor { // Constant pools can contain nested fixed arrays, which in turn can // point to SFIs. QueueVisit(obj, kConstantPool); + } else if (IsObjectBoilerplateDescription(obj) && + current_object_kind_ == kConstantPool) { + // Constant pools can contain ObjectBoilerplates, which in turn can + // point to SFIs. + QueueVisit(obj, kObjectBoilerplateDescription); } QueueVisit(obj, kNormalObject); @@ -1878,6 +1845,7 @@ class MergeAssumptionChecker final : public ObjectVisitor { kNormalObject, kConstantPool, kScriptInfosList, + kObjectBoilerplateDescription }; // If the object hasn't yet been added to the worklist, add it. Subsequent @@ -2209,6 +2177,22 @@ class ConstantPoolPointerForwarder { } else if (!scope_infos_to_update_.empty() && IsScopeInfo(heap_obj, cage_base_)) { VisitScopeInfo(constant_pool, i, Cast(heap_obj)); + } else if (IsObjectBoilerplateDescription(heap_obj, cage_base_)) { + VisitObjectBoilerplateDescription( + Cast(heap_obj)); + } + } + + void VisitObjectBoilerplateDescription( + Tagged boilerplate) { + for (int idx = 0; idx < boilerplate->boilerplate_properties_count(); + ++idx) { + // there is an SFI at entry "idx" + if (Tagged new_sfi; + TryCast(boilerplate->value(idx), &new_sfi)) { + // The same SFI on the old script by function_literal_id + VisitSharedFunctionInfo(boilerplate, idx, new_sfi); + } } } @@ -3161,7 +3145,7 @@ void Compiler::CompileOptimized(Isolate* isolate, } #ifdef DEBUG - if (V8_ENABLE_LEAPTIERING_BOOL && mode == ConcurrencyMode::kConcurrent) { + if (mode == ConcurrencyMode::kConcurrent) { DCHECK_IMPLIES(code_kind == CodeKind::MAGLEV, !function->ActiveTierIsMaglev(isolate)); DCHECK_IMPLIES(code_kind == CodeKind::TURBOFAN_JS, @@ -3178,7 +3162,6 @@ void Compiler::CompileOptimized(Isolate* isolate, DCHECK_IMPLIES(v8_flags.log_function_events, function->IsLoggingRequested(isolate)); } else { -#ifdef V8_ENABLE_LEAPTIERING // We can get here from CompileLazy when we have requested optimized code // which isn't yet ready. Without Leaptiering, we'll already have set the // function's code to the bytecode/baseline code on the SFI. 
However, in the @@ -3187,7 +3170,6 @@ void Compiler::CompileOptimized(Isolate* isolate, function->UpdateCodeKeepTieringRequests( isolate, function->shared()->GetCode(isolate)); } -#endif // V8_ENABLE_LEAPTIERING } #ifdef DEBUG @@ -4413,8 +4395,8 @@ void Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job, if (V8_LIKELY(use_result)) { function->SetTieringInProgress(isolate, false, job->compilation_info()->osr_offset()); - if (!V8_ENABLE_LEAPTIERING_BOOL || IsOSR(osr_offset)) { - OptimizedCodeCache::Insert( + if (IsOSR(osr_offset)) { + OptimizedOSRCodeCache::Insert( isolate, *compilation_info->closure(), compilation_info->osr_offset(), *compilation_info->code(), compilation_info->function_context_specializing()); @@ -4494,9 +4476,9 @@ void Compiler::FinalizeMaglevCompilationJob(maglev::MaglevCompilationJob* job, } DCHECK(code->is_maglevved()); - if (!V8_ENABLE_LEAPTIERING_BOOL || IsOSR(osr_offset)) { - OptimizedCodeCache::Insert(isolate, *function, osr_offset, *code, - job->specialize_to_function_context()); + if (IsOSR(osr_offset)) { + OptimizedOSRCodeCache::Insert(isolate, *function, osr_offset, *code, + job->specialize_to_function_context()); } RecordMaglevFunctionCompilation(isolate, function, @@ -4531,24 +4513,6 @@ void Compiler::PostInstantiation(Isolate* isolate, // are just creating a new closure that shares the same feedback cell. JSFunction::InitializeFeedbackCell(isolate, function, is_compiled_scope, false); - -#ifndef V8_ENABLE_LEAPTIERING - if (function->has_feedback_vector()) { - // Evict any deoptimized code on feedback vector. We need to do this after - // creating the closure, since any heap allocations could trigger a GC and - // deoptimized the code on the feedback vector. So check for any - // deoptimized code just before installing it on the function. - function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization( - isolate, *shared, "new function from shared function info"); - Tagged code = function->feedback_vector()->optimized_code(isolate); - if (!code.is_null()) { - // Caching of optimized code enabled and optimized code found. 
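
Earlier in the compiler.cc hunks, both MergeAssumptionChecker and ConstantPoolPointerForwarder learn that a constant pool can reach SharedFunctionInfos indirectly through an ObjectBoilerplateDescription. The shape of that traversal, as a minimal standalone sketch (every type here is a simplified stand-in, not a V8 class):

    #include <variant>
    #include <vector>

    // A constant pool entry is either a leaf value, a shared function info, or
    // an object boilerplate whose values may themselves hold SFIs.
    struct Sfi { int function_literal_id; };
    struct Boilerplate { std::vector<std::variant<int, Sfi>> values; };
    using PoolEntry = std::variant<int, Sfi, Boilerplate>;

    // SFIs are forwarded whether they sit directly in the constant pool or are
    // nested one level down inside a boilerplate, mirroring the new
    // VisitObjectBoilerplateDescription hook.
    void ForwardSfis(std::vector<PoolEntry>& pool, void (*forward)(Sfi&)) {
      for (PoolEntry& entry : pool) {
        if (Sfi* sfi = std::get_if<Sfi>(&entry)) {
          forward(*sfi);
        } else if (Boilerplate* bp = std::get_if<Boilerplate>(&entry)) {
          for (auto& value : bp->values) {
            if (Sfi* nested = std::get_if<Sfi>(&value)) forward(*nested);
          }
        }
      }
    }
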
- DCHECK(!code->marked_for_deoptimization()); - DCHECK(function->shared()->is_compiled()); - function->UpdateOptimizedCode(isolate, code); - } - } -#endif // !V8_ENABLE_LEAPTIERING } if (shared->is_toplevel() || shared->is_wrapped()) { diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc index 8db1c3f45df393..43cc237b43bba1 100644 --- a/deps/v8/src/codegen/external-reference.cc +++ b/deps/v8/src/codegen/external-reference.cc @@ -557,7 +557,7 @@ ExternalPointerHandle AllocateAndInitializeYoungExternalPointerTableEntry( #ifdef V8_ENABLE_SANDBOX return isolate->external_pointer_table().AllocateAndInitializeEntry( isolate->heap()->young_external_pointer_space(), pointer, - kExternalObjectValueTag); + kFastApiExternalTypeTag); #else return 0; #endif // V8_ENABLE_SANDBOX @@ -613,6 +613,7 @@ FUNCTION_REFERENCE(wasm_suspender_has_js_frames, wasm::suspender_has_js_frames) FUNCTION_REFERENCE(wasm_suspend_stack, wasm::suspend_stack) FUNCTION_REFERENCE(wasm_resume_jspi_stack, wasm::resume_jspi_stack) FUNCTION_REFERENCE(wasm_resume_wasmfx_stack, wasm::resume_wasmfx_stack) +FUNCTION_REFERENCE(wasm_suspend_wasmfx_stack, wasm::suspend_wasmfx_stack) FUNCTION_REFERENCE(wasm_return_stack, wasm::return_stack) FUNCTION_REFERENCE(wasm_switch_to_the_central_stack, wasm::switch_to_the_central_stack) @@ -1252,14 +1253,16 @@ FUNCTION_REFERENCE(libc_memset_function, libc_memset) void relaxed_memcpy(volatile base::Atomic8* dest, volatile const base::Atomic8* src, size_t n) { - base::Relaxed_Memcpy(dest, src, n); + base::Relaxed_Memcpy(const_cast(dest), + const_cast(src), n); } FUNCTION_REFERENCE(relaxed_memcpy_function, relaxed_memcpy) void relaxed_memmove(volatile base::Atomic8* dest, volatile const base::Atomic8* src, size_t n) { - base::Relaxed_Memmove(dest, src, n); + base::Relaxed_Memmove(const_cast(dest), + const_cast(src), n); } FUNCTION_REFERENCE(relaxed_memmove_function, relaxed_memmove) diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h index 47b6ff43f1d138..3d0aa54ce95e6d 100644 --- a/deps/v8/src/codegen/external-reference.h +++ b/deps/v8/src/codegen/external-reference.h @@ -260,6 +260,7 @@ enum class IsolateFieldId : uint8_t; IF_WASM(V, wasm_suspend_stack, "wasm_suspend_stack") \ IF_WASM(V, wasm_resume_jspi_stack, "wasm_resume_jspi_stack") \ IF_WASM(V, wasm_resume_wasmfx_stack, "wasm_resume_wasmfx_stack") \ + IF_WASM(V, wasm_suspend_wasmfx_stack, "wasm_suspend_wasmfx_stack") \ IF_WASM(V, wasm_return_stack, "wasm_return_stack") \ IF_WASM(V, wasm_switch_to_the_central_stack, \ "wasm::switch_to_the_central_stack") \ diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc index d6c9004c3fc139..b75a864a6545cc 100644 --- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc @@ -2505,9 +2505,10 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling, if (argc_operand == nullptr) { DCHECK_NE(slots_to_drop_on_return, 0); - __ ret(slots_to_drop_on_return * kSystemPointerSize); + __ Ret(slots_to_drop_on_return * kSystemPointerSize, scratch); + } else { - __ pop(scratch); + __ PopReturnAddressTo(scratch); // {argc_operand} was loaded into {argc_reg} above. 
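
The relaxed_memcpy and relaxed_memmove shims above now cast away the volatile qualifier explicitly, because cv-qualifiers cannot be dropped implicitly at a call site and const_cast is the cast that removes them. A minimal standalone illustration of that rule (plain uint8_t instead of base::Atomic8):

    #include <cstddef>
    #include <cstdint>

    void PlainCopy(uint8_t* dest, const uint8_t* src, size_t n) {
      for (size_t i = 0; i < n; i++) dest[i] = src[i];
    }

    void VolatileCopy(volatile uint8_t* dest, volatile const uint8_t* src,
                      size_t n) {
      // PlainCopy(dest, src, n);  // would not compile: `volatile` cannot be
      //                           // dropped implicitly
      PlainCopy(const_cast<uint8_t*>(dest), const_cast<const uint8_t*>(src), n);
    }
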
__ lea(esp, Operand(esp, argc_reg, times_system_pointer_size, slots_to_drop_on_return * kSystemPointerSize)); diff --git a/deps/v8/src/codegen/interface-descriptors-inl.h b/deps/v8/src/codegen/interface-descriptors-inl.h index 2288da3f8445b1..95e55f5f477c28 100644 --- a/deps/v8/src/codegen/interface-descriptors-inl.h +++ b/deps/v8/src/codegen/interface-descriptors-inl.h @@ -815,6 +815,10 @@ constexpr auto WasmToJSWrapperDescriptor::return_double_registers() { constexpr auto WasmFXResumeDescriptor::registers() { return RegisterArray(wasm::kGpParamRegisters[0]); } +constexpr auto WasmFXSuspendDescriptor::registers() { + // Reg 0 is the context register. + return RegisterArray(wasm::kGpParamRegisters[1], wasm::kGpParamRegisters[2]); +} #endif #define DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER(Name, DescriptorName) \ diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h index 1a3da92c686068..041fd47e94b84b 100644 --- a/deps/v8/src/codegen/interface-descriptors.h +++ b/deps/v8/src/codegen/interface-descriptors.h @@ -151,6 +151,7 @@ namespace internal { V(Void) \ IF_WASM(V, WasmAllocateShared) \ IF_WASM(V, WasmFXResume) \ + IF_WASM(V, WasmFXSuspend) \ V(WasmDummy) \ V(WasmFloat32ToNumber) \ V(WasmFloat64ToTagged) \ @@ -918,6 +919,19 @@ class WasmFXResumeDescriptor final static constexpr int kMaxRegisterParams = 1; static constexpr inline auto registers(); }; + +class WasmFXSuspendDescriptor final + : public StaticCallInterfaceDescriptor { + INTERNAL_DESCRIPTOR() + SANDBOXING_MODE(kSandboxed) + DEFINE_RESULT_AND_PARAMETERS(0, kTag, kContinuation) + DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), + MachineType::TaggedPointer()) + DECLARE_DESCRIPTOR(WasmFXSuspendDescriptor) + + static constexpr int kMaxRegisterParams = 2; + static constexpr inline auto registers(); +}; #endif class NewHeapNumberDescriptor diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h index 490721ff2a99fd..7057d911b4f76d 100644 --- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h +++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h @@ -720,14 +720,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void Move(FPURegister dst, uint64_t src); // AddOverflow_d sets overflow register to a negative value if - // overflow occured, otherwise it is zero or positive + // overflow occurred, otherwise it is zero or positive void AddOverflow_d(Register dst, Register left, const Operand& right, Register overflow); // SubOverflow_d sets overflow register to a negative value if - // overflow occured, otherwise it is zero or positive + // overflow occurred, otherwise it is zero or positive void SubOverflow_d(Register dst, Register left, const Operand& right, Register overflow); - // MulOverflow_{w/d} set overflow register to zero if no overflow occured + // MulOverflow_{w/d} set overflow register to zero if no overflow occurred void MulOverflow_w(Register dst, Register left, const Operand& right, Register overflow); void MulOverflow_d(Register dst, Register left, const Operand& right, diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h index 7e04ee0f0e6620..d0edc5dc1cb9e9 100644 --- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h +++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h @@ -814,14 +814,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void Move(FPURegister dst, 
uint64_t src); // DaddOverflow sets overflow register to a negative value if - // overflow occured, otherwise it is zero or positive + // overflow occurred, otherwise it is zero or positive void DaddOverflow(Register dst, Register left, const Operand& right, Register overflow); // DsubOverflow sets overflow register to a negative value if - // overflow occured, otherwise it is zero or positive + // overflow occurred, otherwise it is zero or positive void DsubOverflow(Register dst, Register left, const Operand& right, Register overflow); - // [D]MulOverflow set overflow register to zero if no overflow occured + // [D]MulOverflow set overflow register to zero if no overflow occurred void MulOverflow(Register dst, Register left, const Operand& right, Register overflow); void DMulOverflow(Register dst, Register left, const Operand& right, diff --git a/deps/v8/src/codegen/pending-optimization-table.cc b/deps/v8/src/codegen/pending-optimization-table.cc index 877fe20d571db4..4548e5683c897c 100644 --- a/deps/v8/src/codegen/pending-optimization-table.cc +++ b/deps/v8/src/codegen/pending-optimization-table.cc @@ -16,7 +16,7 @@ namespace internal { void ManualOptimizationTable::MarkFunctionForManualOptimization( Isolate* isolate, DirectHandle function, IsCompiledScope* is_compiled_scope) { - DCHECK(v8_flags.testing_d8_test_runner || v8_flags.allow_natives_syntax); + DCHECK(v8_flags.allow_natives_syntax); DCHECK(is_compiled_scope->is_compiled()); DCHECK(function->has_feedback_vector()); @@ -36,7 +36,7 @@ void ManualOptimizationTable::MarkFunctionForManualOptimization( // sandbox is enabled. So instead, we reference the BytecodeArray's // in-sandbox wrapper object. table = ObjectHashTable::Put( - table, shared_info, + isolate, table, shared_info, direct_handle(shared_info->GetBytecodeArray(isolate)->wrapper(), isolate)); isolate->heap()->SetFunctionsMarkedForManualOptimization(*table); @@ -44,7 +44,7 @@ void ManualOptimizationTable::MarkFunctionForManualOptimization( bool ManualOptimizationTable::IsMarkedForManualOptimization( Isolate* isolate, Tagged function) { - DCHECK(v8_flags.testing_d8_test_runner || v8_flags.allow_natives_syntax); + DCHECK(v8_flags.allow_natives_syntax); DirectHandle table( isolate->heap()->functions_marked_for_manual_optimization(), isolate); diff --git a/deps/v8/src/codegen/reloc-info.h b/deps/v8/src/codegen/reloc-info.h index b1794aa551e1b6..ea53f361bea270 100644 --- a/deps/v8/src/codegen/reloc-info.h +++ b/deps/v8/src/codegen/reloc-info.h @@ -166,6 +166,8 @@ class RelocInfo { FIRST_BUILTIN_ENTRY_MODE = OFF_HEAP_TARGET, LAST_BUILTIN_ENTRY_MODE = NEAR_BUILTIN_ENTRY, FIRST_SHAREABLE_RELOC_MODE = WASM_CALL, + FIRST_DEOPT_MODE = DEOPT_SCRIPT_OFFSET, + LAST_DEOPT_MODE = DEOPT_NODE_ID, }; static_assert(NUMBER_OF_MODES <= kBitsPerInt); @@ -222,6 +224,9 @@ class RelocInfo { } static constexpr bool IsConstPool(Mode mode) { return mode == CONST_POOL; } static constexpr bool IsVeneerPool(Mode mode) { return mode == VENEER_POOL; } + static constexpr bool IsDeoptMode(Mode mode) { + return mode >= FIRST_DEOPT_MODE && mode <= LAST_DEOPT_MODE; + } static constexpr bool IsDeoptPosition(Mode mode) { return mode == DEOPT_SCRIPT_OFFSET || mode == DEOPT_INLINING_ID; } diff --git a/deps/v8/src/codegen/riscv/assembler-riscv-inl.h b/deps/v8/src/codegen/riscv/assembler-riscv-inl.h index 60904a6c46c223..f3df7e16129131 100644 --- a/deps/v8/src/codegen/riscv/assembler-riscv-inl.h +++ b/deps/v8/src/codegen/riscv/assembler-riscv-inl.h @@ -54,15 +54,33 @@ namespace internal { [[nodiscard]] static inline 
Instr SetLo12Offset(int32_t lo12, Instr instr); bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); } +EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); } void Assembler::CheckBuffer() { - if (buffer_space() <= kGap) { + if (V8_UNLIKELY(buffer_space() <= kGap)) { GrowBuffer(); } } -// ----------------------------------------------------------------------------- -// WritableRelocInfo. +void Assembler::CheckConstantPoolQuick(int margin) { + if (V8_UNLIKELY(pc_offset() >= constpool_.NextCheckIn() - margin)) { + constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin); + } +} + +void Assembler::CheckTrampolinePoolQuick(int margin) { + DEBUG_PRINTF("\tCheckTrampolinePoolQuick pc_offset:%d %d\n", pc_offset(), + trampoline_check_ - margin); + if (V8_UNLIKELY(pc_offset() >= trampoline_check_ - margin)) { + CheckTrampolinePool(); + } +} + +void Assembler::DisassembleInstruction(uint8_t* pc) { + if (V8_UNLIKELY(v8_flags.riscv_debug)) { + DisassembleInstructionHelper(pc); + } +} void WritableRelocInfo::apply(intptr_t delta) { if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) { @@ -353,8 +371,6 @@ Address RelocInfo::target_off_heap_target() { return Assembler::target_address_at(pc_, constant_pool_); } -EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); } - int32_t Assembler::target_constant32_at(Address pc) { Instruction* instr0 = Instruction::At((unsigned char*)pc); Instruction* instr1 = Instruction::At((unsigned char*)(pc + 1 * kInstrSize)); diff --git a/deps/v8/src/codegen/riscv/assembler-riscv.cc b/deps/v8/src/codegen/riscv/assembler-riscv.cc index 910a9ad5111e8a..9e282acf0456d0 100644 --- a/deps/v8/src/codegen/riscv/assembler-riscv.cc +++ b/deps/v8/src/codegen/riscv/assembler-riscv.cc @@ -86,6 +86,7 @@ static unsigned SimulatorFeatures() { answer |= 1u << ZBB; answer |= 1u << ZBS; answer |= 1u << ZICOND; + answer |= 1u << ZICFISS; answer |= 1u << FPU; return answer; } @@ -143,52 +144,6 @@ void CpuFeatures::PrintFeatures() { CpuFeatures::IsSupported(ZBA), CpuFeatures::IsSupported(ZBB), CpuFeatures::IsSupported(ZBS), CpuFeatures::IsSupported(ZICOND)); } -int ToNumber(Register reg) { - DCHECK(reg.is_valid()); - const int kNumbers[] = { - 0, // zero_reg - 1, // ra - 2, // sp - 3, // gp - 4, // tp - 5, // t0 - 6, // t1 - 7, // t2 - 8, // s0/fp - 9, // s1 - 10, // a0 - 11, // a1 - 12, // a2 - 13, // a3 - 14, // a4 - 15, // a5 - 16, // a6 - 17, // a7 - 18, // s2 - 19, // s3 - 20, // s4 - 21, // s5 - 22, // s6 - 23, // s7 - 24, // s8 - 25, // s9 - 26, // s10 - 27, // s11 - 28, // t3 - 29, // t4 - 30, // t5 - 31, // t6 - }; - return kNumbers[reg.code()]; -} - -Register ToRegister(int num) { - DCHECK(num >= 0 && num < kNumRegisters); - const Register kRegisters[] = { - zero_reg, ra, sp, gp, tp, t0, t1, t2, fp, s1, a0, a1, a2, a3, a4, a5, - a6, a7, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, t3, t4, t5, t6}; - return kRegisters[num]; -} // ----------------------------------------------------------------------------- // Implementation of RelocInfo. @@ -273,10 +228,38 @@ Assembler::Assembler(const AssemblerOptions& options, constpool_(this) { reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_); - next_buffer_check_ = v8_flags.force_long_branches - ? kMaxInt - : kMaxBranchOffset - BlockTrampolinePoolScope::kGap; - trampoline_emitted_ = v8_flags.force_long_branches; + trampoline_check_ = v8_flags.force_long_branches + ? 
kMaxInt + : kMaxBranchOffset - BlockPoolsScope::kGap; + CHECK(!v8_flags.force_long_branches || is_trampoline_emitted()); +} + +void Assembler::StartBlockPools(ConstantPoolEmission cpe, int margin) { + int current = pools_blocked_nesting_; + if (current == 0) { + if (cpe == ConstantPoolEmission::kCheck) { + CheckConstantPoolQuick(margin); + } + CheckTrampolinePoolQuick(margin); + constpool_.DisableNextCheckIn(); + // TODO(kasperl): Once we can compute the next trampoline check + // reliably, we can also disable the trampoline checks here. + } + pools_blocked_nesting_ = current + 1; + DEBUG_PRINTF("\tStartBlockPools @ %d (nesting=%d)\n", pc_offset(), + pools_blocked_nesting_); +} + +void Assembler::EndBlockPools() { + pools_blocked_nesting_--; + DEBUG_PRINTF("\tEndBlockPools @ %d (nesting=%d)\n", pc_offset(), + pools_blocked_nesting_); + DCHECK_GE(pools_blocked_nesting_, 0); + if (pools_blocked_nesting_ > 0) return; // Still blocked. + DCHECK(constpool_.IsInRangeIfEmittedAt(Jump::kRequired, pc_offset())); + constpool_.EnableNextCheckIn(); + CheckConstantPoolQuick(0); + CheckTrampolinePoolQuick(0); } void Assembler::AbortedCodeGeneration() { constpool_.Clear(); } @@ -288,6 +271,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) { void Assembler::GetCode(LocalIsolate* isolate, CodeDesc* desc, SafepointTableBuilderBase* safepoint_table_builder, int handler_table_offset) { + // In most cases, the constant pool will already have been emitted when + // we get here - sometimes through a call to {FinishCode}. For now, it + // is safe to call this again, but it might be worth changing its name + // to make that clearer. + FinishCode(); + DCHECK(constpool_.IsEmpty()); + // As a crutch to avoid having to add manual Align calls wherever we use a // raw workflow to create InstructionStream objects (mostly in tests), add // another Align call here. It does no harm - the end of the InstructionStream @@ -297,11 +287,8 @@ void Assembler::GetCode(LocalIsolate* isolate, CodeDesc* desc, // comments). DataAlign(InstructionStream::kMetadataAlignment); - ForceConstantPoolEmissionWithoutJump(); - int code_comments_size = WriteCodeComments(); - - DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap. + DCHECK_GE(buffer_space(), 0); // No buffer overflow. AllocateAndInstallRequestedHeapNumbers(isolate); @@ -358,8 +345,8 @@ void Assembler::CodeTargetAlign() { // The link chain is terminated by a value in the instruction of 0, // which is an otherwise illegal value (branch 0 is inf loop). When this case // is detected, return an position of -1, an otherwise illegal position. -const int kEndOfChain = -1; -const int kEndOfJumpChain = 0; +static constexpr int kEndOfChain = -1; +static constexpr int kEndOfJumpChain = 0; int Assembler::target_at(int pos, bool is_internal) { if (is_internal) { @@ -528,8 +515,8 @@ bool Assembler::MustUseReg(RelocInfo::Mode rmode) { return !RelocInfo::IsNoInfo(rmode); } -void Assembler::DisassembleInstruction(uint8_t* pc) { - if (!v8_flags.riscv_debug) return; +void Assembler::DisassembleInstructionHelper(uint8_t* pc) { + CHECK(v8_flags.riscv_debug); disasm::NameConverter converter; disasm::Disassembler disasm(converter); base::EmbeddedVector disasm_buffer; @@ -646,19 +633,16 @@ void Assembler::print(const Label* L) { void Assembler::bind_to(Label* L, int pos) { DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position. 
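
StartBlockPools and EndBlockPools above fold the previously separate trampoline-pool and constant-pool blocking scopes into one nesting counter: checks are suppressed while the count is non-zero and rerun as soon as the outermost scope ends. Roughly, as a standalone sketch (not the V8 classes):

    #include <cassert>
    #include <functional>
    #include <utility>

    class PoolBlockerSketch {
     public:
      explicit PoolBlockerSketch(std::function<void()> check)
          : check_(std::move(check)) {}

      void StartBlock() { ++nesting_; }
      void EndBlock() {
        assert(nesting_ > 0);
        if (--nesting_ == 0) check_();  // re-check as soon as we are unblocked
      }
      bool blocked() const { return nesting_ > 0; }

     private:
      int nesting_ = 0;
      std::function<void()> check_;
    };
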
DEBUG_PRINTF("\tbinding %d to label %p\n", pos, L); - int trampoline_pos = kInvalidSlotPos; - bool is_internal = false; - if (L->is_linked() && !trampoline_emitted_) { + if (L->is_linked() && !is_trampoline_emitted()) { unbound_labels_count_--; - if (!is_internal_reference(L)) { - next_buffer_check_ += kTrampolineSlotsSize; - } + trampoline_check_ += kTrampolineSlotsSize; } + int trampoline_pos = kInvalidSlotPos; while (L->is_linked()) { int fixup_pos = L->pos(); int dist = pos - fixup_pos; - is_internal = is_internal_reference(L); + bool is_internal = is_internal_reference(L); next(L, is_internal); // Call next before overwriting link with target // at fixup_pos. Instr instr = instr_at(fixup_pos); @@ -669,7 +653,7 @@ void Assembler::bind_to(Label* L, int pos) { if (IsBranch(instr)) { if (dist > kMaxBranchOffset) { if (trampoline_pos == kInvalidSlotPos) { - trampoline_pos = get_trampoline_entry(fixup_pos); + trampoline_pos = GetTrampolineEntry(fixup_pos); } DEBUG_PRINTF("\t\ttrampolining: %d\n", trampoline_pos); CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset); @@ -680,7 +664,7 @@ void Assembler::bind_to(Label* L, int pos) { } else if (IsJal(instr)) { if (dist > kMaxJumpOffset) { if (trampoline_pos == kInvalidSlotPos) { - trampoline_pos = get_trampoline_entry(fixup_pos); + trampoline_pos = GetTrampolineEntry(fixup_pos); } CHECK((trampoline_pos - fixup_pos) <= kMaxJumpOffset); DEBUG_PRINTF("\t\ttrampolining: %d\n", trampoline_pos); @@ -699,6 +683,7 @@ void Assembler::bind_to(Label* L, int pos) { void Assembler::bind(Label* L) { DCHECK(!L->is_bound()); // Label can only be bound once. bind_to(L, pc_offset()); + VU.clear(); } void Assembler::next(Label* L, bool is_internal) { @@ -767,44 +752,14 @@ int Assembler::PatchBranchLongOffset(Address pc, Instr instr_auipc, } // Returns the next free trampoline entry. -int32_t Assembler::get_trampoline_entry(int32_t pos) { - DEBUG_PRINTF("\ttrampoline start: %d,pos: %d\n", trampoline_.start(), pos); +int32_t Assembler::GetTrampolineEntry(int32_t pos) { + DEBUG_PRINTF("\ttrampoline start: %d, pos: %d\n", trampoline_.start(), pos); CHECK(trampoline_.start() > pos); int32_t entry = trampoline_.take_slot(); CHECK_NE(entry, kInvalidSlotPos); return entry; } -uintptr_t Assembler::jump_address(Label* L) { - intptr_t target_pos; - DEBUG_PRINTF("\tjump_address: %p to %p (%d)\n", L, - reinterpret_cast(buffer_start_ + pc_offset()), - pc_offset()); - if (L->is_bound()) { - target_pos = L->pos(); - } else { - if (L->is_linked()) { - target_pos = L->pos(); // L's link. - L->link_to(pc_offset()); - } else { - L->link_to(pc_offset()); - if (!trampoline_emitted_) { - unbound_labels_count_++; - next_buffer_check_ -= kTrampolineSlotsSize; - } - DEBUG_PRINTF("\tstarted link\n"); - return kEndOfJumpChain; - } - } - uintptr_t imm = reinterpret_cast(buffer_start_) + target_pos; - if (v8_flags.riscv_c_extension) { - DCHECK_EQ(imm & 1, 0); - } else { - DCHECK_EQ(imm & 3, 0); - } - return imm; -} - int32_t Assembler::branch_long_offset(Label* L) { intptr_t target_pos; @@ -813,15 +768,17 @@ int32_t Assembler::branch_long_offset(Label* L) { pc_offset()); if (L->is_bound()) { target_pos = L->pos(); + DEBUG_PRINTF("\tbound: %" PRIdPTR "\n", target_pos); } else { if (L->is_linked()) { target_pos = L->pos(); // L's link. 
L->link_to(pc_offset()); + DEBUG_PRINTF("\tadded to link: %" PRIdPTR "\n", target_pos); } else { L->link_to(pc_offset()); - if (!trampoline_emitted_) { + if (!is_trampoline_emitted()) { unbound_labels_count_++; - next_buffer_check_ -= kTrampolineSlotsSize; + trampoline_check_ -= kTrampolineSlotsSize; } DEBUG_PRINTF("\tstarted link\n"); return kEndOfJumpChain; @@ -834,40 +791,13 @@ int32_t Assembler::branch_long_offset(Label* L) { DCHECK_EQ(offset & 3, 0); } DCHECK(is_int32(offset)); - VU.clear(); return static_cast(offset); } int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) { - int32_t target_pos; - - DEBUG_PRINTF("\tbranch_offset_helper: %p to %p (%d)\n", L, - reinterpret_cast(buffer_start_ + pc_offset()), - pc_offset()); - if (L->is_bound()) { - target_pos = L->pos(); - DEBUG_PRINTF("\tbound: %d", target_pos); - } else { - if (L->is_linked()) { - target_pos = L->pos(); - L->link_to(pc_offset()); - DEBUG_PRINTF("\tadded to link: %d\n", target_pos); - } else { - L->link_to(pc_offset()); - if (!trampoline_emitted_) { - unbound_labels_count_++; - next_buffer_check_ -= kTrampolineSlotsSize; - } - DEBUG_PRINTF("\tstarted link\n"); - return kEndOfJumpChain; - } - } - - int32_t offset = target_pos - pc_offset(); + int32_t offset = branch_long_offset(L); DCHECK(is_intn(offset, bits)); - DCHECK_EQ(offset & 1, 0); DEBUG_PRINTF("\toffset = %d\n", offset); - VU.clear(); return offset; } @@ -895,7 +825,7 @@ void Assembler::EBREAK() { // Assembler Pseudo Instructions (Tables 25.2 and 25.3, RISC-V Unprivileged ISA) -void Assembler::nop() { addi(ToRegister(0), ToRegister(0), 0); } +void Assembler::nop() { addi(zero_reg, zero_reg, 0); } inline int64_t SignExtend(uint64_t V, int N) { return static_cast(V << (64 - N)) >> (64 - N); @@ -976,7 +906,7 @@ void Assembler::GeneralLi(Register rd, int64_t imm) { } } else { sim_low = low_12; - ori(rd, zero_reg, low_12); + addi(rd, zero_reg, low_12); } } if (sim_low & 0x100000000) { @@ -1008,7 +938,7 @@ void Assembler::GeneralLi(Register rd, int64_t imm) { addi(temp_reg, temp_reg, low_12); } } else { - ori(temp_reg, zero_reg, low_12); + addi(temp_reg, zero_reg, low_12); } // Put it at the bgining of register slli(temp_reg, temp_reg, 32); @@ -1031,7 +961,7 @@ void Assembler::GeneralLi(Register rd, int64_t imm) { addi(rd, rd, low_12); } } else { - ori(rd, zero_reg, low_12); + addi(rd, zero_reg, low_12); } // upper part already in rd. Each part to be added to rd, has maximum of 11 // bits, and always starts with a 1. 
rd is shifted by the size of the part @@ -1104,8 +1034,8 @@ void Assembler::li_ptr(Register rd, int64_t imm) { } void Assembler::li_constant(Register rd, int64_t imm) { - DEBUG_PRINTF("\tli_constant(%d, %" PRIx64 " <%" PRId64 ">)\n", ToNumber(rd), - imm, imm); + DEBUG_PRINTF("\tli_constant(%d, %" PRIx64 " <%" PRId64 ">)\n", rd.code(), imm, + imm); lui(rd, (imm + (1LL << 47) + (1LL << 35) + (1LL << 23) + (1LL << 11)) >> 48); // Bits 63:48 addiw(rd, rd, @@ -1121,7 +1051,7 @@ void Assembler::li_constant(Register rd, int64_t imm) { void Assembler::li_constant32(Register rd, int32_t imm) { ASM_CODE_COMMENT(this); - DEBUG_PRINTF("\tli_constant(%d, %x <%d>)\n", ToNumber(rd), imm, imm); + DEBUG_PRINTF("\tli_constant(%d, %x <%d>)\n", rd.code(), imm, imm); int32_t high_20 = ((imm + 0x800) >> 12); // bits31:12 int32_t low_12 = imm & 0xfff; // bits11:0 lui(rd, high_20); @@ -1172,7 +1102,7 @@ void Assembler::li_ptr(Register rd, int32_t imm) { void Assembler::li_constant(Register rd, int32_t imm) { ASM_CODE_COMMENT(this); - DEBUG_PRINTF("\tli_constant(%d, %x <%d>)\n", ToNumber(rd), imm, imm); + DEBUG_PRINTF("\tli_constant(%d, %x <%d>)\n", rd.code(), imm, imm); int32_t high_20 = ((imm + 0x800) >> 12); // bits31:12 int32_t low_12 = imm & 0xfff; // bits11:0 lui(rd, high_20); @@ -1207,10 +1137,6 @@ void Assembler::stop(uint32_t code) { #endif } -// Original MIPS Instructions - -// ------------Memory-instructions------------- - bool Assembler::NeedAdjustBaseAndOffset(const MemOperand& src, OffsetAccessType access_type, int second_access_add_to_offset) { @@ -1364,20 +1290,17 @@ void Assembler::GrowBuffer() { void Assembler::db(uint8_t data) { DEBUG_PRINTF("%p(%d): constant 0x%x\n", pc_, pc_offset(), data); - CheckBuffer(); - EmitHelper(data); + EmitHelper(data, false); } void Assembler::dd(uint32_t data) { DEBUG_PRINTF("%p(%d): constant 0x%x\n", pc_, pc_offset(), data); - CheckBuffer(); - EmitHelper(data); + EmitHelper(data, false); } void Assembler::dq(uint64_t data) { DEBUG_PRINTF("%p(%d): constant 0x%" PRIx64 "\n", pc_, pc_offset(), data); - CheckBuffer(); - EmitHelper(data); + EmitHelper(data, false); } #if defined(V8_TARGET_ARCH_RISCV64) @@ -1385,17 +1308,17 @@ void Assembler::dq(Label* label) { #elif defined(V8_TARGET_ARCH_RISCV32) void Assembler::dd(Label* label) { #endif - uintptr_t data; - CheckBuffer(); - if (label->is_bound()) { - internal_reference_positions_.insert(pc_offset()); - data = reinterpret_cast(buffer_start_ + label->pos()); - } else { - data = jump_address(label); + int32_t offset = branch_long_offset(label); + uintptr_t data = (offset != kEndOfJumpChain) + ? reinterpret_cast(pc_ + offset) + : kEndOfJumpChain; + if (label->is_linked()) { + // We only need to query the kind of use for unbound labels, so + // we only insert those in the set. internal_reference_positions_.insert(label->pos()); } RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); - EmitHelper(data); + EmitHelper(data, false); } void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { @@ -1407,14 +1330,16 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { } void Assembler::CheckTrampolinePool() { - if (trampoline_emitted_) return; + // Once we've emitted the trampoline pool, we bump the next check position, + // so we shouldn't get here again. + CHECK(!is_trampoline_emitted()); + // Some small sequences of instructions must not be broken up by the // insertion of a trampoline pool; such sequences are protected by increasing - // trampoline_pool_blocked_nesting_. 
This is also used to block recursive - // calls to CheckTrampolinePool. - DEBUG_PRINTF("\ttrampoline_pool_blocked_nesting:%d\n", - trampoline_pool_blocked_nesting_); - if (is_trampoline_pool_blocked()) { + // pools_blocked_nesting_. This is also used to block recursive calls to + // CheckTrampolinePool. + DEBUG_PRINTF("\tpools_blocked_nesting: %d\n", pools_blocked_nesting_); + if (pools_blocked()) { // Emission is currently blocked; we will check again when we leave the // blocking scope. We shouldn't move the next check position here, because // it would interfere with the adjustments we make when we produce new @@ -1422,20 +1347,22 @@ void Assembler::CheckTrampolinePool() { return; } - DCHECK_GE(unbound_labels_count_, 0); - if (unbound_labels_count_ > 0) { + DCHECK_GE(UnboundLabelsCount(), 0); + if (UnboundLabelsCount() > 0) { // First we emit jump, then we emit trampoline pool. int size = - kTrampolinePoolOverhead + unbound_labels_count_ * kTrampolineSlotsSize; + kTrampolinePoolOverhead + UnboundLabelsCount() * kTrampolineSlotsSize; DEBUG_PRINTF("inserting trampoline pool at %p (%d) with size %d\n", reinterpret_cast(buffer_start_ + pc_offset()), pc_offset(), size); int pc_offset_for_safepoint_before = pc_offset_for_safepoint(); USE(pc_offset_for_safepoint_before); // Only used in DCHECK below. - // Mark the trampoline pool as emitted eagerly to avoid recursive - // emissions occurring from the blocking scope. - trampoline_emitted_ = true; + // As we are only going to emit the trampoline pool once, we do not have + // to check ever again. To avoid recursive emissions occurring when the + // pools are blocked below, we *eagerly* set the next check position to + // something we will never reach. + trampoline_check_ = kMaxInt; // By construction, we know that any branch or jump up until this point // can reach the last entry in the trampoline pool. Therefore, we can @@ -1444,36 +1371,30 @@ void Assembler::CheckTrampolinePool() { static_assert(kMaxBranchOffset <= kMaxJumpOffset - kTrampolineSlotsSize); int preamble_start = pc_offset(); USE(preamble_start); // Only used in DCHECK. - BlockPoolsScope block_pools(this, PoolEmissionCheck::kSkip, size); + BlockPoolsScope block_pools(this, ConstantPoolEmission::kSkip, size); j(size); int pool_start = pc_offset(); DCHECK_EQ(pool_start - preamble_start, kTrampolinePoolOverhead); - for (int i = 0; i < unbound_labels_count_; i++) { + for (int i = 0; i < UnboundLabelsCount(); i++) { // Emit a dummy far branch. It will be patched later when one of the // unbound labels are bound. auipc(t6, 0); // Read pc into t6. jr(t6, 0); // Jump to t6 - the auipc instruction. } - trampoline_ = Trampoline(pool_start, unbound_labels_count_); + trampoline_ = Trampoline(pool_start, UnboundLabelsCount()); int pool_size = pc_offset() - pool_start; USE(pool_size); // Only used in DCHECK. - DCHECK_EQ(pool_size, unbound_labels_count_ * kTrampolineSlotsSize); + DCHECK_EQ(pool_size, UnboundLabelsCount() * kTrampolineSlotsSize); // Make sure we didn't mess with the recorded pc for the next safepoint // as part of emitting the branch trampolines. DCHECK_EQ(pc_offset_for_safepoint(), pc_offset_for_safepoint_before); - - // As we are only going to emit the trampoline pool once, we do not have - // to check ever again. We set the next check position to something we - // will never reach. - next_buffer_check_ = kMaxInt; } else { // Number of branches to unbound label at this point is zero, so we can - // move next buffer check to maximum. 
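
The trampoline pool logic above is driven by a single deadline, trampoline_check_: every emit compares the current pc offset against it (minus a margin), binding or linking labels nudges it, and once the pool has been emitted the deadline is parked at kMaxInt so the check can never fire again (which is also how is_trampoline_emitted() is derived further down). In outline, as a standalone sketch:

    #include <climits>

    class TrampolineDeadlineSketch {
     public:
      explicit TrampolineDeadlineSketch(int deadline) : deadline_(deadline) {}

      void MaybeEmit(int pc_offset, int margin) {
        if (pc_offset >= deadline_ - margin) Emit();
      }
      bool emitted() const { return deadline_ == INT_MAX; }

     private:
      void Emit() {
        // ... emit the pool here ...
        deadline_ = INT_MAX;  // mirrors `trampoline_check_ = kMaxInt;`
      }
      int deadline_;
    };
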
- next_buffer_check_ = - pc_offset() + kMaxBranchOffset - BlockTrampolinePoolScope::kGap; + // move next trampoline check to maximum. + trampoline_check_ = pc_offset() + kMaxBranchOffset - BlockPoolsScope::kGap; } } @@ -1737,33 +1658,22 @@ void Assembler::EmitPoolGuard() { // ----------------------------------------------------------------------------- // Assembler. template -void Assembler::EmitHelper(T x) { - *reinterpret_cast(pc_) = x; - pc_ += sizeof(x); -} - -void Assembler::emit(Instr x) { - DEBUG_PRINTF("%p(%d): ", pc_, pc_offset()); +void Assembler::EmitHelper(T x, bool disassemble) { + uint8_t* pc = pc_; + *reinterpret_cast(pc) = x; + if (disassemble) { + DEBUG_PRINTF("%p(%d): ", pc, static_cast(pc - buffer_start())); + DisassembleInstruction(pc); + } + pc_ = pc + sizeof(x); CheckBuffer(); - EmitHelper(x); - DisassembleInstruction(pc_ - sizeof(x)); - CheckTrampolinePoolQuick(); + CheckConstantPoolQuick(0); + CheckTrampolinePoolQuick(0); } -void Assembler::emit(ShortInstr x) { - DEBUG_PRINTF("%p(%d): ", pc_, pc_offset()); - CheckBuffer(); - EmitHelper(x); - DisassembleInstruction(pc_ - sizeof(x)); - CheckTrampolinePoolQuick(); -} +void Assembler::emit(Instr x) { EmitHelper(x, true); } -void Assembler::emit(uint64_t data) { - DEBUG_PRINTF("%p(%d): ", pc_, pc_offset()); - CheckBuffer(); - EmitHelper(data); - CheckTrampolinePoolQuick(); -} +void Assembler::emit(ShortInstr x) { EmitHelper(x, true); } void Assembler::instr_at_put(int pos, Instr instr, WritableJitAllocation* jit_allocation) { diff --git a/deps/v8/src/codegen/riscv/assembler-riscv.h b/deps/v8/src/codegen/riscv/assembler-riscv.h index e9dbdbb0695a27..b49df1a1e84529 100644 --- a/deps/v8/src/codegen/riscv/assembler-riscv.h +++ b/deps/v8/src/codegen/riscv/assembler-riscv.h @@ -178,7 +178,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, public AssemblerRISCVZifencei, public AssemblerRISCVZicsr, public AssemblerRISCVZicond, - public AssemblerRISCVZimop, + public AssemblerRISCVZicfiss, public AssemblerRISCVZfh, public AssemblerRISCVV { public: @@ -262,9 +262,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, // Returns the branch offset to the given label from the current code // position. Links the label to the current position if it is still unbound. - // Manages the jump elimination optimization if the second parameter is true. int32_t branch_offset_helper(Label* L, OffsetSize bits) override; - uintptr_t jump_address(Label* L); int32_t branch_long_offset(Label* L); // During code generation builtin targets in PC-relative call/jump @@ -305,10 +303,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, static bool IsConstantPoolAt(Instruction* instr); static int ConstantPoolSizeAt(Instruction* instr); - // See Assembler::CheckConstPool for more info. void EmitPoolGuard(); - void FinishCode() { ForceConstantPoolEmissionWithoutJump(); } + bool pools_blocked() const { return pools_blocked_nesting_ > 0; } + void StartBlockPools(ConstantPoolEmission cpe, int margin); + void EndBlockPools(); + + void FinishCode() { constpool_.Check(Emission::kForced, Jump::kOmitted); } #if defined(V8_TARGET_ARCH_RISCV64) static void set_target_value_at( @@ -473,56 +474,39 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, return SizeOfCodeGeneratedSince(label) / kInstrSize; } - // Class for scoping postponing the trampoline pool generation. - class V8_NODISCARD BlockTrampolinePoolScope { + // Blocks the trampoline pool and constant pools emissions. 
Emits pools if + // necessary to ensure that {margin} more bytes can be emitted without + // triggering pool emission. + class V8_NODISCARD BlockPoolsScope { public: // We leave space for a number of trampoline pool slots, so we do not // have to pass in an explicit margin for all scopes. static constexpr int kGap = kTrampolineSlotsSize * 16; - explicit BlockTrampolinePoolScope(Assembler* assem, int margin = 0) + explicit BlockPoolsScope(Assembler* assem, int margin = 0) + : BlockPoolsScope(assem, ConstantPoolEmission::kCheck, margin) {} + + BlockPoolsScope(Assembler* assem, ConstantPoolEmission cpe, int margin = 0) : assem_(assem), margin_(margin) { - if (margin > 0) { - assem->CheckTrampolinePoolQuick(margin); - } - assem->StartBlockTrampolinePool(); + assem->StartBlockPools(cpe, margin); start_offset_ = assem->pc_offset(); } - ~BlockTrampolinePoolScope() { + ~BlockPoolsScope() { int generated = assem_->pc_offset() - start_offset_; USE(generated); // Only used in DCHECK. int allowed = margin_; if (allowed == 0) allowed = kGap - kTrampolinePoolOverhead; DCHECK_GE(generated, 0); DCHECK_LE(generated, allowed); - assem_->EndBlockTrampolinePool(); + assem_->EndBlockPools(); } private: Assembler* const assem_; const int margin_; int start_offset_; - DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); - }; - - class V8_NODISCARD BlockPoolsScope { - public: - // Block Trampoline Pool and Constant Pool. Emits pools if necessary to - // ensure that {margin} more bytes can be emitted without triggering pool - // emission. - explicit BlockPoolsScope(Assembler* assem, int margin = 0) - : block_const_pool_(assem, margin), - block_trampoline_pool_(assem, margin) {} - BlockPoolsScope(Assembler* assem, PoolEmissionCheck check, int margin = 0) - : block_const_pool_(assem, check), - block_trampoline_pool_(assem, margin) {} - ~BlockPoolsScope() {} - - private: - ConstantPool::BlockScope block_const_pool_; - BlockTrampolinePoolScope block_trampoline_pool_; DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope); }; @@ -581,52 +565,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, void instr_at_put(int pos, ShortInstr instr, WritableJitAllocation* jit_allocation = nullptr); - Address toAddress(int pos) { - return reinterpret_cast
(buffer_start_ + pos); - } - - void CheckTrampolinePool(); - // Get the code target object for a pc-relative call or jump. - V8_INLINE Handle relative_code_target_object_handle_at( - Address pc_) const; - - inline int UnboundLabelsCount() { return unbound_labels_count_; } - - void RecordConstPool(int size, const BlockPoolsScope& scope); - - void ForceConstantPoolEmissionWithoutJump() { - constpool_.Check(Emission::kForced, Jump::kOmitted); - } - - // Check if the const pool needs to be emitted while pretending that {margin} - // more bytes of instructions have already been emitted. This variant is used - // in positions in code that we might fall through to. - void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) { - constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin); - } - - // Check if the const pool needs to be emitted while pretending that {margin} - // more bytes of instructions have already been emitted. This variant is used - // at unreachable positions in the code, such as right after an unconditional - // transfer of control (jump, return). - void EmitConstPoolWithoutJumpIfNeeded(size_t margin = 0) { - constpool_.Check(Emission::kIfNeeded, Jump::kOmitted, margin); - } - - RelocInfoStatus RecordEntry64(uint64_t data, RelocInfo::Mode rmode) { - return constpool_.RecordEntry64(data, rmode); - } - - void CheckTrampolinePoolQuick(int margin = 0) { - DEBUG_PRINTF("\tCheckTrampolinePoolQuick pc_offset:%d %d\n", pc_offset(), - next_buffer_check_ - margin); - if (pc_offset() >= next_buffer_check_ - margin) { - CheckTrampolinePool(); - } - } + inline Handle relative_code_target_object_handle_at(Address pc) const; - int next_buffer_check() const { return next_buffer_check_; } + inline int UnboundLabelsCount() const { return unbound_labels_count_; } friend class VectorUnit; class VectorUnit { @@ -643,6 +585,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, explicit VectorUnit(Assembler* assm) : assm_(assm) {} + // Sets the floating-point rounding mode. + // Updating the rounding mode can be expensive, and therefore isn't done + // for every basic block. Instead, we assume that the rounding mode is + // RNE. Any instruction sequence that changes the rounding mode must + // change it back to RNE before it finishes. void set(FPURoundingMode mode) { if (mode_ != mode) { assm_->addi(kScratchReg, zero_reg, mode << kFcsrFrmShift); @@ -757,6 +704,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, } void clear() { + // If the rounding mode isn't RNE, then we forgot to change it back. + DCHECK_EQ(RNE, mode_); avl_ = -1; sew_ = kVsInvalid; lmul_ = kVlInvalid; @@ -772,7 +721,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, VectorUnit VU; - void ClearVectorunit() override { VU.clear(); } + void ClearVectorUnit() override { VU.clear(); } protected: // Readable constants for base and offset adjustment helper, these indicate if @@ -812,39 +761,33 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, // Record the current pc for the next safepoint. 
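
The new VectorUnit comment above documents the invariant that generated code may change the floating-point rounding mode only temporarily and must restore round-to-nearest-even (RNE) before it finishes, which is what the DCHECK added to clear() enforces. The same discipline in plain C++, as a standalone analogue rather than V8 code (strict conformance would also want #pragma STDC FENV_ACCESS ON):

    #include <cfenv>

    class ScopedRoundingMode {
     public:
      explicit ScopedRoundingMode(int mode) { std::fesetround(mode); }
      ~ScopedRoundingMode() { std::fesetround(FE_TONEAREST); }  // restore RNE
    };

    double AddRoundingUp(double x, double y) {
      ScopedRoundingMode scope(FE_UPWARD);
      return x + y;  // evaluated with round-toward-positive-infinity
    }
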
void RecordPcForSafepoint() override { - pc_offset_for_safepoint_ = pc_offset(); + set_pc_offset_for_safepoint(pc_offset()); } - void StartBlockTrampolinePool() { - DEBUG_PRINTF("\tStartBlockTrampolinePool %d\n", pc_offset()); - trampoline_pool_blocked_nesting_++; + void set_pc_offset_for_safepoint(int pc_offset) { + pc_offset_for_safepoint_ = pc_offset; } - void EndBlockTrampolinePool() { - DEBUG_PRINTF("\tEndBlockTrampolinePool\n"); - trampoline_pool_blocked_nesting_--; - DEBUG_PRINTF("\ttrampoline_pool_blocked_nesting:%d\n", - trampoline_pool_blocked_nesting_); - if (trampoline_pool_blocked_nesting_ == 0) { - CheckTrampolinePoolQuick(); - } + // Check if the const pool needs to be emitted while pretending that {margin} + // more bytes of instructions have already been emitted. This variant is used + // at unreachable positions in the code, such as right after an unconditional + // transfer of control (jump, return). + void EmitConstPoolWithoutJumpIfNeeded() { + constpool_.Check(Emission::kIfNeeded, Jump::kOmitted); } - bool is_trampoline_pool_blocked() const { - return trampoline_pool_blocked_nesting_ > 0; + RelocInfoStatus RecordEntry64(uint64_t data, RelocInfo::Mode rmode) { + return constpool_.RecordEntry64(data, rmode); } - bool is_trampoline_emitted() const { return trampoline_emitted_; } + void RecordConstPool(int size, const BlockPoolsScope& scope); + + bool is_trampoline_emitted() const { return trampoline_check_ == kMaxInt; } private: // Avoid overflows for displacements etc. static const int kMaximalBufferSize = 512 * MB; - // Buffer size and constant pool distance are checked together at regular - // intervals of kBufferCheckInterval emitted bytes. - static constexpr int kBufferCheckInterval = 1 * KB / 2; - - // InstructionStream generation. // The relocation writer's position is at least kGap bytes below the end of // the generated instructions. This is so that multi-instruction sequences do // not have to check for overflow. The same is true for writes of large @@ -852,18 +795,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, static constexpr int kGap = 64; static_assert(AssemblerBase::kMinimalBufferSize >= 2 * kGap); - // Repeated checking whether the trampoline pool should be emitted is rather - // expensive. By default we only check again once a number of instructions - // has been generated. - static constexpr int kCheckConstIntervalInst = 32; - static constexpr int kCheckConstInterval = - kCheckConstIntervalInst * kInstrSize; - - int next_buffer_check_; // pc offset of next buffer check. - - // Emission of the trampoline pool may be blocked in some code sequences. The - // nesting is zero when the pool isn't blocked. - int trampoline_pool_blocked_nesting_ = 0; + // Emission of the pools may be blocked in some code sequences. The + // nesting is zero when the pools aren't blocked. + int pools_blocked_nesting_ = 0; // Relocation information generation. // Each relocation is encoded as a variable size value. @@ -880,11 +814,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, void GrowBuffer(); void emit(Instr x) override; void emit(ShortInstr x) override; - void emit(uint64_t x) override; template - inline void EmitHelper(T x); + inline void EmitHelper(T x, bool disassemble); - static void DisassembleInstruction(uint8_t* pc); + inline void DisassembleInstruction(uint8_t* pc); + static void DisassembleInstructionHelper(uint8_t* pc); // Labels. 
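
The kGap comment above explains why emission checks can happen after a write rather than before it: the buffer always keeps at least kGap free bytes, so a short multi-instruction sequence never needs an overflow check in the middle. The same idea in isolation (standalone sketch, writes of at most kGap bytes per call):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    class GapBufferSketch {
     public:
      static constexpr size_t kGap = 64;

      GapBufferSketch() : buffer_(256), pc_(0) {}  // invariant: > kGap bytes free

      // Callers may write up to kGap bytes without any preceding size check.
      void Emit(const void* data, size_t n) {
        assert(n <= kGap);
        std::memcpy(buffer_.data() + pc_, data, n);
        pc_ += n;
        if (buffer_.size() - pc_ <= kGap) Grow();  // re-establish the free gap
      }

     private:
      void Grow() { buffer_.resize(buffer_.size() * 2); }

      std::vector<uint8_t> buffer_;
      size_t pc_;
    };
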
void print(const Label* L); @@ -935,41 +869,40 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, int free_slot_count_; }; - int32_t get_trampoline_entry(int32_t pos); - int unbound_labels_count_ = 0; - // After trampoline is emitted, long branches are used in generated code for - // the forward branches whose target offsets could be beyond reach of branch - // instruction. We use this information to trigger different mode of - // branch instruction generation, where we use jump instructions rather - // than regular branch instructions. - bool trampoline_emitted_ = false; static constexpr int kInvalidSlotPos = -1; + Trampoline trampoline_; + + int unbound_labels_count_ = 0; + int trampoline_check_; // The pc offset of next trampoline pool check. - // Internal reference positions, required for unbounded internal reference - // labels. + void CheckTrampolinePool(); + inline void CheckTrampolinePoolQuick(int margin); + inline void CheckConstantPoolQuick(int margin); + int32_t GetTrampolineEntry(int32_t pos); + + // We keep track of the position of all internal reference uses of labels, + // so we can distinguish the use site from other kinds of uses. The other + // uses can be recognized by looking at the generated code at the position, + // but internal references are just data (like jump table entries), so we + // need something extra to tell them apart from other kinds of uses. std::set internal_reference_positions_; - bool is_internal_reference(Label* L) { - return internal_reference_positions_.find(L->pos()) != - internal_reference_positions_.end(); + bool is_internal_reference(Label* L) const { + DCHECK(L->is_linked()); + return internal_reference_positions_.contains(L->pos()); } - Trampoline trampoline_; - RegList scratch_register_list_; DoubleRegList scratch_double_register_list_; - - private: ConstantPool constpool_; void PatchInHeapNumberRequest(Address pc, Handle object) override; int WriteCodeComments(); - friend class RegExpMacroAssemblerRISCV; - friend class RelocInfo; - friend class BlockTrampolinePoolScope; friend class EnsureSpace; friend class ConstantPool; + friend class RelocInfo; + friend class RegExpMacroAssemblerRISCV; }; class EnsureSpace { diff --git a/deps/v8/src/codegen/riscv/base-assembler-riscv.cc b/deps/v8/src/codegen/riscv/base-assembler-riscv.cc index 6d9f7dd232ff5c..2ff4eb25bafca3 100644 --- a/deps/v8/src/codegen/riscv/base-assembler-riscv.cc +++ b/deps/v8/src/codegen/riscv/base-assembler-riscv.cc @@ -429,7 +429,7 @@ void AssemblerRiscvBase::GenInstrCSR_ir(uint8_t funct3, Register rd, void AssemblerRiscvBase::GenInstrCSR_ii(uint8_t funct3, Register rd, ControlStatusReg csr, uint8_t imm5) { - GenInstrI(funct3, SYSTEM, rd, ToRegister(imm5), csr); + GenInstrI(funct3, SYSTEM, rd, Register::from_code(imm5), csr); } void AssemblerRiscvBase::GenInstrShiftW_ri(bool arithshift, uint8_t funct3, @@ -447,7 +447,7 @@ void AssemblerRiscvBase::GenInstrALUW_rr(uint8_t funct7, uint8_t funct3, void AssemblerRiscvBase::GenInstrPriv(uint8_t funct7, Register rs1, Register rs2) { - GenInstrR(funct7, 0b000, SYSTEM, ToRegister(0), rs1, rs2); + GenInstrR(funct7, 0b000, SYSTEM, zero_reg, rs1, rs2); } void AssemblerRiscvBase::GenInstrLoadFP_ri(uint8_t funct3, FPURegister rd, diff --git a/deps/v8/src/codegen/riscv/base-assembler-riscv.h b/deps/v8/src/codegen/riscv/base-assembler-riscv.h index 929f3298892326..2e6790a0b2001f 100644 --- a/deps/v8/src/codegen/riscv/base-assembler-riscv.h +++ b/deps/v8/src/codegen/riscv/base-assembler-riscv.h @@ -72,7 +72,7 @@ class DebugFile : 
public std::ofstream { }; #define DEBUG_PRINTF(...) /* force 80 cols */ \ - if (v8_flags.riscv_debug) [[unlikely]] { \ + if (V8_UNLIKELY(v8_flags.riscv_debug)) { \ if (v8_flags.riscv_debug_file_path) { \ base::EmbeddedVector chars; \ SNPrintF(chars, __VA_ARGS__); \ @@ -103,9 +103,8 @@ class AssemblerRiscvBase { virtual void emit(Instr x) = 0; virtual void emit(ShortInstr x) = 0; - virtual void emit(uint64_t x) = 0; - virtual void ClearVectorunit() = 0; + virtual void ClearVectorUnit() = 0; // Record the last known safepoint location to the current pc. virtual void RecordPcForSafepoint() = 0; diff --git a/deps/v8/src/codegen/riscv/base-riscv-i.cc b/deps/v8/src/codegen/riscv/base-riscv-i.cc index add950e84cf52a..18a3e3713fe282 100644 --- a/deps/v8/src/codegen/riscv/base-riscv-i.cc +++ b/deps/v8/src/codegen/riscv/base-riscv-i.cc @@ -19,47 +19,45 @@ void AssemblerRISCVI::auipc(Register rd, int32_t imm20) { void AssemblerRISCVI::jal(Register rd, int32_t imm21) { GenInstrJ(JAL, rd, imm21); // If we're linking, this could potentially be the location of a safepoint. - if (rd != zero_reg) RecordPcForSafepoint(); - ClearVectorunit(); + if (rd != zero_reg) { + RecordPcForSafepoint(); + ClearVectorUnit(); + } } void AssemblerRISCVI::jalr(Register rd, Register rs1, int16_t imm12) { GenInstrI(0b000, JALR, rd, rs1, imm12); // If we're linking, this could potentially be the location of a safepoint. - if (rd != zero_reg) RecordPcForSafepoint(); - ClearVectorunit(); + if (rd != zero_reg) { + RecordPcForSafepoint(); + ClearVectorUnit(); + } } // Branches void AssemblerRISCVI::beq(Register rs1, Register rs2, int16_t imm13) { GenInstrBranchCC_rri(0b000, rs1, rs2, imm13); - ClearVectorunit(); } void AssemblerRISCVI::bne(Register rs1, Register rs2, int16_t imm13) { GenInstrBranchCC_rri(0b001, rs1, rs2, imm13); - ClearVectorunit(); } void AssemblerRISCVI::blt(Register rs1, Register rs2, int16_t imm13) { GenInstrBranchCC_rri(0b100, rs1, rs2, imm13); - ClearVectorunit(); } void AssemblerRISCVI::bge(Register rs1, Register rs2, int16_t imm13) { GenInstrBranchCC_rri(0b101, rs1, rs2, imm13); - ClearVectorunit(); } void AssemblerRISCVI::bltu(Register rs1, Register rs2, int16_t imm13) { GenInstrBranchCC_rri(0b110, rs1, rs2, imm13); - ClearVectorunit(); } void AssemblerRISCVI::bgeu(Register rs1, Register rs2, int16_t imm13) { GenInstrBranchCC_rri(0b111, rs1, rs2, imm13); - ClearVectorunit(); } // Loads @@ -183,29 +181,29 @@ void AssemblerRISCVI::and_(Register rd, Register rs1, Register rs2) { void AssemblerRISCVI::fence(uint8_t pred, uint8_t succ) { DCHECK(is_uint4(pred) && is_uint4(succ)); uint16_t imm12 = succ | (pred << 4) | (0b0000 << 8); - GenInstrI(0b000, MISC_MEM, ToRegister(0), ToRegister(0), imm12); + GenInstrI(0b000, MISC_MEM, zero_reg, zero_reg, imm12); } void AssemblerRISCVI::fence_tso() { uint16_t imm12 = (0b0011) | (0b0011 << 4) | (0b1000 << 8); - GenInstrI(0b000, MISC_MEM, ToRegister(0), ToRegister(0), imm12); + GenInstrI(0b000, MISC_MEM, zero_reg, zero_reg, imm12); } // Environment call / break void AssemblerRISCVI::ecall() { - GenInstrI(0b000, SYSTEM, ToRegister(0), ToRegister(0), 0); + GenInstrI(0b000, SYSTEM, zero_reg, zero_reg, 0); } void AssemblerRISCVI::ebreak() { - GenInstrI(0b000, SYSTEM, ToRegister(0), ToRegister(0), 1); + GenInstrI(0b000, SYSTEM, zero_reg, zero_reg, 1); } // This is a de facto standard (as set by GNU binutils) 32-bit unimplemented // instruction (i.e., it should always trap, if your implementation has invalid // instruction traps). 
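
In the base-riscv-i.cc hunks above, jal and jalr now record a safepoint and clear the vector unit state only when rd is not zero_reg: `jal x0, offset` is the plain jump pseudo-instruction `j offset` and produces no return address, so there is no call site to record. For reference, the JAL encoding (a standalone helper written for illustration, not part of the patch):

    #include <cstdint>

    uint32_t EncodeJal(uint32_t rd, int32_t imm21) {
      uint32_t imm = static_cast<uint32_t>(imm21);
      return (((imm >> 20) & 0x1) << 31) |   // imm[20]
             (((imm >> 1) & 0x3FF) << 21) |  // imm[10:1]
             (((imm >> 11) & 0x1) << 20) |   // imm[11]
             (((imm >> 12) & 0xFF) << 12) |  // imm[19:12]
             ((rd & 0x1F) << 7) |            // rd: x0 for `j`, x1 (ra) for a call
             0x6F;                           // JAL opcode
    }
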
void AssemblerRISCVI::unimp() { - GenInstrI(0b001, SYSTEM, ToRegister(0), ToRegister(0), 0b110000000000); + GenInstrI(0b001, SYSTEM, zero_reg, zero_reg, 0b110000000000); } bool AssemblerRISCVI::IsBranch(Instr instr) { diff --git a/deps/v8/src/codegen/riscv/constant-pool-riscv.cc b/deps/v8/src/codegen/riscv/constant-pool-riscv.cc index d9575790599259..7e04bee7394179 100644 --- a/deps/v8/src/codegen/riscv/constant-pool-riscv.cc +++ b/deps/v8/src/codegen/riscv/constant-pool-riscv.cc @@ -9,9 +9,6 @@ namespace v8 { namespace internal { -ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {} -ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); } - RelocInfoStatus ConstantPool::RecordEntry64(uint64_t data, RelocInfo::Mode rmode) { ConstantPoolKey key(data, rmode); @@ -22,12 +19,10 @@ RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) { RelocInfoStatus status = GetRelocInfoStatusFor(key); if (status == RelocInfoStatus::kMustRecord) { size_t count = ++deduped_entry_count_; - if (count == 1) { - first_use_ = offset; - } else if (count > ConstantPool::kApproxMaxEntryCount) { - // Request constant pool emission after the next instruction. - SetNextCheckIn(kInstrSize); - } + if (count == 1) first_use_ = offset; + // The next check in position depends on the entry count, so we + // potentially update the position here. + MaybeUpdateNextCheckIn(); } entries_.insert(std::make_pair(key, offset)); return status; @@ -44,45 +39,42 @@ RelocInfoStatus ConstantPool::GetRelocInfoStatusFor( return RelocInfoStatus::kMustRecord; } -void ConstantPool::EmitAndClear(Jump require_jump) { - DCHECK(!IsBlocked()); - // Prevent recursive pool emission. We conservatively assume that we will - // have to add padding for alignment, so the margin is guaranteed to be - // at least as large as the actual size of the constant pool. - int margin = ComputeSize(require_jump, Alignment::kRequired); - Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip, +void ConstantPool::EmitAndClear(Jump jump) { + // Since we do not know how much space the constant pool is going to take + // up, we cannot handle getting here while the trampoline pool is blocked. + CHECK(!assm_->pools_blocked()); + + // Prevent recursive pool emission. + int margin = SizeIfEmittedAt(jump, assm_->pc_offset()); + Assembler::BlockPoolsScope block_pools(assm_, ConstantPoolEmission::kSkip, margin); // The pc offset may have changed as a result of blocking pools. We can - // now go ahead and compute the required alignment and the correct size. - Alignment require_alignment = - IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset()); - int size = ComputeSize(require_jump, require_alignment); - DCHECK_LE(size, margin); - Label size_check; - assm_->bind(&size_check); + // now go ahead and compute the required padding and the correct size. + int padding = PaddingIfEmittedAt(jump, assm_->pc_offset()); + int size = SizeOfPool(jump, padding); + Label before_pool; + assm_->bind(&before_pool); assm_->RecordConstPool(size, block_pools); - // Emit the constant pool. It is preceded by an optional branch if - // {require_jump} and a header which will: + // Emit the constant pool. It is preceded by an optional branch if {jump} + // is {Jump::kRequired} and a header which will: // 1) Encode the size of the constant pool, for use by the disassembler. // 2) Terminate the program, to try to prevent execution from accidentally // flowing into the constant pool. - // 3) align the pool entries to 64-bit. 
+ // 3) Align the pool entries using the computed padding. - DEBUG_PRINTF("\tConstant Pool start\n") Label after_pool; - if (require_jump == Jump::kRequired) assm_->b(&after_pool); - assm_->RecordComment("[ Constant Pool"); - EmitPrologue(require_alignment); - if (require_alignment == Alignment::kRequired) assm_->DataAlign(kInt64Size); + EmitPrologue(size, jump == Jump::kRequired ? &after_pool : nullptr); + for (int i = 0; i < padding; i++) assm_->db(0xcc); EmitEntries(); // Emit padding data to ensure the constant pool size matches the expected // constant count during disassembly. This can only happen if we ended up - // overestimating the size of the pool in {ComputeSize}. - int code_size = assm_->SizeOfCodeGeneratedSince(&size_check); + // overestimating the size of the pool in {ComputeSize} due to it being + // rounded up to kInt32Size. + int code_size = assm_->SizeOfCodeGeneratedSince(&before_pool); if (v8_flags.riscv_c_extension) { DCHECK_LE(code_size, size); while (code_size < size) { @@ -95,8 +87,7 @@ void ConstantPool::EmitAndClear(Jump require_jump) { assm_->RecordComment("]"); assm_->bind(&after_pool); - DEBUG_PRINTF("\tConstant Pool end\n") - DCHECK_EQ(size, assm_->SizeOfCodeGeneratedSince(&size_check)); + DCHECK_EQ(size, assm_->SizeOfCodeGeneratedSince(&before_pool)); Clear(); } @@ -104,153 +95,120 @@ void ConstantPool::Clear() { entries_.clear(); first_use_ = -1; deduped_entry_count_ = 0; - next_check_ = 0; -} - -void ConstantPool::StartBlock() { - if (blocked_nesting_ == 0) { - // Prevent constant pool checks from happening by setting the next check to - // the biggest possible offset. - next_check_ = kMaxInt; - } - ++blocked_nesting_; -} - -void ConstantPool::EndBlock() { - --blocked_nesting_; - if (blocked_nesting_ == 0) { - DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset())); - // Make sure a check happens quickly after getting unblocked. - next_check_ = 0; - } -} - -bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; } - -void ConstantPool::SetNextCheckIn(size_t bytes) { - next_check_ = assm_->pc_offset() + static_cast(bytes); } void ConstantPool::EmitEntries() { + int count = 0; + USE(count); // Only used in DCHECK below. for (auto iter = entries_.begin(); iter != entries_.end();) { DCHECK(IsAligned(assm_->pc_offset(), 8)); auto range = entries_.equal_range(iter->first); bool shared = iter->first.AllowsDeduplication(); for (auto it = range.first; it != range.second; ++it) { - SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first); - if (!shared) Emit(it->first); + SetLoadOffsetToConstPoolEntry(it->second, assm_->pc_offset(), it->first); + if (!shared) { + Emit(it->first); + count++; + } + } + if (shared) { + Emit(iter->first); + count++; } - if (shared) Emit(iter->first); iter = range.second; } + DCHECK_EQ(EntryCount(), count); } void ConstantPool::Emit(const ConstantPoolKey& key) { assm_->dq(key.value()); } -bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const { +bool ConstantPool::ShouldEmitNow(Jump jump, size_t margin) const { if (IsEmpty()) return false; if (EntryCount() > ConstantPool::kApproxMaxEntryCount) return true; - // We compute {dist}, i.e. the distance from the first instruction accessing - // an entry in the constant pool to any of the constant pool entries, - // respectively. This is required because we do not guarantee that entries - // are emitted in order of reference, i.e. it is possible that the entry with - // the earliest reference is emitted last. 
The constant pool should be emitted - // if either of the following is true: - // (A) {dist} will be out of range at the next check in. - // (B) Emission can be done behind an unconditional branch and {dist} - // exceeds {kOpportunityDistToPool}. - // (C) {dist} exceeds the desired approximate distance to the pool. - int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired); - size_t pool_end = assm_->pc_offset() + margin + worst_case_size; - size_t dist = pool_end - first_use_; - bool next_check_too_late = dist + 2 * kCheckInterval >= kMaxDistToPool; - bool opportune_emission_without_jump = - require_jump == Jump::kOmitted && (dist >= kOpportunityDistToPool); - bool approximate_distance_exceeded = dist >= kApproxDistToPool; - return next_check_too_late || opportune_emission_without_jump || - approximate_distance_exceeded; -} -int ConstantPool::ComputeSize(Jump require_jump, - Alignment require_alignment) const { - int prologue_size = PrologueSize(require_jump); - // TODO(kasperl): It would be nice to just compute the exact amount of - // padding needed, but that requires knowing the {pc_offset} where the - // constant pool will be emitted. For now, we will just compute the - // maximum padding needed and add additional padding after the pool if - // we overestimated it. - size_t max_padding = 0; - if (require_alignment == Alignment::kRequired) { - size_t instruction_size = kInstrSize; - if (v8_flags.riscv_c_extension) instruction_size = kShortInstrSize; - max_padding = kInt64Size - instruction_size; + // We compute {distance}, i.e. the distance from the first instruction + // accessing an entry in the constant pool to any of the constant pool + // entries, respectively. This is required because we do not guarantee + // that entries are emitted in order of reference, i.e. it is possible + // that the entry with the earliest reference is emitted last. + int pc_offset = assm_->pc_offset(); + int size = SizeIfEmittedAt(jump, pc_offset); + size_t pool_end = pc_offset + margin + size; + size_t distance = pool_end - first_use_; + + if (distance + kCheckInterval >= kMaxDistToPool) { + // We will be out of range at the next check. + return true; + } else if (jump == Jump::kOmitted && distance >= kOpportunityDistToPool) { + // We can emit the constant pool without a jump here and the distance + // indicates that this may be a good time. + return true; + } else { + // We ask to get the constant pool emitted if the {distance} exceeds + // the desired approximate distance to the pool. + return distance >= kApproxDistToPool; } - size_t entries_size = max_padding + EntryCount() * kInt64Size; - return prologue_size + static_cast(entries_size); } -Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump, - int pc_offset) const { - if (EntryCount() == 0) return Alignment::kOmitted; - int prologue_size = PrologueSize(require_jump); - return IsAligned(pc_offset + prologue_size, kInt64Size) - ? Alignment::kOmitted - : Alignment::kRequired; +int ConstantPool::AlignmentIfEmittedAt(Jump jump, int pc_offset) const { + // For now, the alignment does not depend on the {pc_offset}. + return IsEmpty() ? 0 : kInt64Size; } -bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) { - // Check that all entries are in range if the pool is emitted at {pc_offset}. 
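The rewritten ShouldEmitNow above reduces to three range checks against the distance from the earliest pool reference to the would-be end of the pool. A hedged, free-standing restatement of that policy (the constants mirror the ones declared in constant-pool-riscv.h, assuming a 4-byte kInstrSize; the function name and parameter list are illustrative):

#include <cstddef>

bool ShouldEmitPoolNow(bool jump_omitted, size_t pc_offset, size_t margin,
                       size_t pool_size, size_t first_use, size_t entry_count) {
  constexpr size_t KB = 1024, MB = 1024 * KB;
  constexpr size_t kMaxDistToPool = 1 * MB;           // hard pc-relative limit
  constexpr size_t kApproxDistToPool = 64 * KB;       // preferred emission distance
  constexpr size_t kOpportunityDistToPool = 64 * KB;  // cheap spot behind a jump
  constexpr size_t kCheckInterval = 128 * 4;          // 128 * kInstrSize
  constexpr size_t kApproxMaxEntryCount = 512;

  if (entry_count == 0) return false;
  if (entry_count > kApproxMaxEntryCount) return true;

  // Distance from the earliest load referencing the pool to where the pool
  // would end if emitted right now (entries may be emitted out of order).
  size_t pool_end = pc_offset + margin + pool_size;
  size_t distance = pool_end - first_use;

  if (distance + kCheckInterval >= kMaxDistToPool) return true;  // out of range soon
  if (jump_omitted && distance >= kOpportunityDistToPool) return true;  // no jump needed
  return distance >= kApproxDistToPool;  // getting far, emit proactively
}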
- if (EntryCount() == 0) return true; - Alignment require_alignment = - IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset); - size_t pool_end = pc_offset + ComputeSize(Jump::kRequired, require_alignment); - return pool_end < first_use_ + kMaxDistToPool; -} - -ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin) - : pool_(&assm->constpool_) { - pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin); - pool_->StartBlock(); +int ConstantPool::PaddingIfEmittedAt(Jump jump, int pc_offset) const { + int alignment = AlignmentIfEmittedAt(jump, pc_offset); + if (alignment == 0) return 0; + int entries_offset = pc_offset + SizeOfPrologue(jump); + return RoundUp(entries_offset, alignment) - entries_offset; } -ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check) - : pool_(&assm->constpool_) { - DCHECK_EQ(check, PoolEmissionCheck::kSkip); - pool_->StartBlock(); -} - -ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); } - -void ConstantPool::MaybeCheck() { - if (assm_->pc_offset() >= next_check_) { - Check(Emission::kIfNeeded, Jump::kRequired); - } +bool ConstantPool::IsInRangeIfEmittedAt(Jump jump, int pc_offset) const { + // Check that all entries are in range if the pool is emitted at {pc_offset}. + if (IsEmpty()) return true; + size_t pool_end = pc_offset + SizeIfEmittedAt(jump, pc_offset); + return pool_end < first_use_ + kMaxDistToPool; } -void ConstantPool::EmitPrologue(Alignment require_alignment) { - // Recorded constant pool size is expressed in number of 32-bits words, +void ConstantPool::EmitPrologue(int size, Label* after) { + // Encoded constant pool size is expressed in number of 32-bits words, // and includes prologue and alignment, but not the jump around the pool // and the size of the marker itself. The word count may exceed 12 bits, // so 'auipc' is used as the marker. - const int kMarkerSize = kInstrSize; // Size of 'auipc' instruction. - int size = ComputeSize(Jump::kOmitted, require_alignment) - kMarkerSize; - int words = RoundUp(size, kInt32Size) / kInt32Size; - DCHECK(is_int20(words)); - assm_->auipc(zero_reg, words); + const int kAuipcSize = kInstrSize; + int encoded_size = size - kAuipcSize; + if (after) { + assm_->b(after); + encoded_size -= kInstrSize; // Jump isn't included in encoded size. + } + DCHECK(IsAligned(encoded_size, kInt32Size)); + int encoded_words = encoded_size / kInt32Size; + DCHECK(is_int20(encoded_words)); + assm_->auipc(zero_reg, encoded_words); assm_->EmitPoolGuard(); } -int ConstantPool::PrologueSize(Jump require_jump) const { +int ConstantPool::SizeOfPrologue(Jump jump) const { // Prologue is: - // j L ;; Optional, only if {require_jump}. + // j L ;; Optional, only if {jump} is required. // auipc x0, #words ;; Pool marker, encodes size in 32-bit words. // j 0x0 ;; Pool guard. + // ;; Optional to align the following constants. + // + // ;; Optional to round up to full 32-bit words. // L: - return (require_jump == Jump::kRequired) ? 3 * kInstrSize : 2 * kInstrSize; + return (jump == Jump::kRequired) ? 
3 * kInstrSize : 2 * kInstrSize; +} + +int ConstantPool::SizeOfPool(Jump jump, int padding) const { + int prologue_size = SizeOfPrologue(jump); + int padding_after = RoundUp(padding, kInt32Size) - padding; + DCHECK(v8_flags.riscv_c_extension || padding_after == 0); + int entries_size = static_cast(EntryCount() * kInt64Size); + return prologue_size + padding + entries_size + padding_after; } void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset, - Instruction* entry_offset, + int entry_offset, const ConstantPoolKey& key) { Instr instr_auipc = assm_->instr_at(load_offset); Instr instr_load = assm_->instr_at(load_offset + 4); @@ -261,9 +219,7 @@ void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset, DCHECK(assm_->IsLoadWord(instr_load)); DCHECK_EQ(assm_->AuipcOffset(instr_auipc), 0); DCHECK_EQ(assm_->LoadOffset(instr_load), 1); - int32_t distance = static_cast( - reinterpret_cast
(entry_offset) - - reinterpret_cast
(assm_->toAddress(load_offset))); + int32_t distance = entry_offset - load_offset; CHECK(is_int32(distance + 0x800)); int32_t Hi20 = (static_cast(distance) + 0x800) >> 12; int32_t Lo12 = static_cast(distance) << 20 >> 20; @@ -271,11 +227,10 @@ void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset, assm_->instr_at_put(load_offset + 4, SetLo12Offset(Lo12, instr_load)); } -void ConstantPool::Check(Emission force_emit, Jump require_jump, - size_t margin) { +void ConstantPool::Check(Emission force_emit, Jump jump, size_t margin) { // Some short sequence of instruction must not be broken up by constant pool - // emission, such sequences are protected by a ConstPool::BlockScope. - if (IsBlocked() || assm_->is_trampoline_pool_blocked()) { + // emission, such sequences are protected by an Assembler::BlockPoolsScope. + if (assm_->pools_blocked()) { // Something is wrong if emission is forced and blocked at the same time. DCHECK_EQ(force_emit, Emission::kIfNeeded); return; @@ -285,24 +240,14 @@ void ConstantPool::Check(Emission force_emit, Jump require_jump, // * it is not empty // * emission is forced by parameter force_emit (e.g. at function end). // * emission is mandatory or opportune according to {ShouldEmitNow}. - if (!IsEmpty() && (force_emit == Emission::kForced || - ShouldEmitNow(require_jump, margin))) { - // Check that the code buffer is large enough before emitting the constant - // pool (this includes the gap to the relocation information). - int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired); - int needed_space = worst_case_size + assm_->kGap; - while (assm_->buffer_space() <= needed_space) { - assm_->GrowBuffer(); - } - - // Since we do not know how much space the constant pool is going to take - // up, we cannot handle getting here while the trampoline pool is blocked. - CHECK(!assm_->is_trampoline_pool_blocked()); - EmitAndClear(require_jump); + if (!IsEmpty() && + (force_emit == Emission::kForced || ShouldEmitNow(jump, margin))) { + EmitAndClear(jump); } - // Since a constant pool is (now) empty, move the check offset forward by - // the standard interval. - SetNextCheckIn(ConstantPool::kCheckInterval); + + // Update the last check position and maybe the next one. + check_last_ = assm_->pc_offset(); + MaybeUpdateNextCheckIn(); } } // namespace internal diff --git a/deps/v8/src/codegen/riscv/constant-pool-riscv.h b/deps/v8/src/codegen/riscv/constant-pool-riscv.h index f92109908f94e9..52d8e9d53bab94 100644 --- a/deps/v8/src/codegen/riscv/constant-pool-riscv.h +++ b/deps/v8/src/codegen/riscv/constant-pool-riscv.h @@ -65,9 +65,8 @@ inline bool operator==(const ConstantPoolKey& a, const ConstantPoolKey& b) { // Constant pool generation enum class Jump { kOmitted, kRequired }; enum class Emission { kIfNeeded, kForced }; -enum class Alignment { kOmitted, kRequired }; enum class RelocInfoStatus { kMustRecord, kMustOmitForDuplicate }; -enum class PoolEmissionCheck { kSkip }; +enum class ConstantPoolEmission { kSkip, kCheck }; // Pools are emitted in the instruction stream, preferably after unconditional // jumps or after returns from functions (in dead code locations). @@ -79,52 +78,37 @@ enum class PoolEmissionCheck { kSkip }; // if so, a relocation info entry is associated to the constant pool entry. class ConstantPool { public: - explicit ConstantPool(Assembler* assm); - ~ConstantPool(); + explicit ConstantPool(Assembler* assm) : assm_(assm) {} + + bool IsEmpty() const { return deduped_entry_count_ == 0; } + void Clear(); // Records a constant pool entry. 
Returns whether we need to write RelocInfo. RelocInfoStatus RecordEntry64(uint64_t data, RelocInfo::Mode rmode); - size_t EntryCount() const { return deduped_entry_count_; } - bool IsEmpty() const { return deduped_entry_count_ == 0; } - // Check if pool will be out of range at {pc_offset}. - bool IsInImmRangeIfEmittedAt(int pc_offset); - // Size in bytes of the constant pool. Depending on parameters, the size will - // include the branch over the pool and alignment padding. - int ComputeSize(Jump require_jump, Alignment require_alignment) const; - - // Emit the pool at the current pc with a branch over the pool if requested. - void EmitAndClear(Jump require); - bool ShouldEmitNow(Jump require_jump, size_t margin = 0) const; - V8_EXPORT_PRIVATE void Check(Emission force_emission, Jump require_jump, - size_t margin = 0); - - V8_EXPORT_PRIVATE void MaybeCheck(); - void Clear(); + bool IsInRangeIfEmittedAt(Jump jump, int pc_offset) const; - // Constant pool emission can be blocked temporarily. - bool IsBlocked() const; + void Check(Emission force_emission, Jump jump, size_t margin = 0); // Repeated checking whether the constant pool should be emitted is expensive; // only check once a number of bytes have been generated. - void SetNextCheckIn(size_t bytes); - - // Class for scoping postponing the constant pool generation. - class V8_EXPORT_PRIVATE V8_NODISCARD BlockScope { - public: - // BlockScope immediatelly emits the pool if necessary to ensure that - // during the block scope at least {margin} bytes can be emitted without - // pool emission becomming necessary. - explicit BlockScope(Assembler* pool, size_t margin = 0); - BlockScope(Assembler* pool, PoolEmissionCheck); - ~BlockScope(); - - private: - ConstantPool* pool_; - DISALLOW_IMPLICIT_CONSTRUCTORS(BlockScope); - }; + int NextCheckIn() const { return check_next_; } + + void EnableNextCheckIn() { + DCHECK_NE(kCheckNextNoSaved, check_next_saved_); + check_next_ = check_next_saved_; + check_next_saved_ = kCheckNextNoSaved; + } + + void DisableNextCheckIn() { + DCHECK_EQ(kCheckNextNoSaved, check_next_saved_); + check_next_saved_ = check_next_; + check_next_ = kMaxInt; + DCHECK_NE(kCheckNextNoSaved, check_next_saved_); + } + private: // Pool entries are accessed with pc relative load therefore this cannot be // more than 1 * MB. Since constant pool emission checks are interval based, // and we want to keep entries close to the code, we try to emit every 64KB. @@ -132,29 +116,57 @@ class ConstantPool { // Hard limit to the const pool which must not be exceeded. static const size_t kMaxDistToPool = 1 * MB; // Approximate distance where the pool should be emitted. - V8_EXPORT_PRIVATE static const size_t kApproxDistToPool = 64 * KB; + static const size_t kApproxDistToPool = 64 * KB; // Approximate distance where the pool may be emitted if no jump is required // (due to a recent unconditional jump). static const size_t kOpportunityDistToPool = 64 * KB; // PC distance between constant pool checks. - V8_EXPORT_PRIVATE static const size_t kCheckInterval = 128 * kInstrSize; + static const size_t kCheckInterval = 128 * kInstrSize; // Number of entries in the pool which trigger a check. static const size_t kApproxMaxEntryCount = 512; - private: - void StartBlock(); - void EndBlock(); + size_t EntryCount() const { return deduped_entry_count_; } + + // Emit the pool at the current pc with a branch over the pool if requested. 
+ void EmitAndClear(Jump jump); + bool ShouldEmitNow(Jump jump, size_t margin = 0) const; void EmitEntries(); - void EmitPrologue(Alignment require_alignment); - int PrologueSize(Jump require_jump) const; + void EmitPrologue(int size, Label* after); + + // Size of the prologue in bytes. + int SizeOfPrologue(Jump jump) const; + int SizeOfPool(Jump jump, int padding) const; + + // Compute the position for the next check in. + int ComputeNextCheckIn() const { + int gap = (EntryCount() > kApproxMaxEntryCount) ? 0 : kCheckInterval; + return check_last_ + gap; + } + + // Update the next check in position, unless we've temporarily disabled + // the next check in through a call to {DisableNextCheckIn}. + void MaybeUpdateNextCheckIn() { + if (check_next_ == kMaxInt) return; + check_next_ = ComputeNextCheckIn(); + } + RelocInfoStatus RecordKey(ConstantPoolKey key, int offset); RelocInfoStatus GetRelocInfoStatusFor(const ConstantPoolKey& key); void Emit(const ConstantPoolKey& key); - void SetLoadOffsetToConstPoolEntry(int load_offset, Instruction* entry_offset, + void SetLoadOffsetToConstPoolEntry(int load_offset, int entry_offset, const ConstantPoolKey& key); - Alignment IsAlignmentRequiredIfEmittedAt(Jump require_jump, - int pc_offset) const; + + // Alignment and padding in bytes if emitted at the given {pc_offset}. + int AlignmentIfEmittedAt(Jump jump, int pc_offset) const; + int PaddingIfEmittedAt(Jump jump, int pc_offset) const; + + // Size in bytes of the constant pool if emitted at the given {pc_offset}. + // Depending on parameters, the size will include the branch over the pool + // and padding for alignment. + int SizeIfEmittedAt(Jump jump, int pc_offset) const { + return SizeOfPool(jump, PaddingIfEmittedAt(jump, pc_offset)); + } Assembler* assm_; @@ -174,8 +186,11 @@ class ConstantPool { // which represents the total number of entries. size_t deduped_entry_count_ = 0; - int next_check_ = 0; - int blocked_nesting_ = 0; + // Check in positions. 
+ int check_last_ = 0; + int check_next_ = 0; + int check_next_saved_ = kCheckNextNoSaved; + static constexpr int kCheckNextNoSaved = -1; }; } // namespace internal diff --git a/deps/v8/src/codegen/riscv/constants-riscv.h b/deps/v8/src/codegen/riscv/constants-riscv.h index 866ee96953b1f3..71f39175371d2b 100644 --- a/deps/v8/src/codegen/riscv/constants-riscv.h +++ b/deps/v8/src/codegen/riscv/constants-riscv.h @@ -27,6 +27,9 @@ constexpr int64_t kDQuietNanBit = 51; constexpr int64_t kDQuietNanMask = 0x1LL << kDQuietNanBit; constexpr int64_t kSQuietNanBit = 22; constexpr int64_t kSQuietNanMask = 0x1LL << kSQuietNanBit; +#ifdef USE_SIMULATOR +const int kInitialShadowStackSize = 1024; +#endif // USE_SIMULATOR } // namespace internal } // namespace v8 #endif // V8_CODEGEN_RISCV_CONSTANTS_RISCV_H_ diff --git a/deps/v8/src/codegen/riscv/extension-riscv-d.cc b/deps/v8/src/codegen/riscv/extension-riscv-d.cc index ab2cd12d958ebc..0e178c5de639d2 100644 --- a/deps/v8/src/codegen/riscv/extension-riscv-d.cc +++ b/deps/v8/src/codegen/riscv/extension-riscv-d.cc @@ -5,6 +5,7 @@ namespace v8 { namespace internal { + // RV32D Standard Extension void AssemblerRISCVD::fld(FPURegister rd, Register rs1, int16_t imm12) { @@ -85,7 +86,7 @@ void AssemblerRISCVD::fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2) { void AssemblerRISCVD::fcvt_s_d(FPURegister rd, FPURegister rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b0100000, frm, rd, rs1, ToRegister(1)); + GenInstrALUFP_rr(0b0100000, frm, rd, rs1, Register::from_code(1)); } void AssemblerRISCVD::fcvt_d_s(FPURegister rd, FPURegister rs1, @@ -116,7 +117,7 @@ void AssemblerRISCVD::fcvt_w_d(Register rd, FPURegister rs1, void AssemblerRISCVD::fcvt_wu_d(Register rd, FPURegister rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(1)); + GenInstrALUFP_rr(0b1100001, frm, rd, rs1, Register::from_code(1)); } void AssemblerRISCVD::fcvt_d_w(FPURegister rd, Register rs1, @@ -126,7 +127,7 @@ void AssemblerRISCVD::fcvt_d_w(FPURegister rd, Register rs1, void AssemblerRISCVD::fcvt_d_wu(FPURegister rd, Register rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(1)); + GenInstrALUFP_rr(0b1101001, frm, rd, rs1, Register::from_code(1)); } #ifdef V8_TARGET_ARCH_RISCV64 @@ -134,12 +135,12 @@ void AssemblerRISCVD::fcvt_d_wu(FPURegister rd, Register rs1, void AssemblerRISCVD::fcvt_l_d(Register rd, FPURegister rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(2)); + GenInstrALUFP_rr(0b1100001, frm, rd, rs1, Register::from_code(2)); } void AssemblerRISCVD::fcvt_lu_d(Register rd, FPURegister rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(3)); + GenInstrALUFP_rr(0b1100001, frm, rd, rs1, Register::from_code(3)); } void AssemblerRISCVD::fmv_x_d(Register rd, FPURegister rs1) { @@ -148,12 +149,12 @@ void AssemblerRISCVD::fmv_x_d(Register rd, FPURegister rs1) { void AssemblerRISCVD::fcvt_d_l(FPURegister rd, Register rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(2)); + GenInstrALUFP_rr(0b1101001, frm, rd, rs1, Register::from_code(2)); } void AssemblerRISCVD::fcvt_d_lu(FPURegister rd, Register rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(3)); + GenInstrALUFP_rr(0b1101001, frm, rd, rs1, Register::from_code(3)); } void AssemblerRISCVD::fmv_d_x(FPURegister rd, Register rs1) { diff --git a/deps/v8/src/codegen/riscv/extension-riscv-f.cc b/deps/v8/src/codegen/riscv/extension-riscv-f.cc index 
12e6268aec04c9..3d1815f67d21fe 100644 --- a/deps/v8/src/codegen/riscv/extension-riscv-f.cc +++ b/deps/v8/src/codegen/riscv/extension-riscv-f.cc @@ -91,7 +91,7 @@ void AssemblerRISCVF::fcvt_w_s(Register rd, FPURegister rs1, void AssemblerRISCVF::fcvt_wu_s(Register rd, FPURegister rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(1)); + GenInstrALUFP_rr(0b1100000, frm, rd, rs1, Register::from_code(1)); } void AssemblerRISCVF::fmv_x_w(Register rd, FPURegister rs1) { @@ -121,7 +121,7 @@ void AssemblerRISCVF::fcvt_s_w(FPURegister rd, Register rs1, void AssemblerRISCVF::fcvt_s_wu(FPURegister rd, Register rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(1)); + GenInstrALUFP_rr(0b1101000, frm, rd, rs1, Register::from_code(1)); } void AssemblerRISCVF::fmv_w_x(FPURegister rd, Register rs1) { @@ -133,22 +133,22 @@ void AssemblerRISCVF::fmv_w_x(FPURegister rd, Register rs1) { void AssemblerRISCVF::fcvt_l_s(Register rd, FPURegister rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(2)); + GenInstrALUFP_rr(0b1100000, frm, rd, rs1, Register::from_code(2)); } void AssemblerRISCVF::fcvt_lu_s(Register rd, FPURegister rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(3)); + GenInstrALUFP_rr(0b1100000, frm, rd, rs1, Register::from_code(3)); } void AssemblerRISCVF::fcvt_s_l(FPURegister rd, Register rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(2)); + GenInstrALUFP_rr(0b1101000, frm, rd, rs1, Register::from_code(2)); } void AssemblerRISCVF::fcvt_s_lu(FPURegister rd, Register rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(3)); + GenInstrALUFP_rr(0b1101000, frm, rd, rs1, Register::from_code(3)); } #endif diff --git a/deps/v8/src/codegen/riscv/extension-riscv-zfh.cc b/deps/v8/src/codegen/riscv/extension-riscv-zfh.cc index f981fb8e10113d..7d55a82711e143 100644 --- a/deps/v8/src/codegen/riscv/extension-riscv-zfh.cc +++ b/deps/v8/src/codegen/riscv/extension-riscv-zfh.cc @@ -113,7 +113,7 @@ void AssemblerRISCVZfh::fcvt_h_w(FPURegister rd, Register rs1, void AssemblerRISCVZfh::fcvt_h_wu(FPURegister rd, Register rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1101010, frm, rd, rs1, ToRegister(1)); + GenInstrALUFP_rr(0b1101010, frm, rd, rs1, Register::from_code(1)); } void AssemblerRISCVZfh::fcvt_w_h(Register rd, FPURegister rs1, @@ -123,7 +123,7 @@ void AssemblerRISCVZfh::fcvt_w_h(Register rd, FPURegister rs1, void AssemblerRISCVZfh::fcvt_wu_h(Register rd, FPURegister rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1100010, frm, rd, rs1, ToRegister(1)); + GenInstrALUFP_rr(0b1100010, frm, rd, rs1, Register::from_code(1)); } void AssemblerRISCVZfh::fmv_h_x(FPURegister rd, Register rs1) { @@ -136,11 +136,11 @@ void AssemblerRISCVZfh::fmv_x_h(Register rd, FPURegister rs1) { void AssemblerRISCVZfh::fcvt_h_d(FPURegister rd, FPURegister rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b0100010, frm, rd, rs1, ToRegister(1)); + GenInstrALUFP_rr(0b0100010, frm, rd, rs1, Register::from_code(1)); } void AssemblerRISCVZfh::fcvt_d_h(FPURegister rd, FPURegister rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b0100001, frm, rd, rs1, ToRegister(2)); + GenInstrALUFP_rr(0b0100001, frm, rd, rs1, Register::from_code(2)); } void AssemblerRISCVZfh::fcvt_h_s(FPURegister rd, FPURegister rs1, @@ -149,7 +149,7 @@ void AssemblerRISCVZfh::fcvt_h_s(FPURegister rd, FPURegister rs1, } void AssemblerRISCVZfh::fcvt_s_h(FPURegister rd, FPURegister rs1, 
FPURoundingMode frm) { - GenInstrALUFP_rr(0b0100000, frm, rd, rs1, ToRegister(2)); + GenInstrALUFP_rr(0b0100000, frm, rd, rs1, Register::from_code(2)); } #ifdef V8_TARGET_ARCH_RISCV64 @@ -157,22 +157,22 @@ void AssemblerRISCVZfh::fcvt_s_h(FPURegister rd, FPURegister rs1, void AssemblerRISCVZfh::fcvt_l_h(Register rd, FPURegister rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1100010, frm, rd, rs1, ToRegister(2)); + GenInstrALUFP_rr(0b1100010, frm, rd, rs1, Register::from_code(2)); } void AssemblerRISCVZfh::fcvt_lu_h(Register rd, FPURegister rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1100010, frm, rd, rs1, ToRegister(3)); + GenInstrALUFP_rr(0b1100010, frm, rd, rs1, Register::from_code(3)); } void AssemblerRISCVZfh::fcvt_h_l(FPURegister rd, Register rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1101010, frm, rd, rs1, ToRegister(2)); + GenInstrALUFP_rr(0b1101010, frm, rd, rs1, Register::from_code(2)); } void AssemblerRISCVZfh::fcvt_h_lu(FPURegister rd, Register rs1, FPURoundingMode frm) { - GenInstrALUFP_rr(0b1101010, frm, rd, rs1, ToRegister(3)); + GenInstrALUFP_rr(0b1101010, frm, rd, rs1, Register::from_code(3)); } #endif diff --git a/deps/v8/src/codegen/riscv/extension-riscv-zifencei.cc b/deps/v8/src/codegen/riscv/extension-riscv-zifencei.cc index d934bedde81b51..5187d97c303c8b 100644 --- a/deps/v8/src/codegen/riscv/extension-riscv-zifencei.cc +++ b/deps/v8/src/codegen/riscv/extension-riscv-zifencei.cc @@ -10,7 +10,8 @@ namespace v8 { namespace internal { void AssemblerRISCVZifencei::fence_i() { - GenInstrI(0b001, MISC_MEM, ToRegister(0), ToRegister(0), 0); + GenInstrI(0b001, MISC_MEM, zero_reg, zero_reg, 0); } + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc index b9b3ab3fe0692e..9aed3d8bbb4cdb 100644 --- a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc +++ b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc @@ -3417,7 +3417,7 @@ void MacroAssembler::Trunc_w_d(Register rd, FPURegister fs, Register result) { bind(&bad); // scratch still holds proper max/min value Mv(rd, scratch); - li(result, 0); + mv(result, zero_reg); // set result to 1 if normal, otherwise set result to 0 for abnormal bind(&done); } else { @@ -3800,6 +3800,9 @@ void MacroAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch, } vfadd_vf(dst, src, kScratchDoubleReg, MaskType::Mask); } + if (frm != RNE) { + VU.set(RNE); + } } void MacroAssembler::Ceil(VRegister vdst, VRegister vsrc, Register scratch, @@ -4774,10 +4777,6 @@ bool MacroAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond, case cc_always: if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; j(offset); - // TODO(kasperl@rivosinc.com): This probably has no effect, because the - // trampoline pool is blocked and that effectively blocks the constant - // pool too. - EmitConstPoolWithoutJumpIfNeeded(); break; case eq: // rs == rt @@ -4890,9 +4889,9 @@ bool MacroAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond, UNREACHABLE(); } - // TODO(kasperl@rivosinc.com): We're always blocking the trampoline pool - // here so is it really necessary to check? - CheckTrampolinePoolQuick(1 * kInstrSize); + // TODO(kasperl@rivosinc.com): If we've just emitted an unconditional branch + // it would be great if we could consider emitting the constant pool without + // a jump after leaving the pool blocking scope. 
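Both SetLoadOffsetToConstPoolEntry above and LoadAddress below split a signed 32-bit pc-relative distance into an auipc upper immediate plus a sign-extended 12-bit lower immediate. A small sketch of that arithmetic, assuming the distance stays well inside the +/-2 GB range (the helper name is illustrative):

#include <cassert>
#include <cstdint>

struct HiLo { int32_t hi20; int32_t lo12; };

// The +0x800 rounds the upper immediate so that the sign-extended low 12 bits
// bring hi20 * 4096 + lo12 back to the exact distance; the shift pair mirrors
// the formula used by the assembler.
HiLo SplitPcRelOffset(int32_t distance) {
  int32_t hi20 = (distance + 0x800) >> 12;
  int32_t lo12 = static_cast<int32_t>(distance) << 20 >> 20;
  assert(hi20 * 4096 + lo12 == distance);
  return {hi20, lo12};
}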
return true; } @@ -5432,6 +5431,25 @@ void MacroAssembler::CallBuiltin(Builtin builtin) { break; } } + if (v8_flags.debug_code) { + // Since the 'Abort' below might do a runtime call, we need to remember + // the current call's pc-offset, and restore it after the abort. + int old_offset = pc_offset_for_safepoint(); + // Check that the builtin didn't leave the rounding mode in a bad state. + Label done; + li(kScratchReg, ExternalReference::supports_wasm_simd_128_address()); + // If != 0, then simd is available. + Branch(&done, eq, kScratchReg, Operand(zero_reg), Label::Distance::kNear); + + static_assert(RNE == 0, "RNE must be 0"); + // Get the floating-point control and status register. + csrr(kScratchReg, csr_fcsr); + And(kScratchReg, kScratchReg, Operand(kFcsrFrmMask)); + beqz(kScratchReg, &done); // Equal to RNE. + Abort(AbortReason::kUnexpectedFPCRMode); + bind(&done); + set_pc_offset_for_safepoint(old_offset); + } } void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond, @@ -5620,16 +5638,13 @@ void MacroAssembler::LoadAddress(Register dst, Label* target, // by any trampoline pool emission here. BlockPoolsScope block_pools(this); int32_t offset; - if (CalculateOffset(target, &offset, OffsetSize::kOffset32)) { - CHECK(is_int32(offset + 0x800)); - int32_t Hi20 = (static_cast(offset) + 0x800) >> 12; - int32_t Lo12 = static_cast(offset) << 20 >> 20; - auipc(dst, Hi20); - AddWord(dst, dst, Lo12); - } else { - uintptr_t address = jump_address(target); - li(dst, Operand(address, rmode), ADDRESS_LOAD); - } + bool ok = CalculateOffset(target, &offset, OffsetSize::kOffset32); + CHECK(ok); + CHECK(is_int32(offset + 0x800)); + int32_t Hi20 = (static_cast(offset) + 0x800) >> 12; + int32_t Lo12 = static_cast(offset) << 20 >> 20; + auipc(dst, Hi20); + AddWord(dst, dst, Lo12); } void MacroAssembler::Switch(Register scratch, Register value, @@ -7568,14 +7583,15 @@ void MacroAssembler::BailoutIfDeoptimized() { #endif } -void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, - DeoptimizeKind kind, Label* ret, - Label*) { +void MacroAssembler::CallForDeoptimization( + Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, + Label* jump_deoptimization_entry_label) { ASM_CODE_COMMENT(this); + // make sure the label is within jal's 21 bit range(near) + DCHECK_WITH_MSG(is_near(jump_deoptimization_entry_label), + "deopt exit is too far from deopt entry jump"); BlockPoolsScope block_pools(this); - LoadWord(t6, MemOperand(kRootRegister, - IsolateData::BuiltinEntrySlotOffset(target))); - Call(t6); + Call(jump_deoptimization_entry_label); DCHECK_EQ(SizeOfCodeGeneratedSince(exit), (kind == DeoptimizeKind::kLazy) ? 
Deoptimizer::kLazyDeoptExitSize : Deoptimizer::kEagerDeoptExitSize); diff --git a/deps/v8/src/codegen/riscv/macro-assembler-riscv.h b/deps/v8/src/codegen/riscv/macro-assembler-riscv.h index 5300792b074bd8..ac8e582884c3d2 100644 --- a/deps/v8/src/codegen/riscv/macro-assembler-riscv.h +++ b/deps/v8/src/codegen/riscv/macro-assembler-riscv.h @@ -369,7 +369,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void CallForDeoptimization(Builtin target, int deopt_id, Label* exit, DeoptimizeKind kind, Label* ret, Label* jump_deoptimization_entry_label); - void Ret(COND_ARGS); // Emit code to discard a non-negative number of pointer-sized elements diff --git a/deps/v8/src/codegen/riscv/register-riscv.h b/deps/v8/src/codegen/riscv/register-riscv.h index 1c451d32eb0040..683902e5727b40 100644 --- a/deps/v8/src/codegen/riscv/register-riscv.h +++ b/deps/v8/src/codegen/riscv/register-riscv.h @@ -176,10 +176,6 @@ GENERAL_REGISTERS(DECLARE_REGISTER) constexpr Register no_reg = Register::no_reg(); -int ToNumber(Register reg); - -Register ToRegister(int num); - constexpr bool kPadArguments = false; constexpr AliasingKind kFPAliasing = AliasingKind::kIndependent; constexpr bool kSimdMaskRegisters = false; diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc index 68ce455d448310..a6cceab0d9eaa7 100644 --- a/deps/v8/src/codegen/s390/assembler-s390.cc +++ b/deps/v8/src/codegen/s390/assembler-s390.cc @@ -133,7 +133,7 @@ static bool supportsSTFLE() { bytes_read = read(fd, buffer, sizeof(buffer)); // Locate and read the platform field of AUXV if it is in the chunk for (auxv_element = buffer; - auxv_element + sizeof(auxv_element) <= buffer + bytes_read && + auxv_element < buffer + (bytes_read / sizeof(Elf64_auxv_t)) && auxv_element->a_type != AT_NULL; auxv_element++) { // We are looking for HWCAP entry in AUXV to search for STFLE support diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc index caf01acbc0852e..df2935dffcb32e 100644 --- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc +++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc @@ -1241,7 +1241,7 @@ void SharedMacroAssemblerBase::S128Load8Splat(XMMRegister dst, Operand src, XMMRegister scratch) { ASM_CODE_COMMENT(this); // The trap handler uses the current pc to creating a landing, so that it can - // determine if a trap occured in Wasm code due to a OOB load. Make sure the + // determine if a trap occurred in Wasm code due to a OOB load. Make sure the // first instruction in each case below is the one that loads. if (CpuFeatures::IsSupported(AVX2)) { CpuFeatureScope avx2_scope(this, AVX2); @@ -1264,7 +1264,7 @@ void SharedMacroAssemblerBase::S128Load16Splat(XMMRegister dst, Operand src, XMMRegister scratch) { ASM_CODE_COMMENT(this); // The trap handler uses the current pc to creating a landing, so that it can - // determine if a trap occured in Wasm code due to a OOB load. Make sure the + // determine if a trap occurred in Wasm code due to a OOB load. Make sure the // first instruction in each case below is the one that loads. 
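The supportsSTFLE change above tightens the loop bound for walking the auxiliary vector so that only complete Elf64_auxv_t entries actually returned by read() are visited. A hedged sketch of that iteration pattern (a simplified, single-read helper that checks one hwcap bit, not the real supportsSTFLE):

#include <elf.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdint>

// Walk /proc/self/auxv and report whether AT_HWCAP has the given bit set.
bool AuxvHasHwcapBit(uint64_t bit) {
  Elf64_auxv_t buffer[16];
  int fd = open("/proc/self/auxv", O_RDONLY);
  if (fd < 0) return false;
  ssize_t bytes_read = read(fd, buffer, sizeof(buffer));
  close(fd);
  if (bytes_read <= 0) return false;
  // Only complete entries that were actually read are valid.
  Elf64_auxv_t* end = buffer + (bytes_read / sizeof(Elf64_auxv_t));
  for (Elf64_auxv_t* entry = buffer; entry < end && entry->a_type != AT_NULL;
       ++entry) {
    if (entry->a_type == AT_HWCAP) return (entry->a_un.a_val & bit) != 0;
  }
  return false;
}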
if (CpuFeatures::IsSupported(AVX2)) { CpuFeatureScope avx2_scope(this, AVX2); @@ -1285,7 +1285,7 @@ void SharedMacroAssemblerBase::S128Load16Splat(XMMRegister dst, Operand src, void SharedMacroAssemblerBase::S128Load32Splat(XMMRegister dst, Operand src) { ASM_CODE_COMMENT(this); // The trap handler uses the current pc to creating a landing, so that it can - // determine if a trap occured in Wasm code due to a OOB load. Make sure the + // determine if a trap occurred in Wasm code due to a OOB load. Make sure the // first instruction in each case below is the one that loads. if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); diff --git a/deps/v8/src/codegen/turboshaft-builtins-assembler-inl.h b/deps/v8/src/codegen/turboshaft-builtins-assembler-inl.h index 5feb27caffd7ec..3b4c92b686b950 100644 --- a/deps/v8/src/codegen/turboshaft-builtins-assembler-inl.h +++ b/deps/v8/src/codegen/turboshaft-builtins-assembler-inl.h @@ -174,6 +174,13 @@ class BuiltinArgumentsTS { V base_; }; +// Deduction guide. +template +BuiltinArgumentsTS( + A*, compiler::turboshaft::V, + compiler::turboshaft::OptionalV) + -> BuiltinArgumentsTS; + } // namespace detail template @@ -265,8 +272,8 @@ class FeedbackCollectorReducer : public Next { return; } case SKIP_WRITE_BARRIER_SCOPE: + case SKIP_WRITE_BARRIER_FOR_GC: case UNSAFE_SKIP_WRITE_BARRIER: - UNIMPLEMENTED(); case UPDATE_WRITE_BARRIER: UNIMPLEMENTED(); case UPDATE_EPHEMERON_KEY_WRITE_BARRIER: @@ -445,9 +452,8 @@ class BuiltinsReducer : public Next { V exception = __ CatchBlockBegin(); __ CombineExceptionFeedback(); __ UpdateFeedback(); - __ template CallRuntime< - compiler::turboshaft::RuntimeCallDescriptor::ReThrow>( - __ data()->isolate(), __ NoContextConstant(), {exception}); + __ template CallRuntime( + __ NoContextConstant(), {.exception = exception}); __ Unreachable(); } } diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc index 68d61311b783b5..24abcfe35f1e13 100644 --- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc +++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc @@ -3735,16 +3735,16 @@ void MacroAssembler::CallWasmCodePointer(Register target, Move(kScratchRegister, ExternalReference::wasm_code_pointer_table()); #ifdef V8_ENABLE_SANDBOX - // Mask `target` to be within [0, WasmCodePointerTable::kMaxWasmCodePointers). - static_assert(wasm::WasmCodePointerTable::kMaxWasmCodePointers < - (kMaxUInt32 / sizeof(wasm::WasmCodePointerTableEntry))); - static_assert(base::bits::IsPowerOfTwo( - wasm::WasmCodePointerTable::kMaxWasmCodePointers)); - andl(target, Immediate(wasm::WasmCodePointerTable::kMaxWasmCodePointers - 1)); - - // Shift to multiply by `sizeof(WasmCodePointerTableEntry)`. - shll(target, Immediate(base::bits::WhichPowerOfTwo( - sizeof(wasm::WasmCodePointerTableEntry)))); + // Execute a left shift followed by right shift to achieve two things: + // - Only keep `kNumRelevantBits` bits (to avoid OOB access to the table), + // - shift by `kLeftShift` to translate from index to offset into the table. 
+ static constexpr int kLeftShift = + base::bits::WhichPowerOfTwo(sizeof(wasm::WasmCodePointerTableEntry)); + static constexpr int kNumRelevantBits = base::bits::WhichPowerOfTwo( + wasm::WasmCodePointerTable::kMaxWasmCodePointers); + static constexpr int kNumClearedHighBits = 32 - kNumRelevantBits; + shll(target, Immediate(kNumClearedHighBits)); + shrl(target, Immediate(kNumClearedHighBits - kLeftShift)); // Add `target` and `kScratchRegister` early to free `kScratchRegister` again. addq(target, kScratchRegister); @@ -3783,16 +3783,17 @@ void MacroAssembler::CallWasmCodePointerNoSignatureCheck(Register target) { Move(kScratchRegister, ExternalReference::wasm_code_pointer_table()); #ifdef V8_ENABLE_SANDBOX - // Mask `target` to be within [0, WasmCodePointerTable::kMaxWasmCodePointers). - static_assert(wasm::WasmCodePointerTable::kMaxWasmCodePointers < - (kMaxUInt32 / sizeof(wasm::WasmCodePointerTableEntry))); - static_assert(base::bits::IsPowerOfTwo( - wasm::WasmCodePointerTable::kMaxWasmCodePointers)); - andl(target, Immediate(wasm::WasmCodePointerTable::kMaxWasmCodePointers - 1)); - - // Shift to multiply by `sizeof(WasmCodePointerTableEntry)`. - shll(target, Immediate(base::bits::WhichPowerOfTwo( - sizeof(wasm::WasmCodePointerTableEntry)))); + // Execute a left shift followed by right shift to achieve two things: + // - Only keep `kNumRelevantBits` bits (to avoid OOB access to the table), + // - shift by `kLeftShift` to translate from index to offset into the table. + static constexpr int kLeftShift = + base::bits::WhichPowerOfTwo(sizeof(wasm::WasmCodePointerTableEntry)); + static constexpr int kNumRelevantBits = base::bits::WhichPowerOfTwo( + wasm::WasmCodePointerTable::kMaxWasmCodePointers); + static constexpr int kNumClearedHighBits = 32 - kNumRelevantBits; + static_assert(kNumClearedHighBits == 9); + shll(target, Immediate(kNumClearedHighBits)); + shrl(target, Immediate(kNumClearedHighBits - kLeftShift)); call(Operand(kScratchRegister, target, ScaleFactor::times_1, 0)); #else @@ -4034,6 +4035,8 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) { } else { PopReturnAddressTo(scratch); addq(rsp, Immediate(bytes_dropped)); + // Push and ret (instead of jmp) to keep the RSB and the CET shadow stack + // balanced. PushReturnAddressFrom(scratch); ret(0); } @@ -4694,10 +4697,9 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count, bind(®ular_invoke); } -void MacroAssembler::CallDebugOnFunctionCall( - Register fun, Register new_target, - Register expected_parameter_count_or_dispatch_handle, - Register actual_parameter_count) { +void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, + Register dispatch_handle, + Register actual_parameter_count) { ASM_CODE_COMMENT(this); // Load receiver to pass it later to DebugOnFunctionCall hook. // Receiver is located on top of the stack if we have a frame (usually a @@ -4708,8 +4710,10 @@ void MacroAssembler::CallDebugOnFunctionCall( FrameScope frame( this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL); - SmiTag(expected_parameter_count_or_dispatch_handle); - Push(expected_parameter_count_or_dispatch_handle); + // We must not Smi-tag the dispatch handle, because its top bits are + // meaningful; and we also don't need to, because its low bits are zero. 
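The shll/shrl pair in CallWasmCodePointer above is a branch-free way to mask the index down to the table size and scale it into a byte offset in a single sequence. A sketch with stand-in constants (the real kMaxWasmCodePointers and entry size come from the Wasm code pointer table; the values here are assumptions consistent with the static_assert above):

#include <cassert>
#include <cstdint>

uint32_t IndexToTableOffset(uint32_t index) {
  constexpr int kEntrySizeLog2 = 3;     // assume 8-byte table entries
  constexpr int kNumRelevantBits = 23;  // assume 2^23 max code pointers
  constexpr int kNumClearedHighBits = 32 - kNumRelevantBits;
  // Left shift drops the high bits, right shift lands the index back scaled
  // by the entry size; equivalent to (index & mask) << kEntrySizeLog2.
  uint32_t offset = (index << kNumClearedHighBits) >>
                    (kNumClearedHighBits - kEntrySizeLog2);
  assert(offset == ((index & ((uint32_t{1} << kNumRelevantBits) - 1))
                    << kEntrySizeLog2));
  return offset;
}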
+ static_assert(kJSDispatchHandleShift >= 1); + Push(dispatch_handle); SmiTag(actual_parameter_count); Push(actual_parameter_count); @@ -4728,8 +4732,7 @@ void MacroAssembler::CallDebugOnFunctionCall( } Pop(actual_parameter_count); SmiUntag(actual_parameter_count); - Pop(expected_parameter_count_or_dispatch_handle); - SmiUntag(expected_parameter_count_or_dispatch_handle); + Pop(dispatch_handle); } void MacroAssembler::StubPrologue(StackFrame::Type type) { @@ -5329,7 +5332,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling, if (argc_operand == nullptr) { DCHECK_NE(slots_to_drop_on_return, 0); - __ ret(slots_to_drop_on_return * kSystemPointerSize); + __ Ret(slots_to_drop_on_return * kSystemPointerSize, scratch); } else { __ PopReturnAddressTo(scratch); // {argc_operand} was loaded into {argc_reg} above. diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h index 193aea31f4b0fd..058a48851e82c9 100644 --- a/deps/v8/src/common/globals.h +++ b/deps/v8/src/common/globals.h @@ -1182,7 +1182,6 @@ class OffHeapCompressedMaybeObjectSlot; class FullObjectSlot; class FullMaybeObjectSlot; class FullHeapObjectSlot; -class OffHeapFullObjectSlot; class OldSpace; class ReadOnlySpace; class RelocInfo; @@ -1257,8 +1256,8 @@ struct SlotTraits { using TObjectSlot = FullObjectSlot; using TMaybeObjectSlot = FullMaybeObjectSlot; using THeapObjectSlot = FullHeapObjectSlot; - using TOffHeapObjectSlot = OffHeapFullObjectSlot; - using TInstructionStreamSlot = OffHeapFullObjectSlot; + using TOffHeapObjectSlot = FullObjectSlot; + using TInstructionStreamSlot = FullObjectSlot; #endif // V8_COMPRESS_POINTERS #ifdef V8_ENABLE_SANDBOX using TProtectedPointerSlot = @@ -1357,7 +1356,7 @@ enum AllocationSpace { FIRST_MUTABLE_SPACE = NEW_SPACE, LAST_MUTABLE_SPACE = TRUSTED_LO_SPACE, FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE, - LAST_GROWABLE_PAGED_SPACE = TRUSTED_SPACE, + LAST_GROWABLE_PAGED_SPACE = SHARED_TRUSTED_SPACE, FIRST_SWEEPABLE_SPACE = NEW_SPACE, LAST_SWEEPABLE_SPACE = SHARED_TRUSTED_SPACE }; @@ -1460,6 +1459,8 @@ inline std::ostream& operator<<(std::ostream& os, AllocationType type) { enum class PerformHeapLimitCheck { kYes, kNo }; enum class PerformIneffectiveMarkCompactCheck { kYes, kNo }; +enum class RequestedGCKind : uint8_t { kMajor = 1, kLastResort = 1 << 1 }; + class AllocationHint final { public: AllocationHint() = default; @@ -2728,6 +2729,7 @@ enum IsolateAddressId { V(TrapDivUnrepresentable) \ V(TrapRemByZero) \ V(TrapFloatUnrepresentable) \ + V(TrapNullFunc) \ V(TrapFuncSigMismatch) \ V(TrapDataSegmentOutOfBounds) \ V(TrapElementSegmentOutOfBounds) \ @@ -2737,6 +2739,7 @@ enum IsolateAddressId { V(TrapIllegalCast) \ V(TrapArrayOutOfBounds) \ V(TrapArrayTooLarge) \ + V(TrapResume) \ V(TrapStringOffsetOutOfBounds) enum class KeyedAccessLoadMode : uint8_t { diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h index c9559e7a922191..b99dcf2439b165 100644 --- a/deps/v8/src/common/message-template.h +++ b/deps/v8/src/common/message-template.h @@ -643,7 +643,7 @@ namespace internal { T(AwaitExpressionFormalParameter, \ "Illegal await-expression in formal parameters of async function") \ T(TooManyArguments, \ - "Too many arguments in function call (only 65535 allowed)") \ + "Too many arguments in function call (only 65525 allowed)") \ T(TooManyParameters, \ "Too many parameters in function definition (only 65534 allowed)") \ T(TooManyProperties, "Too many properties to enumerate") \ @@ -698,7 +698,8 @@ namespace internal { 
T(WasmTrapRemByZero, "remainder by zero") \ T(WasmTrapFloatUnrepresentable, "float unrepresentable in integer range") \ T(WasmTrapTableOutOfBounds, "table index is out of bounds") \ - T(WasmTrapFuncSigMismatch, "null function or function signature mismatch") \ + T(WasmTrapNullFunc, "null function") \ + T(WasmTrapFuncSigMismatch, "function signature mismatch") \ T(WasmTrapMultiReturnLengthMismatch, "multi-return length mismatch") \ T(WasmTrapJSTypeError, "type incompatibility when transforming from/to JS") \ T(WasmTrapDataSegmentOutOfBounds, "data segment out of bounds") \ @@ -711,6 +712,7 @@ namespace internal { T(WasmTrapStringInvalidUtf8, "invalid UTF-8 string") \ T(WasmTrapStringInvalidWtf8, "invalid WTF-8 string") \ T(WasmTrapStringOffsetOutOfBounds, "string offset out of bounds") \ + T(WasmTrapResume, "resuming an invalid continuation") \ T(WasmSuspendError, "trying to suspend without WebAssembly.promising") \ T(WasmTrapStringIsolatedSurrogate, \ "Failed to encode string as UTF-8: contains unpaired surrogate") \ diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc index 5d88f9d0fb6887..4da8835938626c 100644 --- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc +++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc @@ -220,6 +220,13 @@ bool OptimizingCompileTaskExecutor::TryQueueForOptimization( job_handle_->NotifyConcurrencyIncrease(); return true; } else { + if (v8_flags.trace_concurrent_recompilation) { + OptimizedCompilationInfo* info = job->compilation_info(); + DirectHandle function(*info->closure(), isolate); + PrintF(" ** Failed to enqueue a job for"); + ShortPrint(*function); + PrintF("\n"); + } return false; } } diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc index c0bf470ada9443..77e2d37ee3306e 100644 --- a/deps/v8/src/compiler/access-builder.cc +++ b/deps/v8/src/compiler/access-builder.cc @@ -151,10 +151,10 @@ FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() { // static FieldAccess AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer() { FieldAccess access = { - kTaggedBase, JSObject::kPropertiesOrHashOffset, - MaybeHandle(), OptionalMapRef(), - Type::Any(), MachineType::TaggedPointer(), - kPointerWriteBarrier, "JSObjectPropertiesOrHashKnownPointer"}; + kTaggedBase, JSObject::kPropertiesOrHashOffset, + MaybeHandle(), OptionalMapRef(), + Type::Any(), MachineType::AnyTagged(), + kFullWriteBarrier, "JSObjectPropertiesOrHashKnownPointer"}; return access; } @@ -233,7 +233,7 @@ FieldAccess AccessBuilder::ForJSExternalObjectValue() { "JSExternalObjectValue", ConstFieldInfo::None(), false, - kExternalObjectValueTag, + kFastApiExternalTypeTag, }; return access; } diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc index 8a728a35b49f56..0162f4930bc2c8 100644 --- a/deps/v8/src/compiler/access-info.cc +++ b/deps/v8/src/compiler/access-info.cc @@ -9,6 +9,7 @@ #include #include "src/builtins/accessors.h" +#include "src/common/globals.h" #include "src/compiler/compilation-dependencies.h" #include "src/compiler/heap-refs.h" #include "src/compiler/js-heap-broker-inl.h" @@ -563,7 +564,7 @@ std::optional AccessInfoFactory::ComputeElementAccessInfo( // Supporting receiver-is-first-param mode would require passing // the Proxy's handler to the eventual building of the Call node. 
if (wasm_data->receiver_is_first_param()) return {}; - const wasm::CanonicalSig* wasm_signature = wasm_data->sig(); + const wasm::CanonicalSig* wasm_signature = wasm_data->internal()->sig(); if (wasm_signature->parameter_count() < 2) return {}; wasm::CanonicalValueType key_type = wasm_signature->GetParam(1); @@ -861,7 +862,7 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo( AccessMode access_mode) const { DCHECK(descriptor.is_found()); Handle descriptors = broker()->CanonicalPersistentHandle( - holder_map.object()->instance_descriptors(kRelaxedLoad)); + holder_map.object()->instance_descriptors(kAcquireLoad)); SLOW_DCHECK(descriptor == descriptors->Search(*name.object(), *holder_map.object(), true)); @@ -1360,7 +1361,7 @@ PropertyAccessInfo AccessInfoFactory::LookupSpecialFieldAccessorInHolder( isolate()->factory()->length_string()) && details.location() == PropertyLocation::kDescriptor) { Tagged descriptors = - holder.map(broker_).object()->instance_descriptors(kRelaxedLoad); + holder.map(broker_).object()->instance_descriptors(kAcquireLoad); SLOW_DCHECK(index == descriptors->Search(*name.object(), *holder.map(broker_).object(), true)); diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc index 78f1aff35ce210..7b1b07d4abc28a 100644 --- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc +++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc @@ -557,6 +557,10 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { +bool HasImmediateInput(Instruction* instr, size_t index) { + return instr->InputAt(index)->IsImmediate(); +} + void FlushPendingPushRegisters(MacroAssembler* masm, FrameAccessState* frame_access_state, ZoneVector* pending_pushes) { @@ -793,18 +797,47 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArchCallJSFunction: { - Register func = i.InputRegister(0); - if (v8_flags.debug_code) { - UseScratchRegisterScope temps(masm()); - Register scratch = temps.Acquire(); - // Check the function's context matches the context argument. 
- __ ldr(scratch, FieldMemOperand(func, JSFunction::kContextOffset)); - __ cmp(cp, scratch); - __ Assert(eq, AbortReason::kWrongFunctionContext); - } uint32_t num_arguments = i.InputUint32(instr->JSCallArgumentCountInputIndex()); - __ CallJSFunction(func, num_arguments); + if (HasImmediateInput(instr, 0)) { + Handle constant = + i.ToConstant(instr->InputAt(0)).ToHeapObject(); + __ Move(kJavaScriptCallTargetRegister, constant); + if (Handle function; TryCast(constant, &function)) { + if (function->shared()->HasBuiltinId()) { + Builtin builtin = function->shared()->builtin_id(); + size_t expected = Builtins::GetFormalParameterCount(builtin); + if (num_arguments == expected) { + __ CallBuiltin(builtin); + } else { + __ AssertUnreachable(AbortReason::kJSSignatureMismatch); + } + } else { + JSDispatchHandle dispatch_handle = function->dispatch_handle(); + size_t expected = + IsolateGroup::current()->js_dispatch_table()->GetParameterCount( + dispatch_handle); + if (num_arguments >= expected) { + __ CallJSDispatchEntry(dispatch_handle, expected); + } else { + __ AssertUnreachable(AbortReason::kJSSignatureMismatch); + } + } + } else { + __ CallJSFunction(kJavaScriptCallTargetRegister, num_arguments); + } + } else { + Register func = i.InputRegister(0); + if (v8_flags.debug_code) { + UseScratchRegisterScope temps(masm()); + Register scratch = temps.Acquire(); + // Check the function's context matches the context argument. + __ ldr(scratch, FieldMemOperand(func, JSFunction::kContextOffset)); + __ cmp(cp, scratch); + __ Assert(eq, AbortReason::kWrongFunctionContext); + } + __ CallJSFunction(func, num_arguments); + } RecordCallPosition(instr); DCHECK_EQ(LeaveCC, i.OutputSBit()); frame_access_state()->ClearSPDelta(); @@ -3989,7 +4022,7 @@ void CodeGenerator::AssembleConstructFrame() { CommonFrameConstants::kFixedFrameSizeAboveFp)); __ Call(static_cast
(Builtin::kWasmHandleStackOverflow), RelocInfo::WASM_STUB_CALL); - // If the call succesfully grew the stack, we don't expect it to have + // If the call successfully grew the stack, we don't expect it to have // allocated any heap objects or otherwise triggered any GC. // If it was not able to grow the stack, it may have triggered a GC when // allocating the stack overflow exception object, but the call did not diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc index 1b42c1e100a437..b63750600b3b63 100644 --- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc @@ -791,6 +791,10 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { +bool HasImmediateInput(Instruction* instr, size_t index) { + return instr->InputAt(index)->IsImmediate(); +} + void AdjustStackPointerForTailCall(MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, @@ -1109,19 +1113,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArchCallJSFunction: { - Register func = i.InputRegister(0); - if (v8_flags.debug_code) { - // Check the function's context matches the context argument. - UseScratchRegisterScope scope(masm()); - Register temp = scope.AcquireX(); - __ LoadTaggedField(temp, - FieldMemOperand(func, JSFunction::kContextOffset)); - __ cmp(cp, temp); - __ Assert(eq, AbortReason::kWrongFunctionContext); - } uint32_t num_arguments = i.InputUint32(instr->JSCallArgumentCountInputIndex()); - __ CallJSFunction(func, num_arguments); + if (HasImmediateInput(instr, 0)) { + Handle constant = + i.ToConstant(instr->InputAt(0)).ToHeapObject(); + __ Mov(kJavaScriptCallTargetRegister, constant); + if (Handle function; TryCast(constant, &function)) { + if (function->shared()->HasBuiltinId()) { + Builtin builtin = function->shared()->builtin_id(); + size_t expected = Builtins::GetFormalParameterCount(builtin); + if (num_arguments == expected) { + __ CallBuiltin(builtin); + } else { + __ AssertUnreachable(AbortReason::kJSSignatureMismatch); + } + } else { + JSDispatchHandle dispatch_handle = function->dispatch_handle(); + size_t expected = + IsolateGroup::current()->js_dispatch_table()->GetParameterCount( + dispatch_handle); + if (num_arguments >= expected) { + __ CallJSDispatchEntry(dispatch_handle, expected); + } else { + __ AssertUnreachable(AbortReason::kJSSignatureMismatch); + } + } + } else { + __ CallJSFunction(kJavaScriptCallTargetRegister, num_arguments); + } + } else { + Register func = i.InputRegister(0); + if (v8_flags.debug_code) { + // Check the function's context matches the context argument. + UseScratchRegisterScope scope(masm()); + Register temp = scope.AcquireX(); + __ LoadTaggedField(temp, + FieldMemOperand(func, JSFunction::kContextOffset)); + __ cmp(cp, temp); + __ Assert(eq, AbortReason::kWrongFunctionContext); + } + __ CallJSFunction(func, num_arguments); + } RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); break; @@ -4288,7 +4321,7 @@ void CodeGenerator::AssembleConstructFrame() { CommonFrameConstants::kFixedFrameSizeAboveFp)); __ Call(static_cast
(Builtin::kWasmHandleStackOverflow), RelocInfo::WASM_STUB_CALL); - // If the call succesfully grew the stack, we don't expect it to have + // If the call successfully grew the stack, we don't expect it to have // allocated any heap objects or otherwise triggered any GC. // If it was not able to grow the stack, it may have triggered a GC when // allocating the stack overflow exception object, but the call did not diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc index 9795d9f55779c8..96adc52047e1ee 100644 --- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc @@ -4372,14 +4372,16 @@ void InstructionSelector::VisitFloat32Mul(OpIndex node) { const FloatBinopOp& mul = this->Get(node).template Cast(); const Operation& lhs = this->Get(mul.left()); - if (lhs.Is() && CanCover(node, mul.left())) { + if (!ensure_deterministic_nan_ && lhs.Is() && + CanCover(node, mul.left())) { Emit(kArm64Float32Fnmul, g.DefineAsRegister(node), g.UseRegister(lhs.input(0)), g.UseRegister(mul.right())); return; } const Operation& rhs = this->Get(mul.right()); - if (rhs.Is() && CanCover(node, mul.right())) { + if (!ensure_deterministic_nan_ && rhs.Is() && + CanCover(node, mul.right())) { Emit(kArm64Float32Fnmul, g.DefineAsRegister(node), g.UseRegister(rhs.input(0)), g.UseRegister(mul.left())); return; diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc index d06631b640b836..592a186b502c6f 100644 --- a/deps/v8/src/compiler/backend/code-generator.cc +++ b/deps/v8/src/compiler/backend/code-generator.cc @@ -75,6 +75,7 @@ CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage, resolver_(this), safepoints_(codegen_zone), handlers_(codegen_zone), + effect_handlers_(codegen_zone), deoptimization_exits_(codegen_zone), protected_deoptimization_literals_(codegen_zone), deoptimization_literals_(codegen_zone), @@ -172,11 +173,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall( DeoptimizeReason deoptimization_reason = exit->reason(); Label* jump_deoptimization_entry_label = &jump_deoptimization_entry_labels_[static_cast(deopt_kind)]; - if (info()->source_positions() || - AlwaysPreserveDeoptReason(deoptimization_reason)) { - masm()->RecordDeoptReason(deoptimization_reason, exit->node_id(), - exit->pos(), deoptimization_id); - } if (deopt_kind == DeoptimizeKind::kLazy || deopt_kind == DeoptimizeKind::kLazyAfterFastCall) { @@ -191,6 +187,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall( deopt_kind, exit->continue_label(), jump_deoptimization_entry_label); + // RecordDeoptReason has to be right after the call so that the deopt is + // associated with the correct pc. + if (info()->source_positions() || + AlwaysPreserveDeoptReason(deoptimization_reason)) { + masm()->RecordDeoptReason(deoptimization_reason, exit->node_id(), + exit->pos(), deoptimization_id); + } + exit->set_emitted(); return kSuccess; @@ -335,14 +339,6 @@ void CodeGenerator::AssembleCode() { masm()->InitializeRootRegister(); } } -#if defined(V8_TARGET_ARCH_RISCV32) || defined(V8_TARGET_ARCH_RISCV64) - // RVV uses VectorUnit to emit vset{i}vl{i}, reducing the static and dynamic - // overhead of the vset{i}vl{i} instruction. However there are some jumps - // back between blocks. the Rvv instruction may get an incorrect vtype. 
so - // here VectorUnit needs to be cleared to ensure that the vtype is correct - // within the block. - masm()->VU.clear(); -#endif if (V8_EMBEDDED_CONSTANT_POOL_BOOL && !block->needs_frame()) { ConstantPoolUnavailableScope constant_pool_unavailable(masm()); result_ = AssembleBlock(block); @@ -425,6 +421,10 @@ void CodeGenerator::AssembleCode() { } } +#if defined(V8_TARGET_ARCH_RISCV32) || defined(V8_TARGET_ARCH_RISCV64) + masm()->EndBlockPools(); +#endif // defined(V8_TARGET_ARCH_RISCV32) || defined(V8_TARGET_ARCH_RISCV64) + offsets_info_.pools = masm()->pc_offset(); // TODO(jgruber): Move all inlined metadata generation into a new, // architecture-independent version of FinishCode. Currently, this includes @@ -1109,7 +1109,7 @@ base::OwnedVector CodeGenerator::GenerateWasmDeoptimizationData() { wasm::WasmDeoptView view(base::VectorOf(result)); wasm::WasmDeoptData data = view.GetDeoptData(); DCHECK_EQ(data.deopt_exit_start_offset, deopt_exit_start_offset_); - DCHECK_EQ(data.deopt_literals_size, deoptimization_literals_.size()); + DCHECK_EQ(data.num_deopt_literals, deoptimization_literals_.size()); DCHECK_EQ(data.eager_deopt_count, eager_deopt_count_); DCHECK_EQ(data.entry_count, deoptimization_exits_.size()); DCHECK_EQ(data.translation_array_size, frame_translations.size()); @@ -1128,6 +1128,17 @@ base::OwnedVector CodeGenerator::GenerateWasmDeoptimizationData() { #endif return result; } + +base::OwnedVector +CodeGenerator::GenerateWasmEffectHandler() { + auto handlers = base::OwnedVector::New( + effect_handlers_.size()); + for (size_t i = 0; i < effect_handlers_.size(); ++i) { + handlers[i] = {effect_handlers_[i].pc_offset, effect_handlers_[i].tag_index, + effect_handlers_[i].handler->pos()}; + } + return handlers; +} #endif // V8_ENABLE_WEBASSEMBLY Label* CodeGenerator::AddJumpTable(base::Vector targets) { @@ -1146,10 +1157,23 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) { instr->HasCallDescriptorFlag(CallDescriptor::kNeedsFrameState); RecordSafepoint(instr->reference_map()); + InstructionOperandConverter i(this, instr); + int index = static_cast(instr->InputCount()) - 1; + if (instr->HasCallDescriptorFlag(CallDescriptor::kHasEffectHandler)) { + int num_handlers = i.ToConstant(instr->InputAt(index)).ToInt32(); + // Start from the first handler, order matters. 
+ for (int handler_idx = index - 2 * num_handlers; handler_idx < index; + handler_idx += 2) { + RpoNumber handler_rpo = + i.ToConstant(instr->InputAt(handler_idx)).ToRpoNumber(); + int tag_index = i.ToConstant(instr->InputAt(handler_idx + 1)).ToInt32(); + effect_handlers_.push_back( + {tag_index, GetLabel(handler_rpo), masm()->pc_offset()}); + } + index = index - 2 * num_handlers - 1; + } if (instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler)) { - InstructionOperandConverter i(this, instr); - Constant handler_input = - i.ToConstant(instr->InputAt(instr->InputCount() - 1)); + Constant handler_input = i.ToConstant(instr->InputAt(index--)); if (handler_input.type() == Constant::Type::kRpoNumber) { RpoNumber handler_rpo = handler_input.ToRpoNumber(); DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler()); diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h index 0ed0cfbc796ed7..818258ed914312 100644 --- a/deps/v8/src/compiler/backend/code-generator.h +++ b/deps/v8/src/compiler/backend/code-generator.h @@ -95,6 +95,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { #if V8_ENABLE_WEBASSEMBLY base::OwnedVector GenerateWasmDeoptimizationData(); + base::OwnedVector GenerateWasmEffectHandler(); #endif base::OwnedVector GetSourcePositionTable(); @@ -401,6 +402,12 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { int pc_offset; }; + struct EffectHandlerInfo { + int tag_index; + Label* handler; + int pc_offset; + }; + friend class OutOfLineCode; friend class CodeGeneratorTester; @@ -420,6 +427,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { GapResolver resolver_; SafepointTableBuilder safepoints_; ZoneVector handlers_; + ZoneVector effect_handlers_; int next_deoptimization_id_ = 0; int deopt_exit_start_offset_ = 0; int eager_deopt_count_ = 0; diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc index 7b077a26f39dc9..2835b7ee3e4c38 100644 --- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc @@ -4252,7 +4252,7 @@ void CodeGenerator::AssembleConstructFrame() { CommonFrameConstants::kFixedFrameSizeAboveFp))); __ wasm_call(static_cast
(Builtin::kWasmHandleStackOverflow), RelocInfo::WASM_STUB_CALL); - // If the call succesfully grew the stack, we don't expect it to have + // If the call successfully grew the stack, we don't expect it to have // allocated any heap objects or otherwise triggered any GC. // If it was not able to grow the stack, it may have triggered a GC when // allocating the stack overflow exception object, but the call did not diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc index cef93182d116d4..dc1428e553481e 100644 --- a/deps/v8/src/compiler/backend/instruction-scheduler.cc +++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc @@ -14,48 +14,54 @@ namespace v8 { namespace internal { namespace compiler { -void InstructionScheduler::SchedulingQueueBase::AddNode( - ScheduleGraphNode* node) { - // We keep the ready list sorted by total latency so that we can quickly find - // the next best candidate to schedule. - auto it = nodes_.begin(); - while ((it != nodes_.end()) && - ((*it)->total_latency() >= node->total_latency())) { - ++it; - } - nodes_.insert(it, node); +InstructionScheduler::SchedulingQueue::SchedulingQueue(Zone* zone) + : ready_(zone), + waiting_(zone), + random_number_generator_( + base::RandomNumberGenerator(v8_flags.random_seed)) {} + +void InstructionScheduler::SchedulingQueue::AddNode(ScheduleGraphNode* node) { + waiting_.push_back(node); +} + +void InstructionScheduler::SchedulingQueue::AddReady(ScheduleGraphNode* node) { + ready_.push_back(node); +} + +void InstructionScheduler::SchedulingQueue::Advance(int cycle) { + auto IsReady = [cycle](ScheduleGraphNode* n) { + return cycle >= n->start_cycle(); + }; + auto it = std::partition(waiting_.begin(), waiting_.end(), IsReady); + ready_.insert(ready_.end(), waiting_.begin(), it); + waiting_.erase(waiting_.begin(), it); } InstructionScheduler::ScheduleGraphNode* -InstructionScheduler::CriticalPathFirstQueue::PopBestCandidate(int cycle) { +InstructionScheduler::SchedulingQueue::PopBestCandidate(int cycle) { DCHECK(!IsEmpty()); - auto candidate = nodes_.end(); - for (auto iterator = nodes_.begin(); iterator != nodes_.end(); ++iterator) { - // We only consider instructions that have all their operands ready. - if (cycle >= (*iterator)->start_cycle()) { - candidate = iterator; - break; - } - } - if (candidate != nodes_.end()) { + if (ready_.empty()) return nullptr; + + if (V8_UNLIKELY(v8_flags.turbo_stress_instruction_scheduling)) { + // Pop a random node from the queue to perform stress tests on the + // scheduler. + auto candidate = ready_.begin(); + std::advance(candidate, random_number_generator_->NextInt( + static_cast(ready_.size()))); ScheduleGraphNode* result = *candidate; - nodes_.erase(candidate); + ready_.erase(candidate); return result; } - return nullptr; -} + // Prioritize the instruction with the highest latency on the path to reach + // the end of the graph. + auto best_candidate = std::max_element( + ready_.begin(), ready_.end(), + [](auto l, auto r) { return l->total_latency() < r->total_latency(); }); -InstructionScheduler::ScheduleGraphNode* -InstructionScheduler::StressSchedulerQueue::PopBestCandidate(int cycle) { - DCHECK(!IsEmpty()); - // Choose a random element from the ready list. 
- auto candidate = nodes_.begin(); - std::advance(candidate, random_number_generator()->NextInt( - static_cast(nodes_.size()))); - ScheduleGraphNode* result = *candidate; - nodes_.erase(candidate); + ScheduleGraphNode* result = *best_candidate; + ready_.erase(best_candidate); return result; } @@ -78,17 +84,13 @@ InstructionScheduler::InstructionScheduler(Zone* zone, InstructionSequence* sequence) : zone_(zone), sequence_(sequence), - graph_(zone), + graph_(sequence->instructions().size(), zone), + ready_list_(zone), last_side_effect_instr_(nullptr), pending_loads_(zone), last_live_in_reg_marker_(nullptr), last_deopt_or_trap_(nullptr), - operands_map_(zone) { - if (v8_flags.turbo_stress_instruction_scheduling) { - random_number_generator_ = - std::optional(v8_flags.random_seed); - } -} + operands_map_(zone) {} void InstructionScheduler::StartBlock(RpoNumber rpo) { DCHECK(graph_.empty()); @@ -100,32 +102,15 @@ void InstructionScheduler::StartBlock(RpoNumber rpo) { sequence()->StartBlock(rpo); } -void InstructionScheduler::EndBlock(RpoNumber rpo) { - if (v8_flags.turbo_stress_instruction_scheduling) { - Schedule(); - } else { - Schedule(); - } - sequence()->EndBlock(rpo); -} - -void InstructionScheduler::AddTerminator(Instruction* instr) { - ScheduleGraphNode* new_node = zone()->New(zone(), instr); - // Make sure that basic block terminators are not moved by adding them - // as successor of every instruction. - for (ScheduleGraphNode* node : graph_) { - node->AddSuccessor(new_node); - } - graph_.push_back(new_node); +void InstructionScheduler::EndBlock(RpoNumber rpo, Instruction* terminator) { + Schedule(); + sequence()->EndBlock(rpo, terminator); } void InstructionScheduler::AddInstruction(Instruction* instr) { - if (IsBarrier(instr)) { - if (v8_flags.turbo_stress_instruction_scheduling) { - Schedule(); - } else { - Schedule(); - } + int flags = GetInstructionFlags(instr); + if (IsBarrier(flags)) { + Schedule(); sequence()->AddInstruction(instr); return; } @@ -135,25 +120,23 @@ void InstructionScheduler::AddInstruction(Instruction* instr) { // We should not have branches in the middle of a block. DCHECK_NE(instr->flags_mode(), kFlags_branch); + if (last_live_in_reg_marker_ != nullptr) { + last_live_in_reg_marker_->AddSuccessor(new_node); + } + if (IsFixedRegisterParameter(instr)) { - if (last_live_in_reg_marker_ != nullptr) { - last_live_in_reg_marker_->AddSuccessor(new_node); - } last_live_in_reg_marker_ = new_node; } else { - if (last_live_in_reg_marker_ != nullptr) { - last_live_in_reg_marker_->AddSuccessor(new_node); - } - // Make sure that instructions are not scheduled before the last // deoptimization or trap point when they depend on it. - if ((last_deopt_or_trap_ != nullptr) && DependsOnDeoptOrTrap(instr)) { + if ((last_deopt_or_trap_ != nullptr) && + DependsOnDeoptOrTrap(instr, flags)) { last_deopt_or_trap_->AddSuccessor(new_node); } // Instructions with side effects and memory operations can't be // reordered with respect to each other. - if (HasSideEffect(instr)) { + if (HasSideEffect(flags)) { if (last_side_effect_instr_ != nullptr) { last_side_effect_instr_->AddSuccessor(new_node); } @@ -162,7 +145,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) { } pending_loads_.clear(); last_side_effect_instr_ = new_node; - } else if (IsLoadOperation(instr)) { + } else if (IsLoadOperation(flags)) { // Load operations can't be reordered with side effects instructions but // independent loads can be reordered with respect to each other. 
if (last_side_effect_instr_ != nullptr) { @@ -210,9 +193,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) { graph_.push_back(new_node); } -template void InstructionScheduler::Schedule() { - QueueType ready_list(this); // Compute total latencies so that we can schedule the critical path first. ComputeTotalLatencies(); @@ -220,16 +201,14 @@ void InstructionScheduler::Schedule() { // Add nodes which don't have dependencies to the ready list. for (ScheduleGraphNode* node : graph_) { if (!node->HasUnscheduledPredecessor()) { - ready_list.AddNode(node); + ready_list_.AddReady(node); } } // Go through the ready list and schedule the instructions. int cycle = 0; - while (!ready_list.IsEmpty()) { - ScheduleGraphNode* candidate = ready_list.PopBestCandidate(cycle); - - if (candidate != nullptr) { + while (!ready_list_.IsEmpty()) { + if (ScheduleGraphNode* candidate = ready_list_.PopBestCandidate(cycle)) { sequence()->AddInstruction(candidate->instruction()); for (ScheduleGraphNode* successor : candidate->successors()) { @@ -238,12 +217,12 @@ void InstructionScheduler::Schedule() { std::max(successor->start_cycle(), cycle + candidate->latency())); if (!successor->HasUnscheduledPredecessor()) { - ready_list.AddNode(successor); + ready_list_.AddNode(successor); } } } - cycle++; + ready_list_.Advance(cycle); } // Reset own state. diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.h b/deps/v8/src/compiler/backend/instruction-scheduler.h index 639c1de7135286..6810e8e4ad4e6e 100644 --- a/deps/v8/src/compiler/backend/instruction-scheduler.h +++ b/deps/v8/src/compiler/backend/instruction-scheduler.h @@ -38,10 +38,9 @@ class InstructionScheduler final : public ZoneObject { InstructionSequence* sequence); V8_EXPORT_PRIVATE void StartBlock(RpoNumber rpo); - V8_EXPORT_PRIVATE void EndBlock(RpoNumber rpo); + V8_EXPORT_PRIVATE void EndBlock(RpoNumber rpo, Instruction* terminator); V8_EXPORT_PRIVATE void AddInstruction(Instruction* instr); - V8_EXPORT_PRIVATE void AddTerminator(Instruction* instr); static bool SchedulerSupported(); @@ -50,6 +49,8 @@ class InstructionScheduler final : public ZoneObject { // Represent an instruction and their dependencies. class ScheduleGraphNode : public ZoneObject { public: + using SuccessorList = SmallZoneVector; + ScheduleGraphNode(Zone* zone, Instruction* instr); // Mark the instruction represented by 'node' as a dependency of this one. @@ -69,7 +70,7 @@ class InstructionScheduler final : public ZoneObject { } Instruction* instruction() { return instr_; } - ZoneDeque& successors() { return successors_; } + SuccessorList& successors() { return successors_; } int latency() const { return latency_; } int total_latency() const { return total_latency_; } @@ -80,7 +81,7 @@ class InstructionScheduler final : public ZoneObject { private: Instruction* instr_; - ZoneDeque successors_; + SuccessorList successors_; // Number of unscheduled predecessors for this node. int unscheduled_predecessors_count_; @@ -104,70 +105,40 @@ class InstructionScheduler final : public ZoneObject { // have been scheduled. Note that this class is inteded to be extended by // concrete implementation of the scheduling queue which define the policy // to pop node from the queue. 
- class SchedulingQueueBase { + class SchedulingQueue { public: - explicit SchedulingQueueBase(InstructionScheduler* scheduler) - : scheduler_(scheduler), nodes_(scheduler->zone()) {} + explicit SchedulingQueue(Zone* zone); + void Advance(int cycle); void AddNode(ScheduleGraphNode* node); + void AddReady(ScheduleGraphNode* node); - bool IsEmpty() const { return nodes_.empty(); } - - protected: - InstructionScheduler* scheduler_; - ZoneLinkedList nodes_; - }; - - // A scheduling queue which prioritize nodes on the critical path (we look - // for the instruction with the highest latency on the path to reach the end - // of the graph). - class CriticalPathFirstQueue : public SchedulingQueueBase { - public: - explicit CriticalPathFirstQueue(InstructionScheduler* scheduler) - : SchedulingQueueBase(scheduler) {} - - // Look for the best candidate to schedule, remove it from the queue and - // return it. - ScheduleGraphNode* PopBestCandidate(int cycle); - }; - - // A queue which pop a random node from the queue to perform stress tests on - // the scheduler. - class StressSchedulerQueue : public SchedulingQueueBase { - public: - explicit StressSchedulerQueue(InstructionScheduler* scheduler) - : SchedulingQueueBase(scheduler) {} - + bool IsEmpty() const { return ready_.empty() && waiting_.empty(); } ScheduleGraphNode* PopBestCandidate(int cycle); private: - base::RandomNumberGenerator* random_number_generator() { - return scheduler_->random_number_generator(); - } + SmallZoneVector ready_; + SmallZoneVector waiting_; + std::optional random_number_generator_; }; // Perform scheduling for the current block specifying the queue type to // use to determine the next best candidate. - template void Schedule(); // Return the scheduling properties of the given instruction. V8_EXPORT_PRIVATE int GetInstructionFlags(const Instruction* instr) const; int GetTargetInstructionFlags(const Instruction* instr) const; - bool IsBarrier(const Instruction* instr) const { - return (GetInstructionFlags(instr) & kIsBarrier) != 0; - } + bool IsBarrier(int flags) const { return (flags & kIsBarrier) != 0; } // Check whether the given instruction has side effects (e.g. function call, // memory store). - bool HasSideEffect(const Instruction* instr) const { - return (GetInstructionFlags(instr) & kHasSideEffect) != 0; - } + bool HasSideEffect(int flags) const { return (flags & kHasSideEffect) != 0; } // Return true if the instruction is a memory load. - bool IsLoadOperation(const Instruction* instr) const { - return (GetInstructionFlags(instr) & kIsLoadOperation) != 0; + bool IsLoadOperation(int flags) const { + return (flags & kIsLoadOperation) != 0; } bool CanTrap(const Instruction* instr) const { @@ -189,9 +160,9 @@ class InstructionScheduler final : public ZoneObject { // Return true if the instruction cannot be moved before the last deopt or // trap point we encountered. 
- bool DependsOnDeoptOrTrap(const Instruction* instr) const { + bool DependsOnDeoptOrTrap(const Instruction* instr, int flags) const { return MayNeedDeoptOrTrapCheck(instr) || instr->IsDeoptimizeCall() || - CanTrap(instr) || HasSideEffect(instr) || IsLoadOperation(instr); + CanTrap(instr) || HasSideEffect(flags) || IsLoadOperation(flags); } // Identify nops used as a definition point for live-in registers at @@ -211,13 +182,11 @@ class InstructionScheduler final : public ZoneObject { Zone* zone() { return zone_; } InstructionSequence* sequence() { return sequence_; } - base::RandomNumberGenerator* random_number_generator() { - return &random_number_generator_.value(); - } Zone* zone_; InstructionSequence* sequence_; ZoneVector graph_; + SchedulingQueue ready_list_; friend class InstructionSchedulerTester; @@ -242,9 +211,7 @@ class InstructionScheduler final : public ZoneObject { // Keep track of definition points for virtual registers. This is used to // record operand dependencies in the scheduling graph. - ZoneMap operands_map_; - - std::optional random_number_generator_; + ZoneUnorderedMap operands_map_; }; } // namespace compiler diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc index 01700f203e2da4..9172d0c4be1045 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.cc +++ b/deps/v8/src/compiler/backend/instruction-selector.cc @@ -160,9 +160,9 @@ std::optional InstructionSelector::SelectInstructions() { AddInstruction(instructions_[start]); } UpdateRenames(instructions_[end]); - AddTerminator(instructions_[end]); } - EndBlock(this->rpo_number(block)); + Instruction* terminator = instructions_[end]; + EndBlock(this->rpo_number(block), terminator); } #if DEBUG sequence()->ValidateSSA(); @@ -179,21 +179,12 @@ void InstructionSelector::StartBlock(RpoNumber rpo) { } } -void InstructionSelector::EndBlock(RpoNumber rpo) { +void InstructionSelector::EndBlock(RpoNumber rpo, Instruction* terminator) { if (UseInstructionScheduling()) { DCHECK_NOT_NULL(scheduler_); - scheduler_->EndBlock(rpo); + scheduler_->EndBlock(rpo, terminator); } else { - sequence()->EndBlock(rpo); - } -} - -void InstructionSelector::AddTerminator(Instruction* instr) { - if (UseInstructionScheduling()) { - DCHECK_NOT_NULL(scheduler_); - scheduler_->AddTerminator(instr); - } else { - sequence()->AddInstruction(instr); + sequence()->EndBlock(rpo, terminator); } } @@ -1250,6 +1241,16 @@ void InstructionSelector::InitializeCallBuffer( break; } case CallDescriptor::kCallJSFunction: + // TODO(olivf): Implement the required kArchCallJSFunction with + // immediate argument on all architectures. 
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM) || \ + defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_PPC64) || \ + defined(V8_TARGET_ARCH_S390X) + if (this->IsHeapConstant(callee)) { + buffer->instruction_args.push_back(g.UseImmediate(callee)); + break; + } +#endif buffer->instruction_args.push_back( g.UseLocation(callee, buffer->descriptor->GetInputLocation(0))); break; @@ -2162,7 +2163,9 @@ void InstructionSelector::UpdateMaxPushedArgumentCount(size_t count) { *max_pushed_argument_count_ = std::max(count, *max_pushed_argument_count_); } -void InstructionSelector::VisitCall(OpIndex node, Block* handler) { +void InstructionSelector::VisitCall( + OpIndex node, Block* exception_handler, + base::Vector effect_handlers) { OperandGenerator g(this); const CallOp& call_op = Cast(node); const CallDescriptor* call_descriptor = call_op.descriptor->descriptor; @@ -2222,15 +2225,28 @@ void InstructionSelector::VisitCall(OpIndex node, Block* handler) { } // Pass label of exception handler block. - if (handler) { + bool lazy_deopt_on_throw = + call_op.descriptor->lazy_deopt_on_throw == LazyDeoptOnThrow::kYes; + if (exception_handler) { flags |= CallDescriptor::kHasExceptionHandler; - buffer.instruction_args.push_back(g.Label(handler)); - } else { - if (call_op.descriptor->lazy_deopt_on_throw == LazyDeoptOnThrow::kYes) { - flags |= CallDescriptor::kHasExceptionHandler; - buffer.instruction_args.push_back( - g.UseImmediate(kLazyDeoptOnThrowSentinel)); + buffer.instruction_args.push_back(g.Label(exception_handler)); + } else if (lazy_deopt_on_throw) { + flags |= CallDescriptor::kHasExceptionHandler; + buffer.instruction_args.push_back( + g.UseImmediate(kLazyDeoptOnThrowSentinel)); + } + if (!effect_handlers.empty()) { + flags |= CallDescriptor::kHasEffectHandler; + for (auto& handler : effect_handlers) { + buffer.instruction_args.push_back(g.Label(handler.block)); + buffer.instruction_args.push_back(g.UseImmediate(handler.tag_index)); } + buffer.instruction_args.push_back( + g.UseImmediate(static_cast(effect_handlers.size()))); + } else { + // This bit had a different meaning before isel, so ensure that it is + // cleared: + flags &= ~CallDescriptor::kHasEffectHandler; } // Select the appropriate opcode based on the call type. @@ -2692,7 +2708,8 @@ void InstructionSelector::VisitControl(const Block* block) { } case Opcode::kCheckException: { const CheckExceptionOp& check = op.Cast(); - VisitCall(check.throwing_operation(), check.catch_block); + VisitCall(check.throwing_operation(), check.catch_block, + check.effect_handlers); VisitGoto(check.didnt_throw_block); return; } diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h index 258e26b8d5f0ad..c8e5f409e9f65b 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.h +++ b/deps/v8/src/compiler/backend/instruction-selector.h @@ -463,9 +463,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final std::optional SelectInstructions(); void StartBlock(RpoNumber rpo); - void EndBlock(RpoNumber rpo); + void EndBlock(RpoNumber rpo, Instruction* terminator); void AddInstruction(Instruction* instr); - void AddTerminator(Instruction* instr); // =========================================================================== // ============= Architecture-independent code emission methods. 
============= @@ -1390,7 +1389,9 @@ class V8_EXPORT_PRIVATE InstructionSelector final void VisitPhi(turboshaft::OpIndex node); void VisitProjection(turboshaft::OpIndex node); void VisitConstant(turboshaft::OpIndex node); - void VisitCall(turboshaft::OpIndex call, turboshaft::Block* handler = {}); + void VisitCall( + turboshaft::OpIndex call, turboshaft::Block* exception_handler = {}, + base::Vector wasm_effect_handlers = {}); void VisitDeoptimizeIf(turboshaft::OpIndex node); void VisitTrapIf(turboshaft::OpIndex node); void VisitTailCall(turboshaft::OpIndex call); diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc index aa84721f252c29..fc1436ccd272fb 100644 --- a/deps/v8/src/compiler/backend/instruction.cc +++ b/deps/v8/src/compiler/backend/instruction.cc @@ -1028,6 +1028,11 @@ void InstructionSequence::EndBlock(RpoNumber rpo) { current_block_ = nullptr; } +void InstructionSequence::EndBlock(RpoNumber rpo, Instruction* terminator) { + AddInstruction(terminator); + EndBlock(rpo); +} + int InstructionSequence::AddInstruction(Instruction* instr) { DCHECK_NOT_NULL(current_block_); int index = static_cast(instructions_.size()); diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h index bb743f355294c7..d482a9e26f6154 100644 --- a/deps/v8/src/compiler/backend/instruction.h +++ b/deps/v8/src/compiler/backend/instruction.h @@ -1084,10 +1084,9 @@ class V8_EXPORT_PRIVATE Instruction final { // Keep in sync with instruction-selector.cc where the inputs are assembled. switch (arch_opcode()) { case kArchCallWasmFunctionIndirect: - return InputCount() - - (HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler) - ? 2 - : 1); + return InputCount() - 1 - + HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler) - + 2 * HasCallDescriptorFlag(CallDescriptor::kHasEffectHandler); case kArchTailCallWasmIndirect: return InputCount() - 3; default: @@ -1101,10 +1100,9 @@ class V8_EXPORT_PRIVATE Instruction final { // Keep in sync with instruction-selector.cc where the inputs are assembled. switch (arch_opcode()) { case kArchCallCodeObject: - return InputCount() - - (HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler) - ? 2 - : 1); + return InputCount() - 1 - + HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler) - + 2 * HasCallDescriptorFlag(CallDescriptor::kHasEffectHandler); case kArchTailCallCodeObject: return InputCount() - 3; default: @@ -1115,11 +1113,9 @@ class V8_EXPORT_PRIVATE Instruction final { // For JS call instructions, computes the index of the argument count input. size_t JSCallArgumentCountInputIndex() const { // Keep in sync with instruction-selector.cc where the inputs are assembled. - if (HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler)) { - return InputCount() - 2; - } else { - return InputCount() - 1; - } + return InputCount() - 1 - + HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler) - + 2 * HasCallDescriptorFlag(CallDescriptor::kHasEffectHandler); } enum GapPosition { @@ -1947,6 +1943,7 @@ class V8_EXPORT_PRIVATE InstructionSequence final int AddInstruction(Instruction* instr); void StartBlock(RpoNumber rpo); void EndBlock(RpoNumber rpo); + void EndBlock(RpoNumber rpo, Instruction* terminator); void AddConstant(int virtual_register, Constant constant) { // TODO(titzer): allow RPO numbers as constants? 
@@ -1973,7 +1970,7 @@ class V8_EXPORT_PRIVATE InstructionSequence final if (constant.type() == Constant::kRpoNumber) { // Ideally we would inline RPO numbers into the operand, however jump- // threading modifies RPO values and so we indirect through a vector - // of rpo_immediates to enable rewriting. We keep this seperate from the + // of rpo_immediates to enable rewriting. We keep this separate from the // immediates vector so that we don't repeatedly push the same rpo // number. RpoNumber rpo_number = constant.ToRpoNumber(); diff --git a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc index 2a0090f7a6237a..b5e84f7c727d05 100644 --- a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc +++ b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc @@ -307,13 +307,13 @@ class OutOfLineVerifySkippedWriteBarrier final : public OutOfLineCode { __ DecompressTagged(value_, value_); } + __ PreCheckSkippedWriteBarrier(object_, value_, scratch_, exit()); + if (must_save_ra_) { // We need to save and restore ra if the frame was elided. __ Push(ra); } - __ PreCheckSkippedWriteBarrier(object_, value_, scratch_, exit()); - SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters() ? SaveFPRegsMode::kSave : SaveFPRegsMode::kIgnore; @@ -341,20 +341,30 @@ class OutOfLineVerifySkippedIndirectWriteBarrier final : public OutOfLineCode { : OutOfLineCode(gen), object_(object), value_(value), + must_save_ra_(!gen->frame_access_state()->has_frame()), zone_(gen->zone()) {} void Generate() final { + if (must_save_ra_) { + // We need to save and restore ra if the frame was elided. + __ Push(ra); + } + SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters() ? SaveFPRegsMode::kSave : SaveFPRegsMode::kIgnore; __ CallVerifySkippedIndirectWriteBarrierStubSaveRegisters(object_, value_, save_fp_mode); + if (must_save_ra_) { + __ Pop(ra); + } } private: Register const object_; Register const value_; + const bool must_save_ra_; Zone* zone_; }; diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc index f92106f5ec6688..d5f961f92ee828 100644 --- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc @@ -720,6 +720,10 @@ void CodeGenerator::AssemblePrepareTailCall() { frame_access_state()->SetFrameAccessToSP(); } +bool HasImmediateInput(Instruction* instr, size_t index) { + return instr->InputAt(index)->IsImmediate(); +} + namespace { void FlushPendingPushRegisters(MacroAssembler* masm, @@ -937,17 +941,47 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchCallJSFunction: { v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool( masm()); - Register func = i.InputRegister(0); - if (v8_flags.debug_code) { - // Check the function's context matches the context argument. 
- __ LoadTaggedField( - kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset), r0); - __ CmpS64(cp, kScratchReg); - __ Assert(eq, AbortReason::kWrongFunctionContext); - } uint32_t num_arguments = i.InputUint32(instr->JSCallArgumentCountInputIndex()); - __ CallJSFunction(func, num_arguments); + if (HasImmediateInput(instr, 0)) { + Handle<HeapObject> constant = + i.ToConstant(instr->InputAt(0)).ToHeapObject(); + __ Move(kJavaScriptCallTargetRegister, constant); + if (Handle<JSFunction> function; TryCast(constant, &function)) { + if (function->shared()->HasBuiltinId()) { + Builtin builtin = function->shared()->builtin_id(); + size_t expected = Builtins::GetFormalParameterCount(builtin); + if (num_arguments == expected) { + __ CallBuiltin(builtin); + } else { + __ AssertUnreachable(AbortReason::kJSSignatureMismatch); + } + } else { + JSDispatchHandle dispatch_handle = function->dispatch_handle(); + size_t expected = + IsolateGroup::current()->js_dispatch_table()->GetParameterCount( + dispatch_handle); + if (num_arguments >= expected) { + __ CallJSDispatchEntry(dispatch_handle, expected); + } else { + __ AssertUnreachable(AbortReason::kJSSignatureMismatch); + } + } + } else { + __ CallJSFunction(kJavaScriptCallTargetRegister, num_arguments); + } + } else { + Register func = i.InputRegister(0); + if (v8_flags.debug_code) { + // Check the function's context matches the context argument. + __ LoadTaggedField(kScratchReg, + FieldMemOperand(func, JSFunction::kContextOffset), + r0); + __ CmpS64(cp, kScratchReg); + __ Assert(eq, AbortReason::kWrongFunctionContext); + } + __ CallJSFunction(func, num_arguments); + } RecordCallPosition(instr); DCHECK_EQ(LeaveRC, i.OutputRCBit()); frame_access_state()->ClearSPDelta(); @@ -3222,7 +3256,7 @@ void CodeGenerator::AssembleConstructFrame() { CommonFrameConstants::kFixedFrameSizeAboveFp)); __ Call(static_cast<Address>
(Builtin::kWasmHandleStackOverflow), RelocInfo::WASM_STUB_CALL); - // If the call succesfully grew the stack, we don't expect it to have + // If the call successfully grew the stack, we don't expect it to have // allocated any heap objects or otherwise triggered any GC. // If it was not able to grow the stack, it may have triggered a GC when // allocating the stack overflow exception object, but the call did not diff --git a/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc b/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc index 6179a0e96e3de7..82cf08801d2928 100644 --- a/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc +++ b/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc @@ -2872,6 +2872,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ VU.set(FPURoundingMode::RTZ); __ vfncvt_x_f_w(i.OutputSimd128Register(), kSimd128ScratchReg, MaskType::Mask); + __ VU.set(FPURoundingMode::RNE); break; } case kRiscvI32x4TruncSatF64x2UZero: { @@ -2883,6 +2884,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ VU.set(FPURoundingMode::RTZ); __ vfncvt_xu_f_w(i.OutputSimd128Register(), kSimd128ScratchReg, MaskType::Mask); + __ VU.set(FPURoundingMode::RNE); break; } case kRiscvI32x4ShrU: { @@ -3473,12 +3475,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ VU.SetSimd128(E32); __ VU.set(FPURoundingMode::RTZ); __ vfcvt_f_xu_v(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ VU.set(FPURoundingMode::RNE); break; } case kRiscvF32x4SConvertI32x4: { __ VU.SetSimd128(E32); __ VU.set(FPURoundingMode::RTZ); __ vfcvt_f_x_v(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ VU.set(FPURoundingMode::RNE); break; } case kRiscvF32x4ReplaceLane: { @@ -3570,14 +3574,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // the 128-bit range. __ vslideup_vi(src0, src1, 4); __ VU.SetSimd128(E16); - __ VU.set(FPURoundingMode::RNE); __ vnclip_vi(dst, src0, 0); } else { CheckRegisterConstraints( opcode, i, RiscvRegisterConstraint::kRegisterGroupNoOverlap); __ VU.SetSimd128(E16); - __ VU.set(FPURoundingMode::RNE); // Implicitly uses src1, which is part of the register group. __ vnclip_vi(dst, src0, 0); } @@ -3599,21 +3601,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // Clip negative values to zero. __ vmax_vx(kSimd128ScratchReg, src0, zero_reg); __ VU.SetSimd128(E16); - __ VU.set(FPURoundingMode::RNE); __ vnclipu_vi(dst, kSimd128ScratchReg, 0); } else { CheckRegisterConstraints( opcode, i, RiscvRegisterConstraint::kRegisterGroupNoOverlap); // Clip negative values to zero. __ VU.SetSimd128x2(E32); - __ li(kScratchReg, 0); // Implicitly uses kSimd128ScratchReg2 and src1, which are part of the // register groups. DCHECK(kSimd128ScratchReg.code() + 1 == kSimd128ScratchReg2.code()); - __ vmax_vx(kSimd128ScratchReg, i.InputSimd128Register(0), kScratchReg); + __ vmax_vx(kSimd128ScratchReg, i.InputSimd128Register(0), zero_reg); // Convert the clipped values to 16-bit positive integers. __ VU.SetSimd128(E16); - __ VU.set(FPURoundingMode::RNE); // Implicitly uses kSimd128ScratchReg2, which is part of the register // group. 
__ vnclipu_vi(dst, kSimd128ScratchReg, 0); @@ -3633,7 +3632,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ li(kScratchReg, 2); __ vdivu_vx(temp3, temp2, kScratchReg); __ VU.SetSimd128(E8); - __ VU.set(FPURoundingMode::RNE); __ vnclipu_vi(i.OutputSimd128Register(), temp3, 0); break; } @@ -3651,13 +3649,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // the 128-bit range. __ vslideup_vi(src0, src1, 8); __ VU.SetSimd128(E8); - __ VU.set(FPURoundingMode::RNE); __ vnclip_vi(dst, src0, 0); } else { CheckRegisterConstraints( opcode, i, RiscvRegisterConstraint::kRegisterGroupNoOverlap); __ VU.SetSimd128(E8); - __ VU.set(FPURoundingMode::RNE); // If the vector size is only 128 bits, implicitly uses src1, which is // part of the register group. __ vnclip_vi(dst, src0, 0); @@ -3680,21 +3676,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // Clip negative values to zero. __ vmax_vx(kSimd128ScratchReg, src0, zero_reg); __ VU.SetSimd128(E8); - __ VU.set(FPURoundingMode::RNE); __ vnclipu_vi(dst, kSimd128ScratchReg, 0); } else { CheckRegisterConstraints( opcode, i, RiscvRegisterConstraint::kRegisterGroupNoOverlap); // Clip negative values to zero. __ VU.SetSimd128x2(E16); - __ li(kScratchReg, 0); // Implicitly uses kSimd128ScratchReg2 and src1, which are part of the // register groups. DCHECK(kSimd128ScratchReg.code() + 1 == kSimd128ScratchReg2.code()); - __ vmax_vx(kSimd128ScratchReg, src0, kScratchReg); + __ vmax_vx(kSimd128ScratchReg, src0, zero_reg); // Convert the clipped values. __ VU.SetSimd128(E8); - __ VU.set(FPURoundingMode::RNE); // Implicitly uses kSimd128ScratchReg2, which is part of the register // group. __ vnclipu_vi(dst, kSimd128ScratchReg, 0); @@ -3716,7 +3709,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ li(kScratchReg, 2); __ vdivu_vx(temp, temp, kScratchReg); __ VU.SetSimd128(E16); - __ VU.set(FPURoundingMode::RNE); // Reduces the register group down to a single register. __ vnclipu_vi(i.OutputSimd128Register(), temp, 0); break; @@ -3901,6 +3893,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vfcvt_x_f_v(kSimd128ScratchReg, i.InputSimd128Register(0), Mask); __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg); } + __ VU.set(FPURoundingMode::RNE); break; } case kRiscvI32x4UConvertF32x4: { @@ -3916,6 +3909,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vfcvt_xu_f_v(kSimd128ScratchReg, i.InputSimd128Register(0), Mask); __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg); } + __ VU.set(FPURoundingMode::RNE); break; } case kRiscvI32x4SConvertI16x8High: { @@ -5146,7 +5140,7 @@ void CodeGenerator::AssembleConstructFrame() { CommonFrameConstants::kFixedFrameSizeAboveFp)); __ Call(static_cast
(Builtin::kWasmHandleStackOverflow), RelocInfo::WASM_STUB_CALL); - // If the call succesfully grew the stack, we don't expect it to have + // If the call successfully grew the stack, we don't expect it to have // allocated any heap objects or otherwise triggered any GC. // If it was not able to grow the stack, it may have triggered a GC when // allocating the stack overflow exception object, but the call did not @@ -5328,7 +5322,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { __ Ret(); } -void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); } +void CodeGenerator::FinishCode() { __ FinishCode(); } void CodeGenerator::PrepareForDeoptimizationExits( ZoneDeque* exits) { @@ -5336,7 +5330,6 @@ void CodeGenerator::PrepareForDeoptimizationExits( // of the deoptimization exits, because it destroys our ability to compute // the deoptimization index based on the 'pc' and the offset of the start // of the exits section. - __ ForceConstantPoolEmissionWithoutJump(); int total_size = 0; for (DeoptimizationExit* exit : deoptimization_exits_) { if (exit->emitted()) continue; // May have been emitted inline. @@ -5344,7 +5337,25 @@ void CodeGenerator::PrepareForDeoptimizationExits( ? Deoptimizer::kLazyDeoptExitSize : Deoptimizer::kEagerDeoptExitSize; } - __ CheckTrampolinePoolQuick(total_size); + __ StartBlockPools(ConstantPoolEmission::kCheck, total_size); + + // Check which deopt kinds exist in this InstructionStream object, to avoid + // emitting jumps to unused entries. + bool saw_deopt_kind[kDeoptimizeKindCount] = {false}; + for (auto exit : *exits) { + saw_deopt_kind[static_cast(exit->kind())] = true; + } + // Emit the jumps to deoptimization entries. + static_assert(static_cast(kFirstDeoptimizeKind) == 0); + for (int i = 0; i < kDeoptimizeKindCount; i++) { + if (!saw_deopt_kind[i]) continue; + DeoptimizeKind kind = static_cast(i); + UseScratchRegisterScope temps(masm()); + Register scratch = temps.Acquire(); + __ bind(&jump_deoptimization_entry_labels_[i]); + __ LoadEntryFromBuiltin(Deoptimizer::GetDeoptimizationEntry(kind), scratch); + __ Jump(scratch); + } } void CodeGenerator::MoveToTempLocation(InstructionOperand* source, diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc index c87dac52f5d9c9..53157f28bdf656 100644 --- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc +++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc @@ -1284,17 +1284,46 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArchCallJSFunction: { - Register func = i.InputRegister(0); - if (v8_flags.debug_code) { - // Check the function's context matches the context argument. 
- __ LoadTaggedField(kScratchReg, - FieldMemOperand(func, JSFunction::kContextOffset)); - __ CmpS64(cp, kScratchReg); - __ Assert(eq, AbortReason::kWrongFunctionContext); - } uint32_t num_arguments = i.InputUint32(instr->JSCallArgumentCountInputIndex()); - __ CallJSFunction(func, num_arguments); + if (HasImmediateInput(instr, 0)) { + Handle<HeapObject> constant = + i.ToConstant(instr->InputAt(0)).ToHeapObject(); + __ Move(kJavaScriptCallTargetRegister, constant); + if (Handle<JSFunction> function; TryCast(constant, &function)) { + if (function->shared()->HasBuiltinId()) { + Builtin builtin = function->shared()->builtin_id(); + size_t expected = Builtins::GetFormalParameterCount(builtin); + if (num_arguments == expected) { + __ CallBuiltin(builtin); + } else { + __ AssertUnreachable(AbortReason::kJSSignatureMismatch); + } + } else { + JSDispatchHandle dispatch_handle = function->dispatch_handle(); + size_t expected = + IsolateGroup::current()->js_dispatch_table()->GetParameterCount( + dispatch_handle); + if (num_arguments >= expected) { + __ CallJSDispatchEntry(dispatch_handle, expected); + } else { + __ AssertUnreachable(AbortReason::kJSSignatureMismatch); + } + } + } else { + __ CallJSFunction(kJavaScriptCallTargetRegister, num_arguments); + } + } else { + Register func = i.InputRegister(0); + if (v8_flags.debug_code) { + // Check the function's context matches the context argument. + __ LoadTaggedField(kScratchReg, + FieldMemOperand(func, JSFunction::kContextOffset)); + __ CmpS64(cp, kScratchReg); + __ Assert(eq, AbortReason::kWrongFunctionContext); + } + __ CallJSFunction(func, num_arguments); + } RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); break; @@ -3661,7 +3690,7 @@ void CodeGenerator::AssembleConstructFrame() { CommonFrameConstants::kFixedFrameSizeAboveFp)); __ Call(static_cast<Address>
(Builtin::kWasmHandleStackOverflow), RelocInfo::WASM_STUB_CALL); - // If the call succesfully grew the stack, we don't expect it to have + // If the call successfully grew the stack, we don't expect it to have // allocated any heap objects or otherwise triggered any GC. // If it was not able to grow the stack, it may have triggered a GC when // allocating the stack overflow exception object, but the call did not diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc index 81c9fcfc0e980d..164e2d76ab1b4d 100644 --- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc +++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc @@ -1675,16 +1675,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArchCallJSFunction: { - Register func = i.InputRegister(0); - if (v8_flags.debug_code) { - // Check the function's context matches the context argument. - __ cmp_tagged(rsi, FieldOperand(func, JSFunction::kContextOffset)); - __ Assert(equal, AbortReason::kWrongFunctionContext); - } static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch"); uint32_t num_arguments = i.InputUint32(instr->JSCallArgumentCountInputIndex()); - __ CallJSFunction(func, num_arguments); + if (HasImmediateInput(instr, 0)) { + Handle<HeapObject> constant = + i.ToConstant(instr->InputAt(0)).ToHeapObject(); + __ Move(kJavaScriptCallTargetRegister, constant); + if (Handle<JSFunction> function; TryCast(constant, &function)) { + if (function->shared()->HasBuiltinId()) { + Builtin builtin = function->shared()->builtin_id(); + size_t expected = Builtins::GetFormalParameterCount(builtin); + if (num_arguments == expected) { + __ CallBuiltin(builtin); + } else { + __ AssertUnreachable(AbortReason::kJSSignatureMismatch); + } + } else { + JSDispatchHandle dispatch_handle = function->dispatch_handle(); + size_t expected = + IsolateGroup::current()->js_dispatch_table()->GetParameterCount( + dispatch_handle); + if (num_arguments >= expected) { + __ CallJSDispatchEntry(dispatch_handle, expected); + } else { + __ AssertUnreachable(AbortReason::kJSSignatureMismatch); + } + } + } else { + __ CallJSFunction(kJavaScriptCallTargetRegister, num_arguments); + } + } else { + Register func = i.InputRegister(0); + if (v8_flags.debug_code) { + // Check the function's context matches the context argument. + __ cmp_tagged(rsi, FieldOperand(func, JSFunction::kContextOffset)); + __ Assert(equal, AbortReason::kWrongFunctionContext); + } + __ CallJSFunction(func, num_arguments); + } frame_access_state()->ClearSPDelta(); RecordCallPosition(instr); AssemblePlaceHolderForLazyDeopt(instr); @@ -1931,7 +1960,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( MachineRepresentation::kTagged, instr); } if (mode > RecordWriteMode::kValueIsPointer) { - __ MaybeJumpIfReadOnlyOrSmallSmi(value, ool->exit()); __ JumpIfSmi(value, ool->exit()); } #if V8_ENABLE_STICKY_MARK_BITS_BOOL @@ -7939,7 +7967,7 @@ void CodeGenerator::AssembleConstructFrame() { CommonFrameConstants::kFixedFrameSizeAboveFp))); __ near_call(static_cast
(Builtin::kWasmHandleStackOverflow), RelocInfo::WASM_STUB_CALL); - // If the call succesfully grew the stack, we don't expect it to have + // If the call successfully grew the stack, we don't expect it to have // allocated any heap objects or otherwise triggered any GC. // If it was not able to grow the stack, it may have triggered a GC when // allocating the stack overflow exception object, but the call did not diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc index d7632150350456..27d926fc35f966 100644 --- a/deps/v8/src/compiler/common-operator.cc +++ b/deps/v8/src/compiler/common-operator.cc @@ -626,6 +626,7 @@ const ExitMachineGraphParameters& ExitMachineGraphParametersOf( V(TrapRemByZero) \ V(TrapFloatUnrepresentable) \ V(TrapTableOutOfBounds) \ + V(TrapNullFunc) \ V(TrapFuncSigMismatch) #define CACHED_PARAMETER_LIST(V) \ diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc index b2d58c037e29c6..81ef4b12b5b9ae 100644 --- a/deps/v8/src/compiler/compilation-dependencies.cc +++ b/deps/v8/src/compiler/compilation-dependencies.cc @@ -125,7 +125,7 @@ class PendingDependencies final { explicit PendingDependencies(Zone* zone) : deps_(8, {}, ZoneAllocationPolicy(zone)) {} - void Register(Handle object, + void Register(Handle object, DependentCode::DependencyGroup group) { // InstructionStream, which are per-local Isolate, cannot depend on objects // in the shared or RO heaps. Shared and RO heap dependencies are designed @@ -133,8 +133,9 @@ class PendingDependencies final { // have transitions or change the shape of their fields. See // DependentCode::DeoptimizeDependencyGroups for corresponding DCHECK. if (HeapLayout::InWritableSharedSpace(*object) || - HeapLayout::InReadOnlySpace(*object)) + HeapLayout::InReadOnlySpace(*object)) { return; + } deps_.LookupOrInsert(object, HandleValueHash(object))->value |= group; } @@ -180,19 +181,20 @@ class PendingDependencies final { } private: - uint32_t HandleValueHash(DirectHandle handle) { + uint32_t HandleValueHash(DirectHandle handle) { return static_cast(base::hash_value(handle->ptr())); } struct HandleValueEqual { bool operator()(uint32_t hash1, uint32_t hash2, - DirectHandle lhs, - Handle rhs) const { + DirectHandle lhs, + Handle rhs) const { return hash1 == hash2 && lhs.is_identical_to(rhs); } }; - base::TemplateHashMapImpl, DependentCode::DependencyGroups, - HandleValueEqual, ZoneAllocationPolicy> + base::TemplateHashMapImpl, + DependentCode::DependencyGroups, HandleValueEqual, + ZoneAllocationPolicy> deps_; }; @@ -402,6 +404,8 @@ class ConstantInDictionaryPrototypeChainDependency final : ValidationResult::kFoundIncorrect; }; + // TODO(jkummerow): Consider supporting Wasm structs in this loop (by + // skipping over them) if that becomes a relevant use case. 
while (IsJSObject(prototype)) { // We only care about JSObjects because that's the only type of holder // (and types of prototypes on the chain to the holder) that diff --git a/deps/v8/src/compiler/heap-refs.cc b/deps/v8/src/compiler/heap-refs.cc index 1388d51be6e14f..614d662ec4ef10 100644 --- a/deps/v8/src/compiler/heap-refs.cc +++ b/deps/v8/src/compiler/heap-refs.cc @@ -864,7 +864,7 @@ bool IsReadOnlyLengthDescriptor(Isolate* isolate, DirectHandle jsarray_map) { DCHECK(!jsarray_map->is_dictionary_map()); Tagged descriptors = - jsarray_map->instance_descriptors(isolate, kRelaxedLoad); + jsarray_map->instance_descriptors(isolate, kAcquireLoad); static_assert( JSArray::kLengthOffset == JSObject::kHeaderSize, "The length should be the first property on the descriptor array"); @@ -1615,7 +1615,7 @@ ObjectRef AllocationSiteRef::nested_site(JSHeapBroker* broker) const { return MakeRefAssumeMemoryFence(broker, object()->nested_site()); } -HEAP_ACCESSOR_C(AllocationSite, bool, CanInlineCall) +HEAP_ACCESSOR_C(AllocationSite, bool, IsSpeculationDisabled) HEAP_ACCESSOR_C(AllocationSite, bool, PointsToLiteral) HEAP_ACCESSOR_C(AllocationSite, ElementsKind, GetElementsKind) HEAP_ACCESSOR_C(AllocationSite, AllocationType, GetAllocationType) diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h index c16b6bde8343d0..2db9395d46cee1 100644 --- a/deps/v8/src/compiler/heap-refs.h +++ b/deps/v8/src/compiler/heap-refs.h @@ -900,7 +900,7 @@ class AllocationSiteRef : public HeapObjectRef { OptionalJSObjectRef boilerplate(JSHeapBroker* broker) const; ElementsKind GetElementsKind() const; - bool CanInlineCall() const; + bool IsSpeculationDisabled() const; }; class BigIntRef : public HeapObjectRef { diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc index 1a4227fbd64b19..e8e3c586cd3d0e 100644 --- a/deps/v8/src/compiler/js-call-reducer.cc +++ b/deps/v8/src/compiler/js-call-reducer.cc @@ -3976,7 +3976,7 @@ Reduction JSCallReducer::ReduceCallWasmFunction(Node* node, Tagged instance_data = function_data->instance_data(); - const wasm::CanonicalSig* wasm_signature = function_data->sig(); + const wasm::CanonicalSig* wasm_signature = function_data->internal()->sig(); if (!CanInlineJSToWasmCall(wasm_signature)) { return NoChange(); } @@ -5611,7 +5611,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) { node->RemoveInput(n.FeedbackVectorIndex()); NodeProperties::ChangeOp( node, - javascript()->CreateArray(arity, std::nullopt, FeedbackSource())); + javascript()->CreateArray(arity, std::nullopt, p.feedback())); return Changed(node); } case Builtin::kObjectConstructor: { diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc index e22b54b4c83540..b32551947d5001 100644 --- a/deps/v8/src/compiler/js-create-lowering.cc +++ b/deps/v8/src/compiler/js-create-lowering.cc @@ -654,21 +654,22 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) { dependencies()->DependOnInitialMapInstanceSizePrediction( original_constructor); - // Tells whether we are protected by either the {site} or a - // protector cell to do certain speculative optimizations. - bool can_inline_call = false; + // Tells whether we are protected by either the {site} or a protector cell to + // do certain speculative optimizations. This mechanism protects against + // deopt loops. + bool can_speculate_call = false; // Check if we have a feedback {site} on the {node}. 
ElementsKind elements_kind = initial_map->elements_kind(); if (site_ref) { elements_kind = site_ref->GetElementsKind(); - can_inline_call = site_ref->CanInlineCall(); + can_speculate_call = !site_ref->IsSpeculationDisabled(); allocation = dependencies()->DependOnPretenureMode(*site_ref); dependencies()->DependOnElementsKind(*site_ref); } else { // If there is no allocation site, only inline the constructor when there is // overall speculation feedback that can be disabled on a deopt. - can_inline_call = p.call_feedback().IsValid(); + can_speculate_call = p.call_feedback().IsValid(); } if (arity == 0) { @@ -701,7 +702,7 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) { allocation, slack_tracking_prediction, p.call_feedback()); } - if (length_type.Maybe(Type::UnsignedSmall()) && can_inline_call) { + if (length_type.Maybe(Type::UnsignedSmall()) && can_speculate_call) { return ReduceNewArray(node, length, *initial_map, elements_kind, allocation, slack_tracking_prediction, p.call_feedback()); @@ -739,7 +740,7 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) { elements_kind = GetMoreGeneralElementsKind( elements_kind, IsHoleyElementsKind(elements_kind) ? HOLEY_ELEMENTS : PACKED_ELEMENTS); - } else if (!can_inline_call) { + } else if (!can_speculate_call) { // We have some crazy combination of types for the {values} where // there's no clear decision on the elements kind statically. And // we don't have a protection against deoptimization loops for the diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc index a07ea0e5cd0a98..a5ff77c8cc43ec 100644 --- a/deps/v8/src/compiler/js-generic-lowering.cc +++ b/deps/v8/src/compiler/js-generic-lowering.cc @@ -1125,7 +1125,7 @@ void JSGenericLowering::LowerJSForInNext(Node* node) { } void JSGenericLowering::LowerJSForOfNext(Node* node) { - ReplaceWithBuiltinCall(node, Builtin::kForOfNextBaseline); + ReplaceWithBuiltinCall(node, Builtin::kForOfNext); } void JSGenericLowering::LowerJSLoadMessage(Node* node) { diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc index cfc9f6e1879e4d..62f08d209183ed 100644 --- a/deps/v8/src/compiler/js-inlining.cc +++ b/deps/v8/src/compiler/js-inlining.cc @@ -689,6 +689,18 @@ Reduction JSInliner::ReduceJSCall(Node* node) { #endif // V8_ENABLE_WEBASSEMBLY JSCallAccessor call(node); + constexpr int kMaxArgumentsSafetyBuffer = 10; + if (node->InputCount() > Code::kMaxArguments - kMaxArgumentsSafetyBuffer) { + // We don't attempt to inline calls with too many inputs (note that we + // subtract this {kMaxArgumentsSafetyBuffer} so that we still have some + // place left to add FrameState, receiver and whatever other input is + // necessary), since a lot of things can go wrong when doing this, including + // CreateArtificialFrameState having too many inputs, Turboshaft needing to + // bail out because the max input count for a node in Turboshaft is lower as + // in Turbofan, and other assumptions about max input count being broken. + return NoChange(); + } + // Determine the call target. 
OptionalSharedFunctionInfoRef shared_info(DetermineCallTarget(node)); if (!shared_info.has_value()) return NoChange(); diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc index 60b4df5fbb6b76..faeda3e2623afb 100644 --- a/deps/v8/src/compiler/js-native-context-specialization.cc +++ b/deps/v8/src/compiler/js-native-context-specialization.cc @@ -3274,12 +3274,16 @@ JSNativeContextSpecialization::BuildPropertyStore( } effect = graph()->NewNode( common()->BeginRegion(RegionObservability::kObservable), effect); + effect = graph()->NewNode(simplified()->StoreField(field_access), storage, + value, effect, control); + // We store the map only at the end of the transition to avoid a potential + // race with background threads: a background thread could otherwise read + // a map, then try to read the new field based on this map, but this field + // hasn't been written yet. effect = graph()->NewNode( simplified()->StoreField(AccessBuilder::ForMap()), receiver, jsgraph()->ConstantNoHole(transition_map_ref, broker()), effect, control); - effect = graph()->NewNode(simplified()->StoreField(field_access), storage, - value, effect, control); effect = graph()->NewNode(common()->FinishRegion(), jsgraph()->UndefinedConstant(), effect); } else { diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h index f8224c3db6356d..cee3ed8f60bcd4 100644 --- a/deps/v8/src/compiler/linkage.h +++ b/deps/v8/src/compiler/linkage.h @@ -71,6 +71,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final kInitializeRootRegister = 1u << 3, // Does not ever try to allocate space on our heap. kNoAllocate = 1u << 4, + // Repurpose this bit during instruction selection. Signal the presence of + // an effect handler to code generation. + kHasEffectHandler = kNoAllocate, // Use the kJavaScriptCallCodeStartRegister (fixed) register for the // indirect target address when calling. kFixedTargetRegister = 1u << 5, diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc index 23bdbc29e21f24..388df00b4e72ee 100644 --- a/deps/v8/src/compiler/node-properties.cc +++ b/deps/v8/src/compiler/node-properties.cc @@ -409,11 +409,6 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe( HeapObjectMatcher m(receiver); if (m.HasResolvedValue()) { HeapObjectRef ref = m.Ref(broker); - if (ref.IsTheHole()) { - // Holes should never make it past hole checks, so they will never - // provide helpful map inference. - return kNoMaps; - } // We don't use ICs for the Array.prototype and the Object.prototype // because the runtime has to be able to intercept them properly, so // we better make sure that TurboFan doesn't outsmart the system here diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc index 4070e7d3ce856f..71d25459e28f66 100644 --- a/deps/v8/src/compiler/pipeline.cc +++ b/deps/v8/src/compiler/pipeline.cc @@ -2787,6 +2787,36 @@ MaybeHandle Pipeline::GenerateCodeForTurboshaftBuiltin( return turboshaft_pipeline.FinalizeCode(); } +MaybeHandle Pipeline::GenerateCodeForTesting( + turboshaft::PipelineData* turboshaft_data, CallDescriptor* call_descriptor, + const char* debug_name) { + Isolate* isolate = turboshaft_data->isolate(); + + PipelineJobScope scope(turboshaft_data, + isolate->counters()->runtime_call_stats()); + RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode); + + std::unique_ptr pipeline_statistics( + CreatePipelineStatistics(Handle