diff --git a/common.gypi b/common.gypi index dbb8c0ff6085e6..e2b8476f3326cd 100644 --- a/common.gypi +++ b/common.gypi @@ -286,7 +286,10 @@ 'VCCLCompilerTool': { 'AdditionalOptions': [ '/Zc:__cplusplus', - '-std:c++17' + # The following option enables c++20 on Windows. This is needed for V8 v12.4+ + '-std:c++20', + # The following option reduces the "error C1060: compiler is out of heap space" + '/Zm2000', ], 'BufferSecurityCheck': 'true', 'DebugInformationFormat': 1, # /Z7 embed info in .obj files diff --git a/deps/v8/.clang-format b/deps/v8/.clang-format index 96a5eb602702d2..066c8d27c657cf 100644 --- a/deps/v8/.clang-format +++ b/deps/v8/.clang-format @@ -3,7 +3,13 @@ BasedOnStyle: Google DerivePointerAlignment: false MaxEmptyLinesToKeep: 1 -IfMacros: ['IF', 'IF_NOT', 'ELSE', 'ELSE_IF'] +IfMacros: + - IF + - IF_NOT +Macros: + # Make clang-format think TurboShaft `ELSE` expands to just `else`, so that + # it formats well alongside `if` + - ELSE=else StatementMacros: - DECL_CAST - DECL_VERIFIER diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index 31d395a0fef5a4..d5c64a8e83c0e6 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -134,3 +134,6 @@ bazel-v8 !/third_party/abseil-cpp /third_party/abseil-cpp/.github /third_party/abseil-cpp/ci +!/third_party/fp16 +/third_party/fp16/src/* +!/third_party/fp16/src/include diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 9bd9ff447e5d03..6de30c78a85421 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -81,6 +81,7 @@ Benjamin Tan Bert Belder Brendon Tiszka Brice Dobry +Bruno Pitrus Burcu Dogan Caitlin Potter Chao Wang @@ -109,6 +110,7 @@ Deepak Mohan Deon Dior Derek Tu Divy Srivastava +Dmitry Bezhetskov Dominic Chen Dominic Farolini Douglas Crosher @@ -232,6 +234,7 @@ Peter Varga Peter Wong PhistucK Pierrick Bouvier +Punith B Nayak Rafal Krypa Raul Tambre Ray Glover diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel index 2351c1e3300e0b..05b7472165ae85 100644 --- a/deps/v8/BUILD.bazel +++ b/deps/v8/BUILD.bazel @@ -674,6 +674,7 @@ filegroup( name = "v8_flags", srcs = [ "src/flags/flag-definitions.h", + "src/flags/flags-impl.h", "src/flags/flags.h", ] + select({ "is_v8_enable_webassembly": ["src/wasm/wasm-feature-flags.h"], @@ -802,7 +803,6 @@ filegroup( "src/base/timezone-cache.h", "src/base/utils/random-number-generator.cc", "src/base/utils/random-number-generator.h", - "src/base/v8-fallthrough.h", "src/base/vector.h", "src/base/virtual-address-space.cc", "src/base/virtual-address-space.h", @@ -1540,6 +1540,7 @@ filegroup( "src/extensions/trigger-failure-extension.cc", "src/extensions/trigger-failure-extension.h", "src/flags/flag-definitions.h", + "src/flags/flags-impl.h", "src/flags/flags.cc", "src/flags/flags.h", "src/handles/global-handles.cc", @@ -1569,8 +1570,8 @@ filegroup( "src/heap/base-space.h", "src/heap/base/active-system-pages.cc", "src/heap/base/active-system-pages.h", - "src/heap/basic-memory-chunk.cc", - "src/heap/basic-memory-chunk.h", + "src/heap/memory-chunk-metadata.cc", + "src/heap/memory-chunk-metadata.h", "src/heap/code-range.cc", "src/heap/code-range.h", "src/heap/trusted-range.cc", @@ -1687,11 +1688,11 @@ filegroup( "src/heap/memory-allocator.h", "src/heap/memory-balancer.cc", "src/heap/memory-balancer.h", + "src/heap/mutable-page.cc", + "src/heap/mutable-page.h", "src/heap/memory-chunk.cc", "src/heap/memory-chunk.h", - "src/heap/memory-chunk-header.cc", - "src/heap/memory-chunk-header.h", - "src/heap/memory-chunk-inl.h", + "src/heap/mutable-page-inl.h", "src/heap/memory-chunk-layout.cc", 
"src/heap/memory-chunk-layout.h", "src/heap/memory-measurement.cc", @@ -2805,6 +2806,8 @@ filegroup( "src/wasm/function-body-decoder-impl.h", "src/wasm/function-compiler.cc", "src/wasm/function-compiler.h", + "src/wasm/fuzzing/random-module-generation.cc", + "src/wasm/fuzzing/random-module-generation.h", "src/wasm/graph-builder-interface.cc", "src/wasm/graph-builder-interface.h", "src/wasm/inlining-tree.h", @@ -3144,6 +3147,7 @@ filegroup( "src/compiler/phase.h", "src/compiler/pipeline.cc", "src/compiler/pipeline.h", + "src/compiler/pipeline-data-inl.h", "src/compiler/pipeline-statistics.cc", "src/compiler/pipeline-statistics.h", "src/compiler/processed-feedback.h", @@ -3213,6 +3217,7 @@ filegroup( "src/compiler/turboshaft/graph-builder.h", "src/compiler/turboshaft/graph-visualizer.cc", "src/compiler/turboshaft/graph-visualizer.h", + "src/compiler/turboshaft/js-generic-lowering-reducer.h", "src/compiler/turboshaft/index.h", "src/compiler/turboshaft/instruction-selection-phase.cc", "src/compiler/turboshaft/instruction-selection-phase.h", @@ -3234,6 +3239,7 @@ filegroup( "src/compiler/turboshaft/machine-lowering-phase.cc", "src/compiler/turboshaft/machine-lowering-phase.h", "src/compiler/turboshaft/machine-lowering-reducer-inl.h", + "src/compiler/turboshaft/maglev-early-lowering-reducer-inl.h", "src/compiler/turboshaft/maglev-graph-building-phase.cc", "src/compiler/turboshaft/maglev-graph-building-phase.h", "src/compiler/turboshaft/machine-optimization-reducer.h", @@ -3479,6 +3485,7 @@ filegroup( "src/builtins/setup-builtins-internal.cc", "src/builtins/torque-csa-header-includes.h", "src/codegen/code-stub-assembler.cc", + "third_party/v8/codegen/fp16-inl.h", "src/codegen/code-stub-assembler-inl.h", "src/codegen/code-stub-assembler.h", "src/heap/setup-heap-internal.cc", @@ -3678,6 +3685,18 @@ filegroup( }), ) +v8_library( + name = "lib_fp16", + srcs = ["third_party/fp16/src/include/fp16.h"], + hdrs = [ + "third_party/fp16/src/include/fp16/fp16.h", + "third_party/fp16/src/include/fp16/bitcasts.h", + ], + includes = [ + "third_party/fp16/src/include", + ], +) + filegroup( name = "v8_bigint", srcs = [ @@ -4167,6 +4186,7 @@ v8_library( ":noicu/generated_torque_definitions", ], deps = [ + ":lib_fp16", ":v8_libbase", "//external:base_trace_event_common", "//external:absl_btree", @@ -4223,11 +4243,11 @@ alias( v8_library( name = "v8_vtune", - srcs = glob([ + srcs = [ "src/third_party/vtune/v8-vtune.h", "src/third_party/vtune/vtune-jit.cc", "src/third_party/vtune/vtune-jit.h", - ]), + ], copts = ["-I"], deps = [ ":core_lib_noicu", diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 0e10f0e71b5142..a25b6f2ac986f3 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -337,10 +337,12 @@ declare_args() { # Sets -DV8_ENABLE_SANDBOX. v8_enable_sandbox = "" - # Expose the memory corruption API to JavaScript. Useful for testing the sandbox. - # WARNING This will expose builtins that (by design) cause memory corruption. - # Sets -DV8_EXPOSE_MEMORY_CORRUPTION_API - v8_expose_memory_corruption_api = false + # Enable the memory corruption API. Useful for testing the sandbox. + # The memory corruption API is only exposed to JavaScript if sandbox testing + # mode is enabled at runtime, for example via --sandbox-fuzzing. + # WARNING This will enable builtins that (by design) cause memory corruption. + # Sets -DV8_ENABLE_MEMORY_CORRUPTION_API + v8_enable_memory_corruption_api = false # Experimental feature for collecting per-class zone memory stats. 
# Requires use_rtti = true @@ -605,24 +607,26 @@ assert( if (v8_builtins_profiling_log_file == "default") { v8_builtins_profiling_log_file = "" - # Don't use existing profile when - # * v8_enable_builtins_optimization is disabled, - # * generating a new one (i.e. v8_enable_builtins_profiling), - # * is_debug or dcheck_always_on because they add more checks to the - # builtins control flow which we don't want to generate, - # * !v8_enable_sandbox because it affects the way how external pointer values - # are accessed, - # * v8_enable_webassembly because it changes the set of opcodes which affects - # graphs hashes, + # The existing profile can be used only when + # * `v8_enable_builtins_optimization` - this switch enables builtins PGO, + # * `!v8_enable_builtins_profiling` - don't use the profiles when generating + # a new one, + # * `!is_debug && !dcheck_always_on` - these modes add more checks to + # the builtins control flow which makes the builtins code different, + # * `v8_enable_pointer_compression` - it changes the objects layouts, + # * `v8_enable_sandbox && v8_enable_external_code_space` because they affect + # the way how external pointer values are accessed, + # * `v8_enable_webassembly` because it changes the set of opcodes which + # affects graphs hashes. if (v8_enable_builtins_optimization && !v8_enable_builtins_profiling && !is_debug && !dcheck_always_on && v8_enable_webassembly) { - # This is about function arguments evaluation order, which makes node IDs - # not predictable for subgraphs like Op1(Op2(), Op3()) and as a result - # different graph hashes. + # This is about function arguments evaluation order on the machine building + # mksnapshot, which makes node IDs not predictable for subgraphs like + # Op1(Op2(), Op3()) and as a result different graph hashes. # Clang uses left-to-right order everywhere except Windows, otherwise the # order is right-to-left. # TODO(crbug.com/v8/13647): Remove once this issue is fixed in CSA. - if (!is_clang || is_win) { + if (!is_clang || host_os == "win") { pgo_profile_suffix = "-rl" } else { pgo_profile_suffix = "" @@ -680,7 +684,7 @@ assert(!v8_enable_sandbox || v8_enable_external_code_space, assert(!v8_enable_sandbox || !v8_enable_third_party_heap, "The sandbox is incompatible with the third-party heap") -assert(!v8_expose_memory_corruption_api || v8_enable_sandbox, +assert(!v8_enable_memory_corruption_api || v8_enable_sandbox, "The Memory Corruption API requires the sandbox") assert( @@ -873,6 +877,7 @@ external_v8_defines = [ "V8_IS_TSAN", "V8_ENABLE_CONSERVATIVE_STACK_SCANNING", "V8_ENABLE_DIRECT_LOCAL", + "V8_MINORMS_STRING_SHORTCUTTING", ] enabled_external_v8_defines = [] @@ -1206,8 +1211,8 @@ config("features") { if (v8_advanced_bigint_algorithms) { defines += [ "V8_ADVANCED_BIGINT_ALGORITHMS" ] } - if (v8_expose_memory_corruption_api) { - defines += [ "V8_EXPOSE_MEMORY_CORRUPTION_API" ] + if (v8_enable_memory_corruption_api) { + defines += [ "V8_ENABLE_MEMORY_CORRUPTION_API" ] } if (v8_enable_pointer_compression_8gb) { defines += [ "V8_COMPRESS_POINTERS_8GB" ] @@ -1738,7 +1743,7 @@ config("always_turbofanimize") { # TODO(crbug.com/621335) Rework this so that we don't have the confusion # between "optimize_speed" and "optimize_max". 
- if (((is_posix && !is_android) || is_fuchsia) && !using_sanitizer) { + if (((is_posix && !is_android) || is_fuchsia || is_win) && !using_sanitizer) { configs += [ "//build/config/compiler:optimize_speed" ] } else { configs += [ "//build/config/compiler:optimize_max" ] @@ -2735,6 +2740,7 @@ generated_file("v8_generate_features_json") { v8_enable_31bit_smis_on_64bit_arch = v8_enable_31bit_smis_on_64bit_arch v8_enable_conservative_stack_scanning = v8_enable_conservative_stack_scanning + v8_enable_direct_handle = v8_enable_direct_handle v8_enable_direct_local = v8_enable_direct_local v8_enable_extensible_ro_snapshot = v8_enable_extensible_ro_snapshot v8_enable_gdbjit = v8_enable_gdbjit @@ -2903,6 +2909,7 @@ v8_source_set("v8_initializers") { "src/interpreter/interpreter-intrinsics-generator.h", "src/numbers/integer-literal-inl.h", "src/numbers/integer-literal.h", + "third_party/v8/codegen/fp16-inl.h", ] if (v8_enable_webassembly) { @@ -3159,6 +3166,7 @@ v8_header_set("v8_flags") { sources = [ "src/flags/flag-definitions.h", + "src/flags/flags-impl.h", "src/flags/flags.h", ] @@ -3382,6 +3390,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/per-isolate-compiler-cache.h", "src/compiler/persistent-map.h", "src/compiler/phase.h", + "src/compiler/pipeline-data-inl.h", "src/compiler/pipeline-statistics.h", "src/compiler/pipeline.h", "src/compiler/processed-feedback.h", @@ -3427,6 +3436,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/graph.h", "src/compiler/turboshaft/index.h", "src/compiler/turboshaft/instruction-selection-phase.h", + "src/compiler/turboshaft/js-generic-lowering-reducer.h", "src/compiler/turboshaft/late-escape-analysis-reducer.h", "src/compiler/turboshaft/late-load-elimination-reducer.h", "src/compiler/turboshaft/layered-hash-map.h", @@ -3439,6 +3449,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/machine-lowering-phase.h", "src/compiler/turboshaft/machine-lowering-reducer-inl.h", "src/compiler/turboshaft/machine-optimization-reducer.h", + "src/compiler/turboshaft/maglev-early-lowering-reducer-inl.h", "src/compiler/turboshaft/maglev-graph-building-phase.h", "src/compiler/turboshaft/memory-optimization-reducer.h", "src/compiler/turboshaft/operation-matcher.h", @@ -3578,7 +3589,6 @@ v8_header_set("v8_internal_headers") { "src/heap/allocation-stats.h", "src/heap/array-buffer-sweeper.h", "src/heap/base-space.h", - "src/heap/basic-memory-chunk.h", "src/heap/code-range.h", "src/heap/code-stats.h", "src/heap/collection-barrier.h", @@ -3648,9 +3658,8 @@ v8_header_set("v8_internal_headers") { "src/heap/marking.h", "src/heap/memory-allocator.h", "src/heap/memory-balancer.h", - "src/heap/memory-chunk-header.h", - "src/heap/memory-chunk-inl.h", "src/heap/memory-chunk-layout.h", + "src/heap/memory-chunk-metadata.h", "src/heap/memory-chunk.h", "src/heap/memory-measurement-inl.h", "src/heap/memory-measurement.h", @@ -3658,6 +3667,8 @@ v8_header_set("v8_internal_headers") { "src/heap/minor-gc-job.h", "src/heap/minor-mark-sweep-inl.h", "src/heap/minor-mark-sweep.h", + "src/heap/mutable-page-inl.h", + "src/heap/mutable-page.h", "src/heap/new-spaces-inl.h", "src/heap/new-spaces.h", "src/heap/object-lock.h", @@ -4253,6 +4264,7 @@ v8_header_set("v8_internal_headers") { "src/wasm/function-body-decoder-impl.h", "src/wasm/function-body-decoder.h", "src/wasm/function-compiler.h", + "src/wasm/fuzzing/random-module-generation.h", "src/wasm/graph-builder-interface.h", "src/wasm/inlining-tree.h", "src/wasm/jump-table-assembler.h", @@ -4677,6 +4689,17 @@ 
v8_header_set("v8_internal_headers") { "src/baseline/riscv/baseline-compiler-riscv-inl.h", ] } + if (v8_enable_webassembly) { + # Trap handling is enabled on riscv64 Linux and in simulators on + # x64 on Linux. + if ((current_cpu == "riscv64" && is_linux) || + (current_cpu == "x64" && is_linux)) { + sources += [ "src/trap-handler/handler-inside-posix.h" ] + } + if (current_cpu == "x64" && is_linux) { + sources += [ "src/trap-handler/trap-handler-simulator.h" ] + } + } } else if (v8_current_cpu == "riscv32") { sources += [ ### gcmole(riscv32) ### @@ -5305,7 +5328,6 @@ v8_source_set("v8_base_without_compiler") { "src/handles/traced-handles.cc", "src/heap/allocation-observer.cc", "src/heap/array-buffer-sweeper.cc", - "src/heap/basic-memory-chunk.cc", "src/heap/code-range.cc", "src/heap/code-stats.cc", "src/heap/collection-barrier.cc", @@ -5347,13 +5369,14 @@ v8_source_set("v8_base_without_compiler") { "src/heap/marking.cc", "src/heap/memory-allocator.cc", "src/heap/memory-balancer.cc", - "src/heap/memory-chunk-header.cc", "src/heap/memory-chunk-layout.cc", + "src/heap/memory-chunk-metadata.cc", "src/heap/memory-chunk.cc", "src/heap/memory-measurement.cc", "src/heap/memory-reducer.cc", "src/heap/minor-gc-job.cc", "src/heap/minor-mark-sweep.cc", + "src/heap/mutable-page.cc", "src/heap/new-spaces.cc", "src/heap/object-stats.cc", "src/heap/objects-visiting.cc", @@ -5734,6 +5757,12 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/well-known-imports.cc", "src/wasm/wrappers.cc", ] + if (!is_official_build) { + sources += [ + ### gcmole(all) ### + "src/wasm/fuzzing/random-module-generation.cc", + ] + } } if (v8_enable_third_party_heap) { @@ -5984,6 +6013,20 @@ v8_source_set("v8_base_without_compiler") { "src/execution/riscv/simulator-riscv.cc", "src/regexp/riscv/regexp-macro-assembler-riscv.cc", ] + if (v8_enable_webassembly) { + # Trap handling is enabled on riscv64 Linux and in simulators on + # x64 on Linux. 
+ if ((current_cpu == "riscv64" && is_linux) || + (current_cpu == "x64" && is_linux)) { + sources += [ + "src/trap-handler/handler-inside-posix.cc", + "src/trap-handler/handler-outside-posix.cc", + ] + } + if (current_cpu == "x64" && is_linux) { + sources += [ "src/trap-handler/handler-outside-simulator.cc" ] + } + } } else if (v8_current_cpu == "riscv32") { sources += [ ### gcmole(riscv32) ### @@ -6364,7 +6407,6 @@ v8_component("v8_libbase") { "src/base/timezone-cache.h", "src/base/utils/random-number-generator.cc", "src/base/utils/random-number-generator.h", - "src/base/v8-fallthrough.h", "src/base/vector.h", "src/base/virtual-address-space-page-allocator.cc", "src/base/virtual-address-space-page-allocator.h", @@ -7309,7 +7351,9 @@ group("v8_fuzzers") { ":v8_simple_wasm_async_fuzzer", ":v8_simple_wasm_code_fuzzer", ":v8_simple_wasm_compile_fuzzer", + ":v8_simple_wasm_compile_simd_fuzzer", ":v8_simple_wasm_fuzzer", + ":v8_simple_wasm_init_expr_fuzzer", ":v8_simple_wasm_streaming_fuzzer", ] } @@ -7502,6 +7546,13 @@ v8_executable("v8_hello_world") { ":v8_libplatform", "//build/win:default_exe_manifest", ] + + # Need to workaround a link error when using devtoolset + # https://bugzilla.redhat.com/show_bug.cgi?id=2268188 + if ((v8_current_cpu == "ppc64" || v8_current_cpu == "s390x") && is_linux && + !is_clang) { + libs = [ "stdc++" ] + } } v8_executable("v8_sample_process") { @@ -7772,6 +7823,27 @@ if (v8_enable_webassembly) { v8_fuzzer("wasm_compile_fuzzer") { } + v8_source_set("wasm_compile_simd_fuzzer") { + sources = [ + "test/common/wasm/test-signatures.h", + "test/fuzzer/wasm-compile-simd.cc", + ] + + deps = [ + ":fuzzer_support", + ":lib_wasm_fuzzer_common", + ":wasm_test_common", + ] + + configs = [ + ":external_config", + ":internal_config_base", + ] + } + + v8_fuzzer("wasm_compile_simd_fuzzer") { + } + v8_source_set("wasm_streaming_fuzzer") { sources = [ "test/fuzzer/wasm-streaming.cc" ] @@ -7789,6 +7861,24 @@ if (v8_enable_webassembly) { v8_fuzzer("wasm_streaming_fuzzer") { } + + v8_source_set("wasm_init_expr_fuzzer") { + sources = [ "test/fuzzer/wasm-init-expr.cc" ] + + deps = [ + ":fuzzer_support", + ":lib_wasm_fuzzer_common", + ":wasm_test_common", + ] + + configs = [ + ":external_config", + ":internal_config_base", + ] + } + + v8_fuzzer("wasm_init_expr_fuzzer") { + } } v8_source_set("inspector_fuzzer") { diff --git a/deps/v8/DEPS b/deps/v8/DEPS index 6c3ca4e741a6e0..8f2b6e603dbde4 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -57,7 +57,7 @@ vars = { 'checkout_fuchsia_no_hooks': False, # reclient CIPD package version - 'reclient_version': 're_client_version:0.131.1.784ddbb-gomaip', + 'reclient_version': 're_client_version:0.134.1.2c9285b-gomaip', # Fetch configuration files required for the 'use_remoteexec' gn arg 'download_remoteexec_cfg': False, @@ -73,19 +73,22 @@ vars = { 'build_with_chromium': False, # GN CIPD package version. - 'gn_version': 'git_revision:0a2b8eac80f164f10b2cbc126890db0d295790cd', + 'gn_version': 'git_revision:59c4bb920542ee903ee1df39097ae024e2e8226f', # ninja CIPD package version # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja 'ninja_version': 'version:2@1.11.1.chromium.6', + # siso CIPD package version + 'siso_version': 'git_revision:110b1d8c0528de153cef259f09f3dc5ee627e6cb', + # luci-go CIPD package version. 
- 'luci_go': 'git_revision:3df60a11d33a59614c0e8d2bccc58d8c30984901', + 'luci_go': 'git_revision:623f8d17a069eaea6d0fca13147888284ec76ff1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. - 'fuchsia_version': 'version:18.20240215.1.1', + 'fuchsia_version': 'version:19.20240305.3.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -118,16 +121,16 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_tools-lint_version # and whatever else without interference from each other. - 'android_sdk_cmdline-tools_version': 'BRpfUGFd3WoveSGTLVgkQF7ugIVyywGneVICP4c0010C', + 'android_sdk_cmdline-tools_version': 'mU9jm4LkManzjSzRquV1UIA7fHBZ2pK7NtbCXxoVnVUC', } deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '29ac73db520575590c3aceb0a6f1f58dda8934f6', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + 'e5cf1b3ceb3fec6aa5c57b34dede99d36cede32d', + Var('chromium_url') + '/chromium/src/build.git' + '@' + 'bca39698b291b392f0b4336857caf929c603ada3', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '342659133d7d0b33f4e24b640a9ad78c0c423633', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '68fce43789231d29d2028ca85530e4814aac6f50', 'buildtools/linux64': { 'packages': [ { @@ -173,7 +176,7 @@ deps = { 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'e4f91b6381d7694265031caad0c71d733ac132f3', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '0b1abd5ee70867311bea78e851bd609ad842011a', 'third_party/android_platform': { 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'eeb2d566f963bb66212fdc0d9bbe1dde550b4969', 'condition': 'checkout_android', @@ -235,7 +238,7 @@ deps = { 'condition': "checkout_centipede_deps", }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '3d6c15240b480da1e498a64a72ea77a61ba335e1', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '97c002a33e5b777eaa60e3ddc977a185f89446f7', 'condition': 'checkout_android', }, 'third_party/clang-format/script': @@ -249,11 +252,11 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '9d7c8e76f82ddc6a3bbc307217e31dec44a0f73a', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'fe6a359a803f55829ede3666215d080f6775f173', 'third_party/fp16/src': Var('chromium_url') + '/external/github.com/Maratyszcza/FP16.git' + '@' + '0a92994d729ff76a58f692d3028ca1b64b145d91', 'third_party/fuchsia-gn-sdk': { - 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + 'fa3c41d7a15127a989111fcede8dae9265f8566b', + 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + '727f65f8dae76c0d5c39c0f95d9d8f3a90de79f1', 'condition': 'checkout_fuchsia', }, # Exists for rolling the Fuchsia SDK. 
Check out of the SDK should always @@ -269,17 +272,17 @@ deps = { 'dep_type': 'cipd', }, 'third_party/google_benchmark_chrome': { - 'url': Var('chromium_url') + '/chromium/src/third_party/google_benchmark.git' + '@' + 'c300add93460c31efe53fa71e61427fa1bc09e6a', + 'url': Var('chromium_url') + '/chromium/src/third_party/google_benchmark.git' + '@' + 'f049b96d7a50ae19f2748aae7fba7bde705bcd8c', }, 'third_party/google_benchmark_chrome/src': { - 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + 'b177433f3ee2513b1075140c723d73ab8901790f', + 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '344117638c8ff7e239044fd0fa7085839fc03021', }, 'third_party/fuzztest': - Var('chromium_url') + '/chromium/src/third_party/fuzztest.git' + '@' + '9fc64e5930915bfb5a593b7e12487d78283e8221', + Var('chromium_url') + '/chromium/src/third_party/fuzztest.git' + '@' + 'daea7ab861050a6445f59758f09cc3173f5add76', 'third_party/fuzztest/src': - Var('chromium_url') + '/external/github.com/google/fuzztest.git' + '@' + '61d95200e7ece7d121cab26f0c39fbf392e6566e', + Var('chromium_url') + '/external/github.com/google/fuzztest.git' + '@' + 'bddcd9f77ba0a81a99ce50bcadf5149efe545df0', 'third_party/googletest/src': - Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'af29db7ec28d6df1c7f0f745186884091e602e07', + Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'b479e7a3c161d7087113a05f8cb034b870313a55', 'third_party/icu': Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'a622de35ac311c5ad390a7af80724634e5dc61ed', 'third_party/instrumented_libraries': @@ -295,9 +298,9 @@ deps = { 'third_party/jsoncpp/source': Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448', 'third_party/libc++/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '6d83791af99ea95f04986d64f111b84ce0b3c6f5', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '80307e66e74bae927fb8709a549859e777e3bf0b', 'third_party/libc++abi/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'a7b3d968a3a923886fea64b424bd770e69dc4ea4', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'fc6253a642c9e336480b17fb17771e2c1efc7fff', 'third_party/libunwind/src': Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '8bad7bd6ec30f94bce82f7cb5b58ecbd6ce02996', 'third_party/logdog/logdog': @@ -319,15 +322,25 @@ deps = { 'third_party/protobuf': Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + '6a59a2ad1f61d9696092f79b6d74368b4d7970a3', 'third_party/re2/src': - Var('chromium_url') + '/external/github.com/google/re2.git' + '@' + 'd00d1e93781e6ebe415771a952689dff8f260d44', + Var('chromium_url') + '/external/github.com/google/re2.git' + '@' + '108914d28a79243d4300e7e651cd0a0d5883ca0f', 'third_party/requests': { 'url': Var('chromium_url') + '/external/github.com/kennethreitz/requests.git' + '@' + 'c7e0fc087ceeadb8b4c84a0953a422c474093d6d', 'condition': 'checkout_android', }, + 'third_party/siso': { + 'packages': [ + { + 'package': 'infra/build/siso/${{platform}}', + 'version': Var('siso_version'), + } + ], + 'dep_type': 'cipd', + 'condition': 'not build_with_chromium and host_cpu != "s390" and host_cpu != "ppc"', + }, 'third_party/zlib': - 
Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '4b5807f344182fd392849b820642457212618e5f', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'c5bf1b566e5df14e763507e2ce30cbfebefeeccf', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'a4df104173dae7d49205ed8abefc920b7c5162d2', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '1ed379eda880f53d895559815cd3e30b370abff5', 'tools/luci-go': { 'packages': [ { @@ -343,7 +356,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/abseil-cpp': { - 'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + 'f1c5751a2cb4102efbffc4110ee7551b3c54cfea', + 'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + 'b3ae305fd5dbc6ad41eed9add26768c29181219f', 'condition': 'not build_with_chromium', } } @@ -354,6 +367,8 @@ include_rules = [ '+unicode', '+third_party/fdlibm', '+third_party/ittapi/include', + '+third_party/fp16/src/include', + '+third_party/v8/codegen', '+third_party/fuzztest', # Abseil features are allow-listed. Please use your best judgement when adding # to this set -- if in doubt, email v8-dev@. For general guidance, refer to @@ -746,4 +761,15 @@ hooks = [ '--skip_remoteexec_cfg_fetch', ], }, + # Configure Siso for developer builds. + { + 'name': 'configure_siso', + 'pattern': '.', + 'condition': 'not build_with_chromium', + 'action': ['python3', + 'build/config/siso/configure_siso.py', + '--rbe_instance', + Var('rbe_instance'), + ], + }, ] diff --git a/deps/v8/ENG_REVIEW_OWNERS b/deps/v8/ENG_REVIEW_OWNERS index 4f80f9d15a74c0..7d582ec7d4016b 100644 --- a/deps/v8/ENG_REVIEW_OWNERS +++ b/deps/v8/ENG_REVIEW_OWNERS @@ -5,6 +5,7 @@ adamk@chromium.org danno@chromium.org hpayer@chromium.org +leszeks@chromium.org mlippautz@chromium.org verwaest@chromium.org vahl@chromium.org diff --git a/deps/v8/WORKSPACE b/deps/v8/WORKSPACE index 87d8cb1fe8c991..96ef24384ed084 100644 --- a/deps/v8/WORKSPACE +++ b/deps/v8/WORKSPACE @@ -61,7 +61,7 @@ bind( new_local_repository( name = "com_googlesource_chromium_icu", - build_file = "bazel/BUILD.icu", + build_file = ":bazel/BUILD.icu", path = "third_party/icu", ) @@ -72,7 +72,7 @@ bind( new_local_repository( name = "com_googlesource_chromium_base_trace_event_common", - build_file = "bazel/BUILD.trace_event_common", + build_file = "//:bazel/BUILD.trace_event_common", path = "base/trace_event/common", ) diff --git a/deps/v8/bazel/defs.bzl b/deps/v8/bazel/defs.bzl index 1ea9bc0da8de0f..f23f48ef03ae6a 100644 --- a/deps/v8/bazel/defs.bzl +++ b/deps/v8/bazel/defs.bzl @@ -313,10 +313,7 @@ def v8_library( # split the set of outputs by using OutputGroupInfo, that way we do not need to # run the torque generator twice. def _torque_files_impl(ctx): - if ctx.workspace_name == "v8": - v8root = "." - else: - v8root = "external/v8" + v8root = "." # Arguments args = [] @@ -414,7 +411,7 @@ def _v8_target_cpu_transition_impl(settings, # Check for an existing v8_target_cpu flag. if "@v8//bazel/config:v8_target_cpu" in settings: if settings["@v8//bazel/config:v8_target_cpu"] != "none": - return + return {} # Auto-detect target architecture based on the --cpu flag. 
mapping = { @@ -480,9 +477,6 @@ _v8_mksnapshot = rule( cfg = "exec", ), "target_os": attr.string(mandatory = True), - "_allowlist_function_transition": attr.label( - default = "@bazel_tools//tools/allowlists/function_transition_allowlist", - ), "prefix": attr.string(mandatory = True), "suffix": attr.string(mandatory = True), }, diff --git a/deps/v8/bazel/v8-non-pointer-compression.bzl b/deps/v8/bazel/v8-non-pointer-compression.bzl index 7bb23591ca380c..a9f73728301254 100644 --- a/deps/v8/bazel/v8-non-pointer-compression.bzl +++ b/deps/v8/bazel/v8-non-pointer-compression.bzl @@ -1,4 +1,9 @@ -def _v8_disable_pointer_compression(settings, attr): +""" +Exposes the rule v8_binary_non_pointer_compression, which forces a label +to be compiled without pointer compression. +""" + +def _v8_disable_pointer_compression(): return { "//:v8_enable_pointer_compression": "False", } @@ -42,17 +47,6 @@ v8_binary_non_pointer_compression = rule( # Note specificaly how it's configured with v8_target_cpu_transition, which # ensures that setting propagates down the graph. "binary": attr.label(cfg = v8_disable_pointer_compression), - # This is a stock Bazel requirement for any rule that uses Starlark - # transitions. It's okay to copy the below verbatim for all such rules. - # - # The purpose of this requirement is to give the ability to restrict - # which packages can invoke these rules, since Starlark transitions - # make much larger graphs possible that can have memory and performance - # consequences for your build. The whitelist defaults to "everything". - # But you can redefine it more strictly if you feel that's prudent. - "_allowlist_function_transition": attr.label( - default = "@bazel_tools//tools/allowlists/function_transition_allowlist", - ), }, # Making this executable means it works with "$ bazel run". executable = True, diff --git a/deps/v8/build_overrides/build.gni b/deps/v8/build_overrides/build.gni index 9830dfc51d0213..32896733f83bf1 100644 --- a/deps/v8/build_overrides/build.gni +++ b/deps/v8/build_overrides/build.gni @@ -42,6 +42,12 @@ enable_java_templates = false # Enables assertions on safety checks in libc++. enable_safe_libcxx = true +# Enable assertions on safety checks, also in libstdc++ +# +# In case the C++ standard library implementation used is libstdc++, then +# enable its own hardening checks. +enable_safe_libstdcxx = true + # Allows different projects to specify their own suppressions files. asan_suppressions_file = "//build/sanitizers/asan_suppressions.cc" lsan_suppressions_file = "//build/sanitizers/lsan_suppressions.cc" diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni index 185de67a52237b..7c4e3ba5c34af2 100644 --- a/deps/v8/gni/v8.gni +++ b/deps/v8/gni/v8.gni @@ -109,7 +109,7 @@ declare_args() { v8_enable_direct_handle = "" # Use direct pointers in local handles. - v8_enable_direct_local = false + v8_enable_direct_local = "" # Check for off-stack allocated local handles. v8_enable_local_off_stack_check = false @@ -212,11 +212,14 @@ if (v8_enable_turbofan == "") { assert(v8_enable_turbofan || !v8_enable_webassembly, "Webassembly is not available when Turbofan is disabled.") -# Direct internal handles are enabled by default if conservative stack scanning -# is enabled. +# Direct internal handles and direct locals are enabled by default if +# conservative stack scanning is enabled. 
if (v8_enable_direct_handle == "") { v8_enable_direct_handle = v8_enable_conservative_stack_scanning } +if (v8_enable_direct_local == "") { + v8_enable_direct_local = v8_enable_conservative_stack_scanning +} # Points to // in v8 stand-alone or to //v8/ in chromium. We need absolute # paths for all configs in templates as they are shared in different @@ -245,7 +248,7 @@ if (is_debug && !v8_optimized_debug) { # TODO(crbug.com/621335) Rework this so that we don't have the confusion # between "optimize_speed" and "optimize_max". - if (is_posix && !is_android && !using_sanitizer) { + if (((is_posix && !is_android) || is_win) && !using_sanitizer) { v8_add_configs += [ "//build/config/compiler:optimize_speed" ] } else { v8_add_configs += [ "//build/config/compiler:optimize_max" ] diff --git a/deps/v8/include/v8-context.h b/deps/v8/include/v8-context.h index c81dc80c526ca2..4849c925806f95 100644 --- a/deps/v8/include/v8-context.h +++ b/deps/v8/include/v8-context.h @@ -84,6 +84,29 @@ class V8_EXPORT Context : public Data { * created by a previous call to Context::New with the same global * template. The state of the global object will be completely reset * and only object identify will remain. + * + * \param internal_fields_deserializer An optional callback used + * to deserialize fields set by + * v8::Object::SetAlignedPointerInInternalField() in wrapper objects + * from the default context snapshot. It should match the + * SerializeInternalFieldsCallback() used by + * v8::SnapshotCreator::SetDefaultContext() when the default context + * snapshot is created. It does not need to be configured if the default + * context snapshot contains no wrapper objects with pointer internal + * fields, or if no custom startup snapshot is configured + * in the v8::CreateParams used to create the isolate. + * + * \param microtask_queue An optional microtask queue used to manage + * the microtasks created in this context. If not set the per-isolate + * default microtask queue would be used. + * + * \param context_data_deserializer An optional callback used + * to deserialize embedder data set by + * v8::Context::SetAlignedPointerInEmbedderData() in the default + * context from the default context snapshot. It does not need to be + * configured if the default context snapshot contains no pointer embedder + * data, or if no custom startup snapshot is configured in the + * v8::CreateParams used to create the isolate. */ static Local New( Isolate* isolate, ExtensionConfiguration* extensions = nullptr, @@ -91,7 +114,9 @@ class V8_EXPORT Context : public Data { MaybeLocal global_object = MaybeLocal(), DeserializeInternalFieldsCallback internal_fields_deserializer = DeserializeInternalFieldsCallback(), - MicrotaskQueue* microtask_queue = nullptr); + MicrotaskQueue* microtask_queue = nullptr, + DeserializeContextDataCallback context_data_deserializer = + DeserializeContextDataCallback()); /** * Create a new context from a (non-default) context snapshot. There @@ -103,21 +128,37 @@ class V8_EXPORT Context : public Data { * \param context_snapshot_index The index of the context snapshot to * deserialize from. Use v8::Context::New for the default snapshot. * - * \param embedder_fields_deserializer Optional callback to deserialize - * internal fields. It should match the SerializeInternalFieldCallback used - * to serialize. 
+ * \param internal_fields_deserializer An optional callback used + * to deserialize fields set by + * v8::Object::SetAlignedPointerInInternalField() in wrapper objects + * from the default context snapshot. It does not need to be + * configured if there are no wrapper objects with no internal + * pointer fields in the default context snapshot or if no startup + * snapshot is configured when the isolate is created. * * \param extensions See v8::Context::New. * * \param global_object See v8::Context::New. + * + * \param internal_fields_deserializer Similar to + * internal_fields_deserializer in v8::Context::New but applies to + * the context specified by the context_snapshot_index. + * + * \param microtask_queue See v8::Context::New. + * + * \param context_data_deserializer Similar to + * context_data_deserializer in v8::Context::New but applies to + * the context specified by the context_snapshot_index. */ static MaybeLocal FromSnapshot( Isolate* isolate, size_t context_snapshot_index, - DeserializeInternalFieldsCallback embedder_fields_deserializer = + DeserializeInternalFieldsCallback internal_fields_deserializer = DeserializeInternalFieldsCallback(), ExtensionConfiguration* extensions = nullptr, MaybeLocal global_object = MaybeLocal(), - MicrotaskQueue* microtask_queue = nullptr); + MicrotaskQueue* microtask_queue = nullptr, + DeserializeContextDataCallback context_data_deserializer = + DeserializeContextDataCallback()); /** * Returns an global object that isn't backed by an actual context. @@ -181,27 +222,8 @@ class V8_EXPORT Context : public Data { * also be considered for freezing should be added to the children_out * parameter. Returns true if the operation completed successfully. */ - V8_DEPRECATED("Please use the version that takes a LocalVector&") - virtual bool FreezeEmbedderObjectAndGetChildren( - Local obj, std::vector>& children_out) { - // TODO(chromium:1454114): This method is temporarily defined in order to - // smoothen the transition to the version that follows. - return true; - } virtual bool FreezeEmbedderObjectAndGetChildren( - Local obj, LocalVector& children_out) { - // TODO(chromium:1454114): This method is temporarily defined and - // calls the previous version, soon to be deprecated, in order to - // smoothen the transition. When deprecation is completed, this - // will become an abstract method. - std::vector> children; - START_ALLOW_USE_DEPRECATED() - // Temporarily use the old callback. - bool result = FreezeEmbedderObjectAndGetChildren(obj, children); - END_ALLOW_USE_DEPRECATED() - children_out.insert(children_out.end(), children.begin(), children.end()); - return result; - } + Local obj, LocalVector& children_out) = 0; }; /** @@ -328,22 +350,6 @@ class V8_EXPORT Context : public Data { Local context); void SetAbortScriptExecution(AbortScriptExecutionCallback callback); - /** - * Returns the value that was set or restored by - * SetContinuationPreservedEmbedderData(), if any. - */ - V8_DEPRECATE_SOON( - "Use v8::Isolate::GetContinuationPreservedEmbedderData instead") - Local GetContinuationPreservedEmbedderData() const; - - /** - * Sets a value that will be stored on continuations and reset while the - * continuation runs. - */ - V8_DEPRECATE_SOON( - "Use v8::Isolate::SetContinuationPreservedEmbedderData instead") - void SetContinuationPreservedEmbedderData(Local context); - /** * Set or clear hooks to be invoked for promise lifecycle operations. * To clear a hook, set it to an empty v8::Function. 
Each function will diff --git a/deps/v8/include/v8-function-callback.h b/deps/v8/include/v8-function-callback.h index 22b5328d101f89..86a3ea72f4033c 100644 --- a/deps/v8/include/v8-function-callback.h +++ b/deps/v8/include/v8-function-callback.h @@ -82,8 +82,15 @@ class ReturnValue { friend class PropertyCallbackInfo; template friend class PersistentValueMapBase; - V8_INLINE void SetInternal(internal::Address value) { *value_ = value; } - V8_INLINE internal::Address GetDefaultValue(); + V8_INLINE void SetInternal(internal::Address value); + // Setting the hole value has different meanings depending on the usage: + // - for function template callbacks it means that the callback returns + // the undefined value, + // - for property getter callbacks is means that the callback returns + // the undefined value (for property setter callbacks the value returned + // is ignored), + // - for interceptor callbacks it means that the request was not handled. + V8_INLINE void SetTheHole(); V8_INLINE explicit ReturnValue(internal::Address* slot); // See FunctionCallbackInfo. @@ -286,14 +293,28 @@ using FunctionCallback = void (*)(const FunctionCallbackInfo& info); template ReturnValue::ReturnValue(internal::Address* slot) : value_(slot) {} +template +void ReturnValue::SetInternal(internal::Address value) { +#if V8_STATIC_ROOTS_BOOL + using I = internal::Internals; + // Ensure that the upper 32-bits are not modified. Compiler should be + // able to optimize this to a store of a lower 32-bits of the value. + // This is fine since the callback can return only JavaScript values which + // are either Smis or heap objects allocated in the main cage. + *value_ = I::DecompressTaggedField(*value_, I::CompressTagged(value)); +#else + *value_ = value; +#endif // V8_STATIC_ROOTS_BOOL +} + template template void ReturnValue::Set(const Global& handle) { static_assert(std::is_base_of::value, "type check"); if (V8_UNLIKELY(handle.IsEmpty())) { - *value_ = GetDefaultValue(); + SetTheHole(); } else { - *value_ = handle.ptr(); + SetInternal(handle.ptr()); } } @@ -304,7 +325,7 @@ void ReturnValue::SetNonEmpty(const Global& handle) { #ifdef V8_ENABLE_CHECKS internal::VerifyHandleIsNonEmpty(handle.IsEmpty()); #endif // V8_ENABLE_CHECKS - *value_ = handle.ptr(); + SetInternal(handle.ptr()); } template @@ -312,9 +333,9 @@ template void ReturnValue::Set(const BasicTracedReference& handle) { static_assert(std::is_base_of::value, "type check"); if (V8_UNLIKELY(handle.IsEmpty())) { - *value_ = GetDefaultValue(); + SetTheHole(); } else { - *value_ = handle.ptr(); + SetInternal(handle.ptr()); } } @@ -325,7 +346,7 @@ void ReturnValue::SetNonEmpty(const BasicTracedReference& handle) { #ifdef V8_ENABLE_CHECKS internal::VerifyHandleIsNonEmpty(handle.IsEmpty()); #endif // V8_ENABLE_CHECKS - *value_ = handle.ptr(); + SetInternal(handle.ptr()); } template @@ -334,9 +355,9 @@ void ReturnValue::Set(const Local handle) { static_assert(std::is_void::value || std::is_base_of::value, "type check"); if (V8_UNLIKELY(handle.IsEmpty())) { - *value_ = GetDefaultValue(); + SetTheHole(); } else { - *value_ = handle.ptr(); + SetInternal(handle.ptr()); } } @@ -348,13 +369,13 @@ void ReturnValue::SetNonEmpty(const Local handle) { #ifdef V8_ENABLE_CHECKS internal::VerifyHandleIsNonEmpty(handle.IsEmpty()); #endif // V8_ENABLE_CHECKS - *value_ = handle.ptr(); + SetInternal(handle.ptr()); } template void ReturnValue::Set(double i) { static_assert(std::is_base_of::value, "type check"); - Set(Number::New(GetIsolate(), i)); + 
SetNonEmpty(Number::New(GetIsolate(), i)); } template @@ -362,10 +383,10 @@ void ReturnValue::Set(int32_t i) { static_assert(std::is_base_of::value, "type check"); using I = internal::Internals; if (V8_LIKELY(I::IsValidSmi(i))) { - *value_ = I::IntToSmi(i); + SetInternal(I::IntToSmi(i)); return; } - Set(Integer::New(GetIsolate(), i)); + SetNonEmpty(Integer::New(GetIsolate(), i)); } template @@ -377,7 +398,7 @@ void ReturnValue::Set(uint32_t i) { Set(static_cast(i)); return; } - Set(Integer::NewFromUnsigned(GetIsolate(), i)); + SetNonEmpty(Integer::NewFromUnsigned(GetIsolate(), i)); } template @@ -386,7 +407,7 @@ void ReturnValue::Set(uint16_t i) { using I = internal::Internals; static_assert(I::IsValidSmi(std::numeric_limits::min())); static_assert(I::IsValidSmi(std::numeric_limits::max())); - *value_ = I::IntToSmi(i); + SetInternal(I::IntToSmi(i)); } template @@ -398,9 +419,8 @@ void ReturnValue::Set(bool value) { internal::PerformCastCheck( internal::ValueHelper::SlotAsValue(value_)); #endif // V8_ENABLE_CHECKS - *value_ = I::DecompressTaggedField( - *value_, value ? I::StaticReadOnlyRoot::kTrueValue - : I::StaticReadOnlyRoot::kFalseValue); + SetInternal(value ? I::StaticReadOnlyRoot::kTrueValue + : I::StaticReadOnlyRoot::kFalseValue); #else int root_index; if (value) { @@ -412,6 +432,16 @@ void ReturnValue::Set(bool value) { #endif // V8_STATIC_ROOTS_BOOL } +template +void ReturnValue::SetTheHole() { + using I = internal::Internals; +#if V8_STATIC_ROOTS_BOOL + SetInternal(I::StaticReadOnlyRoot::kTheHoleValue); +#else + *value_ = I::GetRoot(GetIsolate(), I::kTheHoleValueRootIndex); +#endif // V8_STATIC_ROOTS_BOOL +} + template void ReturnValue::SetNull() { static_assert(std::is_base_of::value, "type check"); @@ -421,8 +451,7 @@ void ReturnValue::SetNull() { internal::PerformCastCheck( internal::ValueHelper::SlotAsValue(value_)); #endif // V8_ENABLE_CHECKS - *value_ = - I::DecompressTaggedField(*value_, I::StaticReadOnlyRoot::kNullValue); + SetInternal(I::StaticReadOnlyRoot::kNullValue); #else *value_ = I::GetRoot(GetIsolate(), I::kNullValueRootIndex); #endif // V8_STATIC_ROOTS_BOOL @@ -437,8 +466,7 @@ void ReturnValue::SetUndefined() { internal::PerformCastCheck( internal::ValueHelper::SlotAsValue(value_)); #endif // V8_ENABLE_CHECKS - *value_ = - I::DecompressTaggedField(*value_, I::StaticReadOnlyRoot::kUndefinedValue); + SetInternal(I::StaticReadOnlyRoot::kUndefinedValue); #else *value_ = I::GetRoot(GetIsolate(), I::kUndefinedValueRootIndex); #endif // V8_STATIC_ROOTS_BOOL @@ -453,8 +481,7 @@ void ReturnValue::SetEmptyString() { internal::PerformCastCheck( internal::ValueHelper::SlotAsValue(value_)); #endif // V8_ENABLE_CHECKS - *value_ = - I::DecompressTaggedField(*value_, I::StaticReadOnlyRoot::kEmptyString); + SetInternal(I::StaticReadOnlyRoot::kEmptyString); #else *value_ = I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex); #endif // V8_STATIC_ROOTS_BOOL @@ -485,12 +512,6 @@ void ReturnValue::Set(S* whatever) { static_assert(sizeof(S) < 0, "incompilable to prevent inadvertent misuse"); } -template -internal::Address ReturnValue::GetDefaultValue() { - using I = internal::Internals; - return I::GetRoot(GetIsolate(), I::kTheHoleValueRootIndex); -} - template FunctionCallbackInfo::FunctionCallbackInfo(internal::Address* implicit_args, internal::Address* values, diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index 48001c68b0b433..322b22d98e8be4 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -425,7 +425,7 @@ constexpr 
uint64_t kAllExternalPointerTypeTags[] = { /* it is the Embedder's responsibility to ensure type safety (against */ \ /* substitution) and lifetime validity of these objects. */ \ V(kExternalObjectValueTag, TAG(13)) \ - V(kCallHandlerInfoCallbackTag, TAG(14)) \ + V(kFunctionTemplateInfoCallbackTag, TAG(14)) \ V(kAccessorInfoGetterTag, TAG(15)) \ V(kAccessorInfoSetterTag, TAG(16)) \ V(kWasmInternalFunctionCallTargetTag, TAG(17)) \ @@ -478,7 +478,7 @@ V8_INLINE static constexpr bool IsSharedExternalPointerType( V8_INLINE static constexpr bool IsMaybeReadOnlyExternalPointerType( ExternalPointerTag tag) { return tag == kAccessorInfoGetterTag || tag == kAccessorInfoSetterTag || - tag == kCallHandlerInfoCallbackTag; + tag == kFunctionTemplateInfoCallbackTag; } // Sanity checks. @@ -746,23 +746,28 @@ class Internals { #if V8_STATIC_ROOTS_BOOL -// These constants need to be initialized in api.cc. +// These constants are copied from static-roots.h and guarded by static asserts. #define EXPORTED_STATIC_ROOTS_PTR_LIST(V) \ - V(UndefinedValue) \ - V(NullValue) \ - V(TrueValue) \ - V(FalseValue) \ - V(EmptyString) \ - V(TheHoleValue) + V(UndefinedValue, 0x69) \ + V(NullValue, 0x85) \ + V(TrueValue, 0xc9) \ + V(FalseValue, 0xad) \ + V(EmptyString, 0xa1) \ + V(TheHoleValue, 0x719) using Tagged_t = uint32_t; struct StaticReadOnlyRoot { -#define DEF_ROOT(name) V8_EXPORT static const Tagged_t k##name; +#define DEF_ROOT(name, value) static constexpr Tagged_t k##name = value; EXPORTED_STATIC_ROOTS_PTR_LIST(DEF_ROOT) #undef DEF_ROOT - V8_EXPORT static const Tagged_t kFirstStringMap; - V8_EXPORT static const Tagged_t kLastStringMap; + static constexpr Tagged_t kFirstStringMap = 0xe5; + static constexpr Tagged_t kLastStringMap = 0x47d; + +#define PLUSONE(...) +1 + static constexpr size_t kNumberOfExportedStaticRoots = + 2 + EXPORTED_STATIC_ROOTS_PTR_LIST(PLUSONE); +#undef PLUSONE }; #endif // V8_STATIC_ROOTS_BOOL @@ -786,6 +791,11 @@ class Internals { static const int kJSObjectType = 0x421; static const int kFirstJSApiObjectType = 0x422; static const int kLastJSApiObjectType = 0x80A; + // Defines a range [kFirstEmbedderJSApiObjectType, kJSApiObjectTypesCount] + // of JSApiObject instance type values that an embedder can use. + static const int kFirstEmbedderJSApiObjectType = 0; + static const int kLastEmbedderJSApiObjectType = + kLastJSApiObjectType - kFirstJSApiObjectType; static const int kUndefinedOddballKind = 4; static const int kNullOddballKind = 3; @@ -939,15 +949,15 @@ class Internals { Address base = *reinterpret_cast( reinterpret_cast(isolate) + kIsolateCageBaseOffset); switch (index) { -#define DECOMPRESS_ROOT(name) \ - case k##name##RootIndex: \ +#define DECOMPRESS_ROOT(name, ...) 
\ + case k##name##RootIndex: \ return base + StaticReadOnlyRoot::k##name; EXPORTED_STATIC_ROOTS_PTR_LIST(DECOMPRESS_ROOT) #undef DECOMPRESS_ROOT +#undef EXPORTED_STATIC_ROOTS_PTR_LIST default: break; } -#undef EXPORTED_STATIC_ROOTS_PTR_LIST #endif // V8_STATIC_ROOTS_BOOL return *GetRootSlot(isolate, index); } @@ -1046,6 +1056,10 @@ class Internals { return addr & -static_cast(kPtrComprCageBaseAlignment); } + V8_INLINE static uint32_t CompressTagged(Address value) { + return static_cast(value); + } + V8_INLINE static Address DecompressTaggedField(Address heap_object_ptr, uint32_t value) { Address base = GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr); diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h index a3ceec01334ea0..585b513fac446a 100644 --- a/deps/v8/include/v8-isolate.h +++ b/deps/v8/include/v8-isolate.h @@ -562,6 +562,7 @@ class V8_EXPORT Isolate { kWasmTypeReflection = 137, kWasmExnRef = 138, kWasmTypedFuncRef = 139, + kInvalidatedStringWrapperToPrimitiveProtector = 140, // If you add new values here, you'll also need to update Chromium's: // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h index b61f27af6e3410..313c0287bcf882 100644 --- a/deps/v8/include/v8-platform.h +++ b/deps/v8/include/v8-platform.h @@ -76,8 +76,12 @@ class TaskRunner { /** * Schedules a task to be invoked by this TaskRunner. The TaskRunner * implementation takes ownership of |task|. + * + * Embedders should override PostTaskImpl instead of this. */ - virtual void PostTask(std::unique_ptr task) = 0; + virtual void PostTask(std::unique_ptr task) { + PostTaskImpl(std::move(task), SourceLocation::Current()); + } /** * Schedules a task to be invoked by this TaskRunner. The TaskRunner @@ -93,16 +97,25 @@ class TaskRunner { * execution is not allowed to nest. * * Requires that |TaskRunner::NonNestableTasksEnabled()| is true. + * + * Embedders should override PostNonNestableTaskImpl instead of this. */ - virtual void PostNonNestableTask(std::unique_ptr task) {} + virtual void PostNonNestableTask(std::unique_ptr task) { + PostNonNestableTaskImpl(std::move(task), SourceLocation::Current()); + } /** * Schedules a task to be invoked by this TaskRunner. The task is scheduled * after the given number of seconds |delay_in_seconds|. The TaskRunner * implementation takes ownership of |task|. + * + * Embedders should override PostDelayedTaskImpl instead of this. */ virtual void PostDelayedTask(std::unique_ptr task, - double delay_in_seconds) = 0; + double delay_in_seconds) { + PostDelayedTaskImpl(std::move(task), delay_in_seconds, + SourceLocation::Current()); + } /** * Schedules a task to be invoked by this TaskRunner. The task is scheduled @@ -119,9 +132,14 @@ class TaskRunner { * execution is not allowed to nest. * * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true. + * + * Embedders should override PostNonNestableDelayedTaskImpl instead of this. */ virtual void PostNonNestableDelayedTask(std::unique_ptr task, - double delay_in_seconds) {} + double delay_in_seconds) { + PostNonNestableDelayedTaskImpl(std::move(task), delay_in_seconds, + SourceLocation::Current()); + } /** * Schedules an idle task to be invoked by this TaskRunner. The task is @@ -130,8 +148,12 @@ class TaskRunner { * relative to other task types and may be starved for an arbitrarily long * time if no idle time is available. The TaskRunner implementation takes * ownership of |task|. 
+ * + * Embedders should override PostIdleTaskImpl instead of this. */ - virtual void PostIdleTask(std::unique_ptr task) = 0; + virtual void PostIdleTask(std::unique_ptr task) { + PostIdleTaskImpl(std::move(task), SourceLocation::Current()); + } /** * Returns true if idle tasks are enabled for this TaskRunner. @@ -153,6 +175,23 @@ class TaskRunner { TaskRunner(const TaskRunner&) = delete; TaskRunner& operator=(const TaskRunner&) = delete; + + protected: + /** + * Implementation of above methods with an additional `location` argument. + */ + virtual void PostTaskImpl(std::unique_ptr task, + const SourceLocation& location) {} + virtual void PostNonNestableTaskImpl(std::unique_ptr task, + const SourceLocation& location) {} + virtual void PostDelayedTaskImpl(std::unique_ptr task, + double delay_in_seconds, + const SourceLocation& location) {} + virtual void PostNonNestableDelayedTaskImpl(std::unique_ptr task, + double delay_in_seconds, + const SourceLocation& location) {} + virtual void PostIdleTaskImpl(std::unique_ptr task, + const SourceLocation& location) {} }; /** diff --git a/deps/v8/include/v8-script.h b/deps/v8/include/v8-script.h index db22de9b18797b..75589863d9d1c7 100644 --- a/deps/v8/include/v8-script.h +++ b/deps/v8/include/v8-script.h @@ -291,11 +291,6 @@ class V8_EXPORT Module : public Data { * module_name is used solely for logging/debugging and doesn't affect module * behavior. */ - V8_DEPRECATED("Please use the version that takes a MemorySpan") - static Local CreateSyntheticModule( - Isolate* isolate, Local module_name, - const std::vector>& export_names, - SyntheticModuleEvaluationSteps evaluation_steps); static Local CreateSyntheticModule( Isolate* isolate, Local module_name, const MemorySpan>& export_names, @@ -311,17 +306,6 @@ class V8_EXPORT Module : public Data { V8_WARN_UNUSED_RESULT Maybe SetSyntheticModuleExport( Isolate* isolate, Local export_name, Local export_value); - /** - * Search the modules requested directly or indirectly by the module for - * any top-level await that has not yet resolved. If there is any, the - * returned vector contains a tuple of the unresolved module and a message - * with the pending top-level await. - * An embedder may call this before exiting to improve error messages. - */ - V8_DEPRECATED("Please use GetStalledTopLevelAwaitMessages") - std::vector, Local>> - GetStalledTopLevelAwaitMessage(Isolate* isolate); - /** * Search the modules requested directly or indirectly by the module for * any top-level await that has not yet resolved. If there is any, the diff --git a/deps/v8/include/v8-snapshot.h b/deps/v8/include/v8-snapshot.h index a1dc0c3881c22d..9e5a53f134a82c 100644 --- a/deps/v8/include/v8-snapshot.h +++ b/deps/v8/include/v8-snapshot.h @@ -38,7 +38,7 @@ class V8_EXPORT StartupData { /** * Callback and supporting data used in SnapshotCreator to implement embedder - * logic to serialize internal fields. + * logic to serialize internal fields of v8::Objects. * Internal fields that directly reference V8 objects are serialized without * calling this callback. Internal fields that contain aligned pointers are * serialized by this callback if it returns non-zero result. Otherwise it is @@ -53,13 +53,24 @@ struct SerializeInternalFieldsCallback { CallbackFunction callback; void* data; }; -// Note that these fields are called "internal fields" in the API and called -// "embedder fields" within V8. 
-using SerializeEmbedderFieldsCallback = SerializeInternalFieldsCallback; + +/** + * Similar to SerializeInternalFieldsCallback, but works with the embedder data + * in a v8::Context. + */ +struct SerializeContextDataCallback { + using CallbackFunction = StartupData (*)(Local holder, int index, + void* data); + SerializeContextDataCallback(CallbackFunction function = nullptr, + void* data_arg = nullptr) + : callback(function), data(data_arg) {} + CallbackFunction callback; + void* data; +}; /** * Callback and supporting data used to implement embedder logic to deserialize - * internal fields. + * internal fields of v8::Objects. */ struct DeserializeInternalFieldsCallback { using CallbackFunction = void (*)(Local holder, int index, @@ -67,12 +78,24 @@ struct DeserializeInternalFieldsCallback { DeserializeInternalFieldsCallback(CallbackFunction function = nullptr, void* data_arg = nullptr) : callback(function), data(data_arg) {} - void (*callback)(Local holder, int index, StartupData payload, - void* data); + + CallbackFunction callback; void* data; }; -using DeserializeEmbedderFieldsCallback = DeserializeInternalFieldsCallback; +/** + * Similar to DeserializeInternalFieldsCallback, but works with the embedder + * data in a v8::Context. + */ +struct DeserializeContextDataCallback { + using CallbackFunction = void (*)(Local holder, int index, + StartupData payload, void* data); + DeserializeContextDataCallback(CallbackFunction function = nullptr, + void* data_arg = nullptr) + : callback(function), data(data_arg) {} + CallbackFunction callback; + void* data; +}; /** * Helper class to create a snapshot data blob. @@ -156,23 +179,37 @@ class V8_EXPORT SnapshotCreator { * The snapshot will not contain the global proxy, and we expect one or a * global object template to create one, to be provided upon deserialization. * - * \param callback optional callback to serialize internal fields. + * \param internal_fields_serializer An optional callback used to serialize + * internal pointer fields set by + * v8::Object::SetAlignedPointerInInternalField(). + * + * \param context_data_serializer An optional callback used to serialize + * context embedder data set by + * v8::Context::SetAlignedPointerInEmbedderData(). + * */ - void SetDefaultContext(Local context, - SerializeInternalFieldsCallback callback = - SerializeInternalFieldsCallback()); + void SetDefaultContext( + Local context, + SerializeInternalFieldsCallback internal_fields_serializer = + SerializeInternalFieldsCallback(), + SerializeContextDataCallback context_data_serializer = + SerializeContextDataCallback()); /** * Add additional context to be included in the snapshot blob. * The snapshot will include the global proxy. * - * \param callback optional callback to serialize internal fields. + * \param internal_fields_serializer Similar to internal_fields_serializer + * in SetDefaultContext() but only applies to the context being added. * - * \returns the index of the context in the snapshot blob. + * \param context_data_serializer Similar to context_data_serializer + * in SetDefaultContext() but only applies to the context being added. 
*/ size_t AddContext(Local context, - SerializeInternalFieldsCallback callback = - SerializeInternalFieldsCallback()); + SerializeInternalFieldsCallback internal_fields_serializer = + SerializeInternalFieldsCallback(), + SerializeContextDataCallback context_data_serializer = + SerializeContextDataCallback()); /** * Attach arbitrary V8::Data to the context snapshot, which can be retrieved diff --git a/deps/v8/include/v8-statistics.h b/deps/v8/include/v8-statistics.h index aeca8cf44843e8..82b78f5ec65729 100644 --- a/deps/v8/include/v8-statistics.h +++ b/deps/v8/include/v8-statistics.h @@ -61,33 +61,8 @@ class V8_EXPORT MeasureMemoryDelegate { */ virtual bool ShouldMeasure(Local context) = 0; - /** - * This function is called when memory measurement finishes. - * - * \param context_sizes_in_bytes a vector of (context, size) pairs that - * includes each context for which ShouldMeasure returned true and that - * was not garbage collected while the memory measurement was in progress. - * - * \param unattributed_size_in_bytes total size of objects that were not - * attributed to any context (i.e. are likely shared objects). - */ - V8_DEPRECATED("Please use the version that takes a result struct") - virtual void MeasurementComplete( - const std::vector, size_t>>& - context_sizes_in_bytes, - size_t unattributed_size_in_bytes) {} - /** Holds the result of a memory measurement request. */ struct Result { - /** - * A vector of (context, size) pairs that includes each context for - * which ShouldMeasure returned true and that was not garbage collected - * while the memory measurement was in progress. - */ - V8_DEPRECATED("Please use contexts and sizes_in_bytes") - const std::vector, size_t>>& - context_sizes_in_bytes; - /** * Two spans of equal length: the first includes each context for which * ShouldMeasure returned true and that was not garbage collected while diff --git a/deps/v8/include/v8-template.h b/deps/v8/include/v8-template.h index 674d4201d5d782..6a0c898f4507d2 100644 --- a/deps/v8/include/v8-template.h +++ b/deps/v8/include/v8-template.h @@ -94,6 +94,7 @@ class V8_EXPORT Template : public Data { PropertyAttribute attribute, AccessControl settings, SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect, SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect); + V8_DEPRECATE_SOON("Use SetNativeDataProperty with Local instead") void SetNativeDataProperty( Local name, AccessorGetterCallback getter, AccessorSetterCallback setter = nullptr, @@ -131,27 +132,35 @@ class V8_EXPORT Template : public Data { friend class FunctionTemplate; }; -// TODO(dcarney): Replace GenericNamedPropertyFooCallback with just -// NamedPropertyFooCallback. +/** + * Interceptor callbacks use this value to indicate whether the request was + * intercepted or not. + */ +enum class Intercepted : uint8_t { kNo = 0, kYes = 1 }; /** * Interceptor for get requests on an object. * - * Use `info.GetReturnValue().Set()` to set the return value of the - * intercepted get request. If the property does not exist the callback should - * not set the result and must not produce side effects. + * If the interceptor handles the request (i.e. the property should not be + * looked up beyond the interceptor) it should + * - (optionally) use info.GetReturnValue().Set()` to set the return value + * (by default the result is set to v8::Undefined), + * - return `Intercepted::kYes`. + * If the interceptor does not handle the request it must return + * `Intercepted::kNo` and it must not produce side effects. 
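// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): with the deprecated
// vector-based MeasureMemoryDelegate API removed above, a delegate consumes
// the span-based Result struct. It assumes the remaining, non-deprecated
// `MeasurementComplete(Result)` overload and the `contexts`,
// `sizes_in_bytes` and `unattributed_size_in_bytes` members named in the
// deprecation notes; neither is visible in this hunk.
#include <cstddef>
#include <cstdio>
#include "v8.h"

class LoggingMeasureMemoryDelegate : public v8::MeasureMemoryDelegate {
 public:
  bool ShouldMeasure(v8::Local<v8::Context> context) override { return true; }

  void MeasurementComplete(Result result) override {
    // `contexts` and `sizes_in_bytes` are parallel spans of equal length.
    for (size_t i = 0; i < result.contexts.size(); ++i) {
      std::printf("context %zu: %zu bytes\n", i, result.sizes_in_bytes[i]);
    }
    std::printf("unattributed: %zu bytes\n",
                result.unattributed_size_in_bytes);
  }
};
// ---------------------------------------------------------------------------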
* * \param property The name of the property for which the request was * intercepted. * \param info Information about the intercepted request, such as - * isolate, receiver, return value, or whether running in `'use strict`' mode. + * isolate, receiver, return value, or whether running in `'use strict'` mode. * See `PropertyCallbackInfo`. * * \code - * void GetterCallback( - * Local name, - * const v8::PropertyCallbackInfo& info) { - * info.GetReturnValue().Set(v8_num(42)); + * Intercepted GetterCallback( + * Local name, const v8::PropertyCallbackInfo& info) { + * if (!IsKnownProperty(info.GetIsolate(), name)) return Intercepted::kNo; + * info.GetReturnValue().Set(v8_num(42)); + * return Intercepted::kYes; * } * * v8::Local templ = @@ -171,18 +180,23 @@ class V8_EXPORT Template : public Data { * * See also `ObjectTemplate::SetHandler`. */ +using NamedPropertyGetterCallback = Intercepted (*)( + Local property, const PropertyCallbackInfo& info); +// This variant will be deprecated soon. +// +// Use `info.GetReturnValue().Set()` to set the return value of the +// intercepted get request. If the property does not exist the callback should +// not set the result and must not produce side effects. using GenericNamedPropertyGetterCallback = void (*)(Local property, const PropertyCallbackInfo& info); /** * Interceptor for set requests on an object. * - * Use `info.GetReturnValue()` to indicate whether the request was intercepted - * or not. If the setter successfully intercepts the request, i.e., if the - * request should not be further executed, call - * `info.GetReturnValue().Set(value)`. If the setter did not intercept the - * request, i.e., if the request should be handled as if no interceptor is - * present, do not not call `Set()` and do not produce side effects. + * If the interceptor handles the request (i.e. the property should not be + * looked up beyond the interceptor) it should return `Intercepted::kYes`. + * If the interceptor does not handle the request it must return + * `Intercepted::kNo` and it must not produce side effects. * * \param property The name of the property for which the request was * intercepted. @@ -192,9 +206,19 @@ using GenericNamedPropertyGetterCallback = * isolate, receiver, return value, or whether running in `'use strict'` mode. * See `PropertyCallbackInfo`. * - * See also - * `ObjectTemplate::SetHandler.` + * See also `ObjectTemplate::SetHandler.` */ +using NamedPropertySetterCallback = + Intercepted (*)(Local property, Local value, + const PropertyCallbackInfo& info); +// This variant will be deprecated soon. +// +// Use `info.GetReturnValue()` to indicate whether the request was intercepted +// or not. If the setter successfully intercepts the request, i.e., if the +// request should not be further executed, call +// `info.GetReturnValue().Set(value)`. If the setter did not intercept the +// request, i.e., if the request should be handled as if no interceptor is +// present, do not call `Set()` and do not produce side effects. using GenericNamedPropertySetterCallback = void (*)(Local property, Local value, const PropertyCallbackInfo& info); @@ -204,10 +228,13 @@ using GenericNamedPropertySetterCallback = * property, e.g., getOwnPropertyDescriptor(), propertyIsEnumerable(), and * defineProperty(). * - * Use `info.GetReturnValue().Set(value)` to set the property attributes. The - * value is an integer encoding a `v8::PropertyAttribute`. If the property does - * not exist the callback should not set the result and must not produce side - * effects.
+ * If the interceptor handles the request (i.e. the property should not be + * looked up beyond the interceptor) it should + * - use `info.GetReturnValue().Set()` to set the return value to an Integer + * encoding the `v8::PropertyAttribute` bits, + * - return `Intercepted::kYes`. + * If the interceptor does not handle the request it must return + * `Intercepted::kNo` and it must not produce side effects. * * \param property The name of the property for which the request was * intercepted. @@ -219,21 +246,29 @@ using GenericNamedPropertySetterCallback = * they do not return the attributes. For example, `hasOwnProperty()` can * trigger this interceptor depending on the state of the object. * - * See also - * `ObjectTemplate::SetHandler.` + * See also `ObjectTemplate::SetHandler.` */ +using NamedPropertyQueryCallback = Intercepted (*)( + Local property, const PropertyCallbackInfo& info); +// This variant will be deprecated soon. +// +// Use `info.GetReturnValue().Set(value)` to set the property attributes. The +// value is an integer encoding a `v8::PropertyAttribute`. If the property does +// not exist the callback should not set the result and must not produce side +// effects. using GenericNamedPropertyQueryCallback = void (*)(Local property, const PropertyCallbackInfo& info); /** * Interceptor for delete requests on an object. * - * Use `info.GetReturnValue()` to indicate whether the request was intercepted - * or not. If the deleter successfully intercepts the request, i.e., if the - * request should not be further executed, call - * `info.GetReturnValue().Set(value)` with a boolean `value`. The `value` is - * used as the return value of `delete`. If the deleter does not intercept the - * request then it should not set the result and must not produce side effects. + * If the interceptor handles the request (i.e. the property should not be + * looked up beyond the interceptor) it should + * - use `info.GetReturnValue().Set()` to set the return value to a Boolean + * indicating whether the property deletion was successful or not, + * - return `Intercepted::kYes`. + * If the interceptor does not handle the request it must return + * `Intercepted::kNo` and it must not produce side effects. * * \param property The name of the property for which the request was * intercepted. @@ -247,6 +282,16 @@ using GenericNamedPropertyQueryCallback = * * See also `ObjectTemplate::SetHandler.` */ +using NamedPropertyDeleterCallback = Intercepted (*)( + Local property, const PropertyCallbackInfo& info); +// This variant will be deprecated soon. +// +// Use `info.GetReturnValue()` to indicate whether the request was intercepted +// or not. If the deleter successfully intercepts the request, i.e., if the +// request should not be further executed, call +// `info.GetReturnValue().Set(value)` with a boolean `value`. The `value` is +// used as the return value of `delete`. If the deleter does not intercept the +// request then it should not set the result and must not produce side effects. using GenericNamedPropertyDeleterCallback = void (*)(Local property, const PropertyCallbackInfo& info); @@ -256,18 +301,19 @@ using GenericNamedPropertyDeleterCallback = * * Note: The values in the array must be of type v8::Name. */ -using GenericNamedPropertyEnumeratorCallback = +using NamedPropertyEnumeratorCallback = void (*)(const PropertyCallbackInfo& info); +// This variant will be deprecated soon. +// This is just a renaming of the typedef.
+using GenericNamedPropertyEnumeratorCallback = NamedPropertyEnumeratorCallback; /** * Interceptor for defineProperty requests on an object. * - * Use `info.GetReturnValue()` to indicate whether the request was intercepted - * or not. If the definer successfully intercepts the request, i.e., if the - * request should not be further executed, call - * `info.GetReturnValue().Set(value)`. If the definer did not intercept the - * request, i.e., if the request should be handled as if no interceptor is - * present, do not not call `Set()` and do not produce side effects. + * If the interceptor handles the request (i.e. the property should not be + * looked up beyond the interceptor) it should return `Intercepted::kYes`. + * If the interceptor does not handle the request it must return + * `Intercepted::kNo` and it must not produce side effects. * * \param property The name of the property for which the request was * intercepted. @@ -279,6 +325,17 @@ using GenericNamedPropertyEnumeratorCallback = * * See also `ObjectTemplate::SetHandler`. */ +using NamedPropertyDefinerCallback = + Intercepted (*)(Local property, const PropertyDescriptor& desc, + const PropertyCallbackInfo& info); +// This variant will be deprecated soon. +// +// Use `info.GetReturnValue()` to indicate whether the request was intercepted +// or not. If the definer successfully intercepts the request, i.e., if the +// request should not be further executed, call +// `info.GetReturnValue().Set(value)`. If the definer did not intercept the +// request, i.e., if the request should be handled as if no interceptor is +// present, do not call `Set()` and do not produce side effects. using GenericNamedPropertyDefinerCallback = void (*)(Local property, const PropertyDescriptor& desc, const PropertyCallbackInfo& info); @@ -286,10 +343,14 @@ using GenericNamedPropertyEnumeratorCallback = /** * Interceptor for getOwnPropertyDescriptor requests on an object. * - * Use `info.GetReturnValue().Set()` to set the return value of the - * intercepted request. The return value must be an object that - * can be converted to a PropertyDescriptor, e.g., a `v8::value` returned from - * `v8::Object::getOwnPropertyDescriptor`. + * If the interceptor handles the request (i.e. the property should not be + * looked up beyond the interceptor) it should + * - use `info.GetReturnValue().Set()` to set the return value which must be + * an object that can be converted to a PropertyDescriptor (for example, + * a value returned by `v8::Object::getOwnPropertyDescriptor`), + * - return `Intercepted::kYes`. + * If the interceptor does not handle the request it must return + * `Intercepted::kNo` and it must not produce side effects. * * \param property The name of the property for which the request was * intercepted. @@ -302,18 +363,36 @@ using GenericNamedPropertyDefinerCallback = * * See also `ObjectTemplate::SetHandler`. */ +using NamedPropertyDescriptorCallback = Intercepted (*)( + Local property, const PropertyCallbackInfo& info); +// This variant will be deprecated soon. +// +// Use `info.GetReturnValue().Set()` to set the return value of the +// intercepted request. The return value must be an object that +// can be converted to a PropertyDescriptor, e.g., a `v8::Value` returned from +// `v8::Object::getOwnPropertyDescriptor`.
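// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): a pair of new-style named
// interceptor callbacks matching the typedefs above (the new setter takes a
// PropertyCallbackInfo<void>). Returning Intercepted::kNo lets V8 continue
// the regular property lookup; the property name "answer" is a hypothetical
// example.
#include <cstring>
#include "v8.h"

v8::Intercepted ExampleNamedGetter(
    v8::Local<v8::Name> property,
    const v8::PropertyCallbackInfo<v8::Value>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  if (!property->IsString()) return v8::Intercepted::kNo;
  v8::String::Utf8Value key(isolate, property);
  if (std::strcmp(*key ? *key : "", "answer") != 0) {
    return v8::Intercepted::kNo;  // Not ours: fall through to normal lookup.
  }
  info.GetReturnValue().Set(v8::Number::New(isolate, 42));
  return v8::Intercepted::kYes;
}

v8::Intercepted ExampleNamedSetter(
    v8::Local<v8::Name> property, v8::Local<v8::Value> value,
    const v8::PropertyCallbackInfo<void>& info) {
  if (!property->IsString()) return v8::Intercepted::kNo;
  v8::String::Utf8Value key(info.GetIsolate(), property);
  // Treat "answer" as read-only: swallow the write, let other names through.
  if (std::strcmp(*key ? *key : "", "answer") != 0) {
    return v8::Intercepted::kNo;
  }
  return v8::Intercepted::kYes;
}
// ---------------------------------------------------------------------------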
using GenericNamedPropertyDescriptorCallback = void (*)(Local property, const PropertyCallbackInfo& info); +// TODO(ishell): Rename IndexedPropertyXxxCallbackV2 back to +// IndexedPropertyXxxCallback once the old IndexedPropertyXxxCallback is +// removed. + /** * See `v8::GenericNamedPropertyGetterCallback`. */ +using IndexedPropertyGetterCallbackV2 = + Intercepted (*)(uint32_t index, const PropertyCallbackInfo& info); +// This variant will be deprecated soon. using IndexedPropertyGetterCallback = void (*)(uint32_t index, const PropertyCallbackInfo& info); /** * See `v8::GenericNamedPropertySetterCallback`. */ +using IndexedPropertySetterCallbackV2 = Intercepted (*)( + uint32_t index, Local value, const PropertyCallbackInfo& info); +// This variant will be deprecated soon. using IndexedPropertySetterCallback = void (*)(uint32_t index, Local value, const PropertyCallbackInfo& info); @@ -321,12 +400,18 @@ using IndexedPropertySetterCallback = /** * See `v8::GenericNamedPropertyQueryCallback`. */ +using IndexedPropertyQueryCallbackV2 = + Intercepted (*)(uint32_t index, const PropertyCallbackInfo& info); +// This variant will be deprecated soon. using IndexedPropertyQueryCallback = void (*)(uint32_t index, const PropertyCallbackInfo& info); /** * See `v8::GenericNamedPropertyDeleterCallback`. */ +using IndexedPropertyDeleterCallbackV2 = + Intercepted (*)(uint32_t index, const PropertyCallbackInfo& info); +// This variant will be deprecated soon. using IndexedPropertyDeleterCallback = void (*)(uint32_t index, const PropertyCallbackInfo& info); @@ -342,6 +427,10 @@ using IndexedPropertyEnumeratorCallback = /** * See `v8::GenericNamedPropertyDefinerCallback`. */ +using IndexedPropertyDefinerCallbackV2 = + Intercepted (*)(uint32_t index, const PropertyDescriptor& desc, + const PropertyCallbackInfo& info); +// This variant will be deprecated soon. using IndexedPropertyDefinerCallback = void (*)(uint32_t index, const PropertyDescriptor& desc, const PropertyCallbackInfo& info); @@ -349,6 +438,9 @@ using IndexedPropertyDefinerCallback = /** * See `v8::GenericNamedPropertyDescriptorCallback`. */ +using IndexedPropertyDescriptorCallbackV2 = + Intercepted (*)(uint32_t index, const PropertyCallbackInfo& info); +// This variant will be deprecated soon. using IndexedPropertyDescriptorCallback = void (*)(uint32_t index, const PropertyCallbackInfo& info); @@ -611,7 +703,8 @@ enum class PropertyHandlerFlags { */ kNone = 0, - /** Will not call into interceptor for properties on the receiver or prototype + /** + * Will not call into interceptor for properties on the receiver or prototype * chain, i.e., only call into interceptor for properties that do not exist. * Currently only valid for named interceptors. */ @@ -627,9 +720,49 @@ enum class PropertyHandlerFlags { * The getter, query, enumerator callbacks do not produce side effects. */ kHasNoSideEffect = 1 << 2, + + /** + * This flag is used to distinguish which callbacks were provided - + * GenericNamedPropertyXXXCallback (old signature) or + * NamedPropertyXXXCallback (new signature). + * DO NOT use this flag, it'll be removed once embedders migrate to new + * callbacks signatures. 
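// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): an indexed interceptor using
// the new V2 signature declared above. The backing size of 10 elements is a
// hypothetical example.
#include <cstdint>
#include "v8.h"

v8::Intercepted ExampleIndexedGetter(
    uint32_t index, const v8::PropertyCallbackInfo<v8::Value>& info) {
  if (index >= 10) return v8::Intercepted::kNo;  // Outside intercepted range.
  info.GetReturnValue().Set(
      v8::Integer::NewFromUnsigned(info.GetIsolate(), index * 2));
  return v8::Intercepted::kYes;
}
// ---------------------------------------------------------------------------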
+ */ + kInternalNewCallbacksSignatures = 1 << 10, }; struct NamedPropertyHandlerConfiguration { + private: + static constexpr PropertyHandlerFlags WithNewSignatureFlag( + PropertyHandlerFlags flags) { + return static_cast( + static_cast(flags) | + static_cast( + PropertyHandlerFlags::kInternalNewCallbacksSignatures)); + } + + public: + NamedPropertyHandlerConfiguration( + NamedPropertyGetterCallback getter, // + NamedPropertySetterCallback setter, // + NamedPropertyQueryCallback query, // + NamedPropertyDeleterCallback deleter, // + NamedPropertyEnumeratorCallback enumerator, // + NamedPropertyDefinerCallback definer, // + NamedPropertyDescriptorCallback descriptor, // + Local data = Local(), + PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) + : getter(reinterpret_cast(getter)), + setter(reinterpret_cast(setter)), + query(reinterpret_cast(query)), + deleter(reinterpret_cast(deleter)), + enumerator(enumerator), + definer(reinterpret_cast(definer)), + descriptor(reinterpret_cast(descriptor)), + data(data), + flags(WithNewSignatureFlag(flags)) {} + + // This variant will be deprecated soon. NamedPropertyHandlerConfiguration( GenericNamedPropertyGetterCallback getter, GenericNamedPropertySetterCallback setter, @@ -640,35 +773,73 @@ struct NamedPropertyHandlerConfiguration { GenericNamedPropertyDescriptorCallback descriptor, Local data = Local(), PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) - : getter(getter), - setter(setter), - query(query), - deleter(deleter), + : getter(reinterpret_cast(getter)), + setter(reinterpret_cast(setter)), + query(reinterpret_cast(query)), + deleter(reinterpret_cast(deleter)), enumerator(enumerator), - definer(definer), - descriptor(descriptor), + definer(reinterpret_cast(definer)), + descriptor(reinterpret_cast(descriptor)), data(data), flags(flags) {} - NamedPropertyHandlerConfiguration( - /** Note: getter is required */ - GenericNamedPropertyGetterCallback getter = nullptr, + explicit NamedPropertyHandlerConfiguration( + NamedPropertyGetterCallback getter, + NamedPropertySetterCallback setter = nullptr, + NamedPropertyQueryCallback query = nullptr, + NamedPropertyDeleterCallback deleter = nullptr, + NamedPropertyEnumeratorCallback enumerator = nullptr, + Local data = Local(), + PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) + : getter(reinterpret_cast(getter)), + setter(reinterpret_cast(setter)), + query(reinterpret_cast(query)), + deleter(reinterpret_cast(deleter)), + enumerator(enumerator), + definer(nullptr), + descriptor(nullptr), + data(data), + flags(WithNewSignatureFlag(flags)) {} + + // This variant will be deprecated soon. 
+ explicit NamedPropertyHandlerConfiguration( + GenericNamedPropertyGetterCallback getter, GenericNamedPropertySetterCallback setter = nullptr, GenericNamedPropertyQueryCallback query = nullptr, GenericNamedPropertyDeleterCallback deleter = nullptr, GenericNamedPropertyEnumeratorCallback enumerator = nullptr, Local data = Local(), PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) - : getter(getter), - setter(setter), - query(query), - deleter(deleter), + : getter(reinterpret_cast(getter)), + setter(reinterpret_cast(setter)), + query(reinterpret_cast(query)), + deleter(reinterpret_cast(deleter)), enumerator(enumerator), definer(nullptr), descriptor(nullptr), data(data), flags(flags) {} + NamedPropertyHandlerConfiguration( + NamedPropertyGetterCallback getter, // + NamedPropertySetterCallback setter, // + NamedPropertyDescriptorCallback descriptor, // + NamedPropertyDeleterCallback deleter, // + NamedPropertyEnumeratorCallback enumerator, // + NamedPropertyDefinerCallback definer, // + Local data = Local(), + PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) + : getter(reinterpret_cast(getter)), + setter(reinterpret_cast(setter)), + query(nullptr), + deleter(reinterpret_cast(deleter)), + enumerator(enumerator), + definer(reinterpret_cast(definer)), + descriptor(reinterpret_cast(descriptor)), + data(data), + flags(WithNewSignatureFlag(flags)) {} + + // This variant will be deprecated soon. NamedPropertyHandlerConfiguration( GenericNamedPropertyGetterCallback getter, GenericNamedPropertySetterCallback setter, @@ -678,66 +849,136 @@ struct NamedPropertyHandlerConfiguration { GenericNamedPropertyDefinerCallback definer, Local data = Local(), PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) - : getter(getter), - setter(setter), + : getter(reinterpret_cast(getter)), + setter(reinterpret_cast(setter)), query(nullptr), - deleter(deleter), + deleter(reinterpret_cast(deleter)), enumerator(enumerator), - definer(definer), - descriptor(descriptor), + definer(reinterpret_cast(definer)), + descriptor(reinterpret_cast(descriptor)), data(data), flags(flags) {} - GenericNamedPropertyGetterCallback getter; - GenericNamedPropertySetterCallback setter; - GenericNamedPropertyQueryCallback query; - GenericNamedPropertyDeleterCallback deleter; - GenericNamedPropertyEnumeratorCallback enumerator; - GenericNamedPropertyDefinerCallback definer; - GenericNamedPropertyDescriptorCallback descriptor; + void* getter; // [Generic]NamedPropertyGetterCallback + void* setter; // [Generic]NamedPropertySetterCallback + void* query; // [Generic]NamedPropertyQueryCallback + void* deleter; // [Generic]NamedPropertyDeleterCallback + NamedPropertyEnumeratorCallback enumerator; + void* definer; // [Generic]NamedPropertyDefinerCallback + void* descriptor; // [Generic]NamedPropertyDescriptorCallback Local data; PropertyHandlerFlags flags; }; struct IndexedPropertyHandlerConfiguration { + private: + static constexpr PropertyHandlerFlags WithNewSignatureFlag( + PropertyHandlerFlags flags) { + return static_cast( + static_cast(flags) | + static_cast( + PropertyHandlerFlags::kInternalNewCallbacksSignatures)); + } + + public: IndexedPropertyHandlerConfiguration( - IndexedPropertyGetterCallback getter, - IndexedPropertySetterCallback setter, IndexedPropertyQueryCallback query, - IndexedPropertyDeleterCallback deleter, - IndexedPropertyEnumeratorCallback enumerator, - IndexedPropertyDefinerCallback definer, - IndexedPropertyDescriptorCallback descriptor, + IndexedPropertyGetterCallbackV2 getter, // + 
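// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): installing new-style named
// callbacks through NamedPropertyHandlerConfiguration. The constructors above
// add the kInternalNewCallbacksSignatures flag themselves, so the embedder
// only passes the callbacks. ExampleNamedGetter/Setter are the hypothetical
// callbacks from the earlier sketch, declared here to keep this standalone.
#include "v8.h"

v8::Intercepted ExampleNamedGetter(v8::Local<v8::Name>,
                                   const v8::PropertyCallbackInfo<v8::Value>&);
v8::Intercepted ExampleNamedSetter(v8::Local<v8::Name>, v8::Local<v8::Value>,
                                   const v8::PropertyCallbackInfo<void>&);

v8::Local<v8::ObjectTemplate> MakeNamedInterceptorTemplate(
    v8::Isolate* isolate) {
  v8::EscapableHandleScope scope(isolate);
  v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
      ExampleNamedGetter, ExampleNamedSetter));
  return scope.Escape(templ);
}
// ---------------------------------------------------------------------------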
IndexedPropertySetterCallbackV2 setter, // + IndexedPropertyQueryCallbackV2 query, // + IndexedPropertyDeleterCallbackV2 deleter, // + IndexedPropertyEnumeratorCallback enumerator, // + IndexedPropertyDefinerCallbackV2 definer, // + IndexedPropertyDescriptorCallbackV2 descriptor, // Local data = Local(), PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) - : getter(getter), - setter(setter), - query(query), - deleter(deleter), + : getter(reinterpret_cast(getter)), + setter(reinterpret_cast(setter)), + query(reinterpret_cast(query)), + deleter(reinterpret_cast(deleter)), enumerator(enumerator), - definer(definer), - descriptor(descriptor), + definer(reinterpret_cast(definer)), + descriptor(reinterpret_cast(descriptor)), data(data), - flags(flags) {} + flags(WithNewSignatureFlag(flags)) {} + // This variant will be deprecated soon. IndexedPropertyHandlerConfiguration( - /** Note: getter is required */ - IndexedPropertyGetterCallback getter = nullptr, + IndexedPropertyGetterCallback getter, // + IndexedPropertySetterCallback setter, // + IndexedPropertyQueryCallback query, // + IndexedPropertyDeleterCallback deleter, // + IndexedPropertyEnumeratorCallback enumerator, // + IndexedPropertyDefinerCallback definer, // + IndexedPropertyDescriptorCallback descriptor, // + Local data = Local(), + PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) + : getter(reinterpret_cast(getter)), + setter(reinterpret_cast(setter)), + query(reinterpret_cast(query)), + deleter(reinterpret_cast(deleter)), + enumerator(enumerator), + definer(reinterpret_cast(definer)), + descriptor(reinterpret_cast(descriptor)), + data(data), + flags(flags) {} + + explicit IndexedPropertyHandlerConfiguration( + IndexedPropertyGetterCallbackV2 getter = nullptr, + IndexedPropertySetterCallbackV2 setter = nullptr, + IndexedPropertyQueryCallbackV2 query = nullptr, + IndexedPropertyDeleterCallbackV2 deleter = nullptr, + IndexedPropertyEnumeratorCallback enumerator = nullptr, + Local data = Local(), + PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) + : getter(reinterpret_cast(getter)), + setter(reinterpret_cast(setter)), + query(reinterpret_cast(query)), + deleter(reinterpret_cast(deleter)), + enumerator(enumerator), + definer(nullptr), + descriptor(nullptr), + data(data), + flags(WithNewSignatureFlag(flags)) {} + + // This variant will be deprecated soon. 
+ explicit IndexedPropertyHandlerConfiguration( + IndexedPropertyGetterCallback getter, IndexedPropertySetterCallback setter = nullptr, IndexedPropertyQueryCallback query = nullptr, IndexedPropertyDeleterCallback deleter = nullptr, IndexedPropertyEnumeratorCallback enumerator = nullptr, Local data = Local(), PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) - : getter(getter), - setter(setter), - query(query), - deleter(deleter), + : getter(reinterpret_cast(getter)), + setter(reinterpret_cast(setter)), + query(reinterpret_cast(query)), + deleter(reinterpret_cast(deleter)), enumerator(enumerator), definer(nullptr), descriptor(nullptr), data(data), flags(flags) {} + IndexedPropertyHandlerConfiguration( + IndexedPropertyGetterCallbackV2 getter, + IndexedPropertySetterCallbackV2 setter, + IndexedPropertyDescriptorCallbackV2 descriptor, + IndexedPropertyDeleterCallbackV2 deleter, + IndexedPropertyEnumeratorCallback enumerator, + IndexedPropertyDefinerCallbackV2 definer, + Local data = Local(), + PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) + : getter(reinterpret_cast(getter)), + setter(reinterpret_cast(setter)), + query(nullptr), + deleter(reinterpret_cast(deleter)), + enumerator(enumerator), + definer(reinterpret_cast(definer)), + descriptor(reinterpret_cast(descriptor)), + data(data), + flags(WithNewSignatureFlag(flags)) {} + + // This variant will be deprecated soon. IndexedPropertyHandlerConfiguration( IndexedPropertyGetterCallback getter, IndexedPropertySetterCallback setter, @@ -747,23 +988,23 @@ struct IndexedPropertyHandlerConfiguration { IndexedPropertyDefinerCallback definer, Local data = Local(), PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) - : getter(getter), - setter(setter), + : getter(reinterpret_cast(getter)), + setter(reinterpret_cast(setter)), query(nullptr), - deleter(deleter), + deleter(reinterpret_cast(deleter)), enumerator(enumerator), - definer(definer), - descriptor(descriptor), + definer(reinterpret_cast(definer)), + descriptor(reinterpret_cast(descriptor)), data(data), flags(flags) {} - IndexedPropertyGetterCallback getter; - IndexedPropertySetterCallback setter; - IndexedPropertyQueryCallback query; - IndexedPropertyDeleterCallback deleter; + void* getter; // IndexedPropertyGetterCallback[V2] + void* setter; // IndexedPropertySetterCallback[V2] + void* query; // IndexedPropertyQueryCallback[V2] + void* deleter; // IndexedPropertyDeleterCallback[V2] IndexedPropertyEnumeratorCallback enumerator; - IndexedPropertyDefinerCallback definer; - IndexedPropertyDescriptorCallback descriptor; + void* definer; // IndexedPropertyDefinerCallback[V2] + void* descriptor; // IndexedPropertyDescriptorCallback[V2] Local data; PropertyHandlerFlags flags; }; @@ -804,6 +1045,7 @@ class V8_EXPORT ObjectTemplate : public Template { * \param attribute The attributes of the property for which an accessor * is added. */ + V8_DEPRECATE_SOON("Use SetAccessor with Local instead") void SetAccessor( Local name, AccessorGetterCallback getter, AccessorSetterCallback setter = nullptr, @@ -846,7 +1088,7 @@ class V8_EXPORT ObjectTemplate : public Template { * \param data A piece of data that will be passed to the callbacks * whenever they are invoked. 
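// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the indexed analogue, wiring
// the hypothetical V2 getter from the earlier sketch into
// IndexedPropertyHandlerConfiguration.
#include <cstdint>
#include "v8.h"

v8::Intercepted ExampleIndexedGetter(
    uint32_t, const v8::PropertyCallbackInfo<v8::Value>&);

v8::Local<v8::ObjectTemplate> MakeIndexedInterceptorTemplate(
    v8::Isolate* isolate) {
  v8::EscapableHandleScope scope(isolate);
  v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
  templ->SetHandler(
      v8::IndexedPropertyHandlerConfiguration(ExampleIndexedGetter));
  return scope.Escape(templ);
}
// ---------------------------------------------------------------------------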
*/ - // TODO(dcarney): deprecate + V8_DEPRECATE_SOON("Use SetHandler instead") void SetIndexedPropertyHandler( IndexedPropertyGetterCallback getter, IndexedPropertySetterCallback setter = nullptr, @@ -951,8 +1193,7 @@ class V8_EXPORT ObjectTemplate : public Template { private: ObjectTemplate(); - static Local New(internal::Isolate* isolate, - Local constructor); + static void CheckCast(Data* that); friend class FunctionTemplate; }; diff --git a/deps/v8/include/v8-typed-array.h b/deps/v8/include/v8-typed-array.h index 9cb645fb02c4d1..66e21f470acb7e 100644 --- a/deps/v8/include/v8-typed-array.h +++ b/deps/v8/include/v8-typed-array.h @@ -249,6 +249,30 @@ class V8_EXPORT Int32Array : public TypedArray { static void CheckCast(Value* obj); }; +/** + * An instance of Float16Array constructor. + */ +class V8_EXPORT Float16Array : public TypedArray { + static constexpr size_t kMaxLength = + TypedArray::kMaxByteLength / sizeof(uint16_t); + + public: + static Local New(Local array_buffer, + size_t byte_offset, size_t length); + static Local New(Local shared_array_buffer, + size_t byte_offset, size_t length); + V8_INLINE static Float16Array* Cast(Value* value) { +#ifdef V8_ENABLE_CHECKS + CheckCast(value); +#endif + return static_cast(value); + } + + private: + Float16Array(); + static void CheckCast(Value* obj); +}; + /** * An instance of Float32Array constructor (ES6 draft 15.13.6). */ diff --git a/deps/v8/include/v8-value.h b/deps/v8/include/v8-value.h index 9356cd626163dd..ac04525d86d759 100644 --- a/deps/v8/include/v8-value.h +++ b/deps/v8/include/v8-value.h @@ -301,6 +301,11 @@ class V8_EXPORT Value : public Data { */ bool IsInt32Array() const; + /** + * Returns true if this value is a Float16Array. + */ + bool IsFloat16Array() const; + /** * Returns true if this value is a Float32Array. */ diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index c3c0da86379d07..f16596a58fa667 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 12 -#define V8_MINOR_VERSION 3 -#define V8_BUILD_NUMBER 219 -#define V8_PATCH_LEVEL 16 +#define V8_MINOR_VERSION 4 +#define V8_BUILD_NUMBER 254 +#define V8_PATCH_LEVEL 14 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index c9b20e7d643cdc..a498d240e3fd83 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -69,7 +69,7 @@ 'V8 Linux64 - builder (goma cache silo)': 'release_x64', 'V8 Linux64 - builder (reclient)': 'release_x64_reclient', 'V8 Linux64 - builder (reclient compare)': 'release_x64_reclient', - 'V8 Linux64 - official - builder': 'official_x64', + 'V8 Linux64 - official - builder': 'official_x64_on_release_branch', 'V8 Linux64 - debug builder': 'debug_x64', 'V8 Linux64 - no shared cage - debug builder': 'debug_x64_no_shared_cage', 'V8 Linux64 - external code space - debug - builder': 'debug_x64_external_code_space', @@ -117,6 +117,7 @@ # FYI. 
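// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): constructing the newly
// exposed Float16Array over an ArrayBuffer and recognizing one via
// Value::IsFloat16Array(), per the v8-typed-array.h and v8-value.h additions
// above. Assumes the caller has entered the isolate, a context and a handle
// scope; JS-level Float16Array support may additionally be gated behind a
// runtime flag in this V8 version.
#include <cstddef>
#include <cstdint>
#include "v8.h"

v8::Local<v8::Float16Array> MakeFloat16Array(v8::Isolate* isolate,
                                             size_t length) {
  v8::Local<v8::ArrayBuffer> buffer =
      v8::ArrayBuffer::New(isolate, length * sizeof(uint16_t));
  return v8::Float16Array::New(buffer, /*byte_offset=*/0, length);
}

bool IsHalfFloatArray(v8::Local<v8::Value> value) {
  return value->IsFloat16Array();
}
// ---------------------------------------------------------------------------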
'V8 iOS - sim - builder': 'release_x64_ios_simulator', 'V8 Linux64 - arm64 - builder': 'release_arm64', + 'V8 Linux64 - arm64 - no pointer compression - builder': 'release_arm64_no_pointer_compression', 'V8 Linux64 - arm64 - debug builder': 'debug_arm64', 'V8 Linux64 - arm64 - sim - no pointer compression - builder': 'release_simulate_arm64_no_pointer_compression', @@ -183,8 +184,10 @@ 'V8 Clusterfuzz Linux64 TSAN - release builder': 'release_x64_tsan', 'V8 Clusterfuzz Linux64 UBSan - release builder': 'release_x64_ubsan_recover', + 'V8 Clusterfuzz Linux64 sandbox testing - release builder': + 'release_x64_sandbox_testing', 'V8 Clusterfuzz Linux64 ASAN sandbox testing - release builder': - 'release_x64_asan_symbolized_expose_memory_corruption', + 'release_x64_asan_sandbox_testing', }, 'client.v8.perf' : { # Arm @@ -257,6 +260,7 @@ 'v8_linux_vtunejit': 'debug_x86_vtunejit', 'v8_linux64_arm64_compile_dbg': 'debug_arm64_trybot', 'v8_linux64_arm64_compile_rel': 'release_arm64_trybot', + 'v8_linux64_native_arm64_no_pointer_compression_compile_rel': 'release_arm64_no_pointer_compression_trybot', 'v8_linux64_arm64_no_pointer_compression_compile_rel': 'release_simulate_arm64_no_pointer_compression', 'v8_linux64_asan_centipede_compile_dbg': 'debug_x64_asan_centipede', @@ -284,7 +288,7 @@ 'v8_linux64_no_pointer_compression_compile_rel': 'release_x64_no_pointer_compression', 'v8_linux64_compile_rel': 'release_x64_test_features_gcmole_trybot', 'v8_linux64_no_sandbox_compile_rel': 'release_x64_no_sandbox', - 'v8_linux64_official_compile_rel': 'official_x64', + 'v8_linux64_official_compile_rel': 'official_x64_on_release_branch', 'v8_linux64_predictable_compile_rel': 'release_x64_predictable', 'v8_linux64_pku_compile_rel': 'release_x64', 'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap', @@ -293,7 +297,8 @@ 'v8_linux64_arm64_no_wasm_compile_dbg': 'debug_arm64_webassembly_disabled', 'v8_linux64_verify_csa_compile_rel': 'release_x64_verify_csa', 'v8_linux64_asan_compile_rel': 'release_x64_asan_minimal_symbols', - 'v8_linux64_asan_sandbox_compile_rel': 'release_x64_asan_symbolized_expose_memory_corruption', + 'v8_linux64_sandbox_testing_compile_rel': 'release_x64_sandbox_testing', + 'v8_linux64_asan_sandbox_testing_compile_rel': 'release_x64_asan_sandbox_testing', 'v8_linux64_cfi_compile_rel': 'release_x64_cfi', 'v8_linux64_fuzzilli_compile_rel': 'release_x64_fuzzilli', 'v8_linux64_loong64_compile_rel': 'release_simulate_loong64', @@ -507,10 +512,14 @@ 'release_bot', 'arm', 'hard_float'], 'release_arm64': [ 'release_bot', 'arm64'], + 'release_arm64_no_pointer_compression': [ + 'release_bot', 'arm64', 'v8_disable_pointer_compression'], 'release_arm64_trybot': [ 'release_trybot', 'arm64'], 'release_arm64_hard_float': [ 'release_bot', 'arm64', 'hard_float'], + 'release_arm64_no_pointer_compression_trybot': [ + 'release_trybot', 'arm64', 'v8_disable_pointer_compression'], 'release_android_arm': [ 'release_bot', 'arm', 'android', 'minimal_symbols', 'android_strip_outputs'], @@ -562,9 +571,11 @@ 'release_x64_asan_no_lsan_verify_heap_dchecks': [ 'release_bot', 'x64', 'asan', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_verify_heap'], - 'release_x64_asan_symbolized_expose_memory_corruption': [ + 'release_x64_sandbox_testing': [ + 'release_bot', 'x64', 'symbolized', 'v8_enable_memory_corruption_api'], + 'release_x64_asan_sandbox_testing': [ 'release_bot', 'x64', 'asan', 'symbolized', - 'v8_expose_memory_corruption_api'], + 'v8_enable_memory_corruption_api'], 
'release_x64_asan_symbolized_verify_heap': [ 'release_bot', 'x64', 'asan', 'lsan', 'symbolized', 'v8_verify_heap'], @@ -642,6 +653,9 @@ 'official_x64_pgo': [ 'release_bot', 'x64', 'official', 'disable_chrome_pgo', 'builtins_optimization'], + 'official_x64_on_release_branch': [ + 'release_bot', 'x64', 'official', 'disable_chrome_pgo', + 'v8_is_on_release_branch'], # Debug configs for x64. 'debug_x64': [ @@ -1042,8 +1056,8 @@ 'gn_args': 'v8_enable_verify_heap=false', }, - 'v8_expose_memory_corruption_api': { - 'gn_args': 'v8_expose_memory_corruption_api=true', + 'v8_enable_memory_corruption_api': { + 'gn_args': 'v8_enable_memory_corruption_api=true', }, 'v8_enable_lite_mode': { @@ -1107,6 +1121,10 @@ 'gn_args': 'v8_optimized_debug=false', }, + 'v8_is_on_release_branch': { + 'gn_args': 'v8_is_on_release_branch=true', + }, + 'v8_optimized_debug': { # This is the default in gn for debug. }, diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index 674f5f2d572151..2aca712ca73f8a 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -402,6 +402,9 @@ }, 'tests': [ {'name': 'v8testing', 'shards': 5}, + {'name': 'benchmarks', 'shards': 5}, + {'name': 'mozilla', 'shards': 5}, + {'name': 'test262', 'shards': 10}, ], }, 'v8_linux64_dbg': { @@ -553,7 +556,6 @@ {'name': 'benchmarks', 'variant': 'minor_ms'}, {'name': 'mozilla', 'variant': 'minor_ms'}, {'name': 'test262', 'variant': 'minor_ms', 'shards': 2}, - {'name': 'mjsunit', 'variant': 'minor_ms'}, ], }, 'v8_linux64_msan_rel': { @@ -1506,7 +1508,6 @@ {'name': 'benchmarks', 'variant': 'minor_ms'}, {'name': 'mozilla', 'variant': 'minor_ms'}, {'name': 'test262', 'variant': 'minor_ms', 'shards': 2}, - {'name': 'mjsunit', 'variant': 'minor_ms'}, ], }, 'V8 Linux64 - disable runtime call stats': { @@ -1732,6 +1733,9 @@ }, 'tests': [ {'name': 'v8testing', 'shards': 5}, + {'name': 'benchmarks', 'shards': 5}, + {'name': 'mozilla', 'shards': 5}, + {'name': 'test262', 'shards': 10}, ], }, 'V8 Linux64 GC Stress - custom snapshot': { diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc index eba72bc0c238db..d773e9b9d1870c 100644 --- a/deps/v8/samples/process.cc +++ b/deps/v8/samples/process.cc @@ -140,13 +140,13 @@ class JsHttpRequestProcessor : public HttpRequestProcessor { static Local MakeMapTemplate(Isolate* isolate); // Callbacks that access the individual fields of request objects. - static void GetPath(Local name, + static void GetPath(Local name, const PropertyCallbackInfo& info); - static void GetReferrer(Local name, + static void GetReferrer(Local name, const PropertyCallbackInfo& info); - static void GetHost(Local name, + static void GetHost(Local name, const PropertyCallbackInfo& info); - static void GetUserAgent(Local name, + static void GetUserAgent(Local name, const PropertyCallbackInfo& info); // Callbacks that access maps @@ -507,8 +507,7 @@ HttpRequest* JsHttpRequestProcessor::UnwrapRequest(Local obj) { return static_cast(ptr); } - -void JsHttpRequestProcessor::GetPath(Local name, +void JsHttpRequestProcessor::GetPath(Local name, const PropertyCallbackInfo& info) { // Extract the C++ request object from the JavaScript wrapper. 
HttpRequest* request = UnwrapRequest(info.Holder()); @@ -523,10 +522,8 @@ void JsHttpRequestProcessor::GetPath(Local name, static_cast(path.length())).ToLocalChecked()); } - void JsHttpRequestProcessor::GetReferrer( - Local name, - const PropertyCallbackInfo& info) { + Local name, const PropertyCallbackInfo& info) { HttpRequest* request = UnwrapRequest(info.Holder()); const string& path = request->Referrer(); info.GetReturnValue().Set( @@ -535,8 +532,7 @@ void JsHttpRequestProcessor::GetReferrer( static_cast(path.length())).ToLocalChecked()); } - -void JsHttpRequestProcessor::GetHost(Local name, +void JsHttpRequestProcessor::GetHost(Local name, const PropertyCallbackInfo& info) { HttpRequest* request = UnwrapRequest(info.Holder()); const string& path = request->Host(); @@ -546,10 +542,8 @@ void JsHttpRequestProcessor::GetHost(Local name, static_cast(path.length())).ToLocalChecked()); } - void JsHttpRequestProcessor::GetUserAgent( - Local name, - const PropertyCallbackInfo& info) { + Local name, const PropertyCallbackInfo& info) { HttpRequest* request = UnwrapRequest(info.Holder()); const string& path = request->UserAgent(); info.GetReturnValue().Set( @@ -558,7 +552,6 @@ void JsHttpRequestProcessor::GetUserAgent( static_cast(path.length())).ToLocalChecked()); } - Local JsHttpRequestProcessor::MakeRequestTemplate( Isolate* isolate) { EscapableHandleScope handle_scope(isolate); diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS index aeedfd8bb70aee..d9c58d01236454 100644 --- a/deps/v8/src/DEPS +++ b/deps/v8/src/DEPS @@ -15,8 +15,9 @@ include_rules = [ "+src/compiler/turboshaft/wasm-turboshaft-compiler.h", "+src/compiler/wasm-compiler-definitions.h", "+src/compiler/wasm-compiler.h", + "-src/flags/flags-impl.h", "-src/heap", - "+src/heap/basic-memory-chunk.h", + "+src/heap/memory-chunk-metadata.h", "+src/heap/code-range.h", "+src/heap/trusted-range.h", "+src/heap/combined-heap.h", @@ -37,9 +38,9 @@ include_rules = [ "+src/heap/local-heap-inl.h", "+src/heap/pretenuring-handler-inl.h", # TODO(v8:10496): Don't expose memory chunk outside of heap/. + "+src/heap/mutable-page.h", + "+src/heap/mutable-page-inl.h", "+src/heap/memory-chunk.h", - "+src/heap/memory-chunk-inl.h", - "+src/heap/memory-chunk-header.h", "+src/heap/paged-spaces-inl.h", "+src/heap/parked-scope-inl.h", "+src/heap/parked-scope.h", diff --git a/deps/v8/src/api/api-arguments-inl.h b/deps/v8/src/api/api-arguments-inl.h index 563884b90ef738..de2648c8d60528 100644 --- a/deps/v8/src/api/api-arguments-inl.h +++ b/deps/v8/src/api/api-arguments-inl.h @@ -47,6 +47,19 @@ Handle CustomArguments::GetReturnValue(Isolate* isolate) const { return Handle::cast(Handle(slot.location())); } +template +template +Handle CustomArguments::GetReturnValueNoHoleCheck( + Isolate* isolate) const { + // Check the ReturnValue. + FullObjectSlot slot = slot_at(kReturnValueIndex); + // TODO(ishell): remove the hole check once it's no longer possible to set + // return value to the hole. 
+ CHECK(!IsTheHole(*slot, isolate)); + DCHECK(IsApiCallResultType(*slot)); + return Handle::cast(Handle(slot.location())); +} + inline Tagged PropertyCallbackArguments::holder() const { return JSObject::cast(*slot_at(T::kHolderIndex)); } @@ -85,15 +98,15 @@ inline Tagged FunctionCallbackArguments::holder() const { PropertyCallbackInfo callback_info(values_); Handle FunctionCallbackArguments::Call( - Tagged handler) { + Tagged function) { Isolate* isolate = this->isolate(); RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionCallback); v8::FunctionCallback f = - reinterpret_cast(handler->callback(isolate)); + reinterpret_cast(function->callback(isolate)); Handle receiver_check_unsupported; if (isolate->should_check_side_effects() && !isolate->debug()->PerformSideEffectCheckForCallback( - handle(handler, isolate))) { + handle(function, isolate))) { return {}; } ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); @@ -129,11 +142,21 @@ Handle PropertyCallbackArguments::CallNamedQuery( Isolate* isolate = this->isolate(); RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedQueryCallback); Handle receiver_check_unsupported; - GenericNamedPropertyQueryCallback f = - ToCData(interceptor->query()); - PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Integer, interceptor); - f(v8::Utils::ToLocal(name), callback_info); - return GetReturnValue(isolate); + if (interceptor->has_new_callbacks_signature()) { + NamedPropertyQueryCallback f = + ToCData(interceptor->query()); + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Integer, interceptor); + auto intercepted = f(v8::Utils::ToLocal(name), callback_info); + if (intercepted == v8::Intercepted::kNo) return {}; + return GetReturnValueNoHoleCheck(isolate); + + } else { + GenericNamedPropertyQueryCallback f = + ToCData(interceptor->query()); + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Integer, interceptor); + f(v8::Utils::ToLocal(name), callback_info); + return GetReturnValue(isolate); + } } Handle PropertyCallbackArguments::CallNamedGetter( @@ -141,11 +164,21 @@ Handle PropertyCallbackArguments::CallNamedGetter( DCHECK_NAME_COMPATIBLE(interceptor, name); Isolate* isolate = this->isolate(); RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedGetterCallback); - GenericNamedPropertyGetterCallback f = - ToCData(interceptor->getter()); - PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor); - f(v8::Utils::ToLocal(name), callback_info); - return GetReturnValue(isolate); + if (interceptor->has_new_callbacks_signature()) { + NamedPropertyGetterCallback f = + ToCData(interceptor->getter()); + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor); + auto intercepted = f(v8::Utils::ToLocal(name), callback_info); + if (intercepted == v8::Intercepted::kNo) return {}; + return GetReturnValueNoHoleCheck(isolate); + + } else { + GenericNamedPropertyGetterCallback f = + ToCData(interceptor->getter()); + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor); + f(v8::Utils::ToLocal(name), callback_info); + return GetReturnValue(isolate); + } } Handle PropertyCallbackArguments::CallNamedDescriptor( @@ -153,53 +186,104 @@ Handle PropertyCallbackArguments::CallNamedDescriptor( DCHECK_NAME_COMPATIBLE(interceptor, name); Isolate* isolate = this->isolate(); RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedDescriptorCallback); - GenericNamedPropertyDescriptorCallback f = - ToCData( - interceptor->descriptor()); - PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor); - f(v8::Utils::ToLocal(name), callback_info); - 
return GetReturnValue(isolate); + if (interceptor->has_new_callbacks_signature()) { + NamedPropertyDescriptorCallback f = + ToCData(interceptor->descriptor()); + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor); + auto intercepted = f(v8::Utils::ToLocal(name), callback_info); + if (intercepted == v8::Intercepted::kNo) return {}; + return GetReturnValueNoHoleCheck(isolate); + + } else { + GenericNamedPropertyDescriptorCallback f = + ToCData( + interceptor->descriptor()); + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor); + f(v8::Utils::ToLocal(name), callback_info); + return GetReturnValue(isolate); + } } +// TODO(ishell): just return v8::Intercepted. Handle PropertyCallbackArguments::CallNamedSetter( Handle interceptor, Handle name, Handle value) { DCHECK_NAME_COMPATIBLE(interceptor, name); Isolate* isolate = this->isolate(); RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedSetterCallback); - GenericNamedPropertySetterCallback f = - ToCData(interceptor->setter()); - Handle has_side_effects; - PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, has_side_effects); - f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info); - return GetReturnValue(isolate); + if (interceptor->has_new_callbacks_signature()) { + NamedPropertySetterCallback f = + ToCData(interceptor->setter()); + Handle has_side_effects; + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, void, has_side_effects); + auto intercepted = + f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info); + if (intercepted == v8::Intercepted::kNo) return {}; + // Non-empty handle indicates that the request was intercepted. + return isolate->factory()->undefined_value(); + + } else { + GenericNamedPropertySetterCallback f = + ToCData(interceptor->setter()); + Handle has_side_effects; + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, has_side_effects); + f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info); + return GetReturnValue(isolate); + } } +// TODO(ishell): just return v8::Intercepted. Handle PropertyCallbackArguments::CallNamedDefiner( Handle interceptor, Handle name, const v8::PropertyDescriptor& desc) { DCHECK_NAME_COMPATIBLE(interceptor, name); Isolate* isolate = this->isolate(); RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedDefinerCallback); - GenericNamedPropertyDefinerCallback f = - ToCData(interceptor->definer()); - Handle has_side_effects; - PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, has_side_effects); - f(v8::Utils::ToLocal(name), desc, callback_info); - return GetReturnValue(isolate); + if (interceptor->has_new_callbacks_signature()) { + NamedPropertyDefinerCallback f = + ToCData(interceptor->definer()); + Handle has_side_effects; + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, void, has_side_effects); + auto intercepted = f(v8::Utils::ToLocal(name), desc, callback_info); + if (intercepted == v8::Intercepted::kNo) return {}; + // Non-empty handle indicates that the request was intercepted. 
+ return isolate->factory()->undefined_value(); + + } else { + GenericNamedPropertyDefinerCallback f = + ToCData(interceptor->definer()); + Handle has_side_effects; + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, has_side_effects); + f(v8::Utils::ToLocal(name), desc, callback_info); + return GetReturnValue(isolate); + } } +// TODO(ishell): return Handle Handle PropertyCallbackArguments::CallNamedDeleter( Handle interceptor, Handle name) { DCHECK_NAME_COMPATIBLE(interceptor, name); Isolate* isolate = this->isolate(); RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedDeleterCallback); - GenericNamedPropertyDeleterCallback f = - ToCData(interceptor->deleter()); - Handle has_side_effects; - PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Boolean, has_side_effects); - f(v8::Utils::ToLocal(name), callback_info); - return GetReturnValue(isolate); + if (interceptor->has_new_callbacks_signature()) { + NamedPropertyDeleterCallback f = + ToCData(interceptor->deleter()); + Handle has_side_effects; + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Boolean, + has_side_effects); + auto intercepted = f(v8::Utils::ToLocal(name), callback_info); + if (intercepted == v8::Intercepted::kNo) return {}; + return GetReturnValue(isolate); + + } else { + GenericNamedPropertyDeleterCallback f = + ToCData(interceptor->deleter()); + Handle has_side_effects; + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Boolean, + has_side_effects); + f(v8::Utils::ToLocal(name), callback_info); + return GetReturnValue(isolate); + } } // ------------------------------------------------------------------------- @@ -217,23 +301,44 @@ Handle PropertyCallbackArguments::CallIndexedQuery( DCHECK(!interceptor->is_named()); Isolate* isolate = this->isolate(); RCS_SCOPE(isolate, RuntimeCallCounterId::kIndexedQueryCallback); - IndexedPropertyQueryCallback f = - ToCData(interceptor->query()); - PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Integer, interceptor); - f(index, callback_info); - return GetReturnValue(isolate); + if (interceptor->has_new_callbacks_signature()) { + IndexedPropertyQueryCallbackV2 f = + ToCData(interceptor->query()); + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Integer, interceptor); + auto intercepted = f(index, callback_info); + if (intercepted == v8::Intercepted::kNo) return {}; + return GetReturnValueNoHoleCheck(isolate); + + } else { + IndexedPropertyQueryCallback f = + ToCData(interceptor->query()); + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Integer, interceptor); + f(index, callback_info); + return GetReturnValue(isolate); + } } Handle PropertyCallbackArguments::CallIndexedGetter( Handle interceptor, uint32_t index) { DCHECK(!interceptor->is_named()); RCS_SCOPE(isolate(), RuntimeCallCounterId::kNamedGetterCallback); - IndexedPropertyGetterCallback f = - ToCData(interceptor->getter()); - Isolate* isolate = this->isolate(); - PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor); - f(index, callback_info); - return GetReturnValue(isolate); + if (interceptor->has_new_callbacks_signature()) { + IndexedPropertyGetterCallbackV2 f = + ToCData(interceptor->getter()); + Isolate* isolate = this->isolate(); + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor); + auto intercepted = f(index, callback_info); + if (intercepted == v8::Intercepted::kNo) return {}; + return GetReturnValueNoHoleCheck(isolate); + + } else { + IndexedPropertyGetterCallback f = + ToCData(interceptor->getter()); + Isolate* isolate = this->isolate(); + 
PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor); + f(index, callback_info); + return GetReturnValue(isolate); + } } Handle PropertyCallbackArguments::CallIndexedDescriptor( @@ -241,58 +346,106 @@ Handle PropertyCallbackArguments::CallIndexedDescriptor( DCHECK(!interceptor->is_named()); Isolate* isolate = this->isolate(); RCS_SCOPE(isolate, RuntimeCallCounterId::kIndexedDescriptorCallback); - IndexedPropertyDescriptorCallback f = - ToCData(interceptor->descriptor()); - PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor); - f(index, callback_info); - return GetReturnValue(isolate); + if (interceptor->has_new_callbacks_signature()) { + IndexedPropertyDescriptorCallbackV2 f = + ToCData(interceptor->descriptor()); + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor); + auto intercepted = f(index, callback_info); + if (intercepted == v8::Intercepted::kNo) return {}; + return GetReturnValueNoHoleCheck(isolate); + + } else { + IndexedPropertyDescriptorCallback f = + ToCData(interceptor->descriptor()); + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor); + f(index, callback_info); + return GetReturnValue(isolate); + } } +// TODO(ishell): just return v8::Intercepted. Handle PropertyCallbackArguments::CallIndexedSetter( Handle interceptor, uint32_t index, Handle value) { DCHECK(!interceptor->is_named()); Isolate* isolate = this->isolate(); RCS_SCOPE(isolate, RuntimeCallCounterId::kIndexedSetterCallback); - IndexedPropertySetterCallback f = - ToCData(interceptor->setter()); - Handle has_side_effects; - PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, has_side_effects); - f(index, v8::Utils::ToLocal(value), callback_info); - return GetReturnValue(isolate); + if (interceptor->has_new_callbacks_signature()) { + IndexedPropertySetterCallbackV2 f = + ToCData(interceptor->setter()); + Handle has_side_effects; + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, void, has_side_effects); + auto intercepted = f(index, v8::Utils::ToLocal(value), callback_info); + if (intercepted == v8::Intercepted::kNo) return {}; + // Non-empty handle indicates that the request was intercepted. + return isolate->factory()->undefined_value(); + + } else { + IndexedPropertySetterCallback f = + ToCData(interceptor->setter()); + Handle has_side_effects; + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, has_side_effects); + f(index, v8::Utils::ToLocal(value), callback_info); + return GetReturnValue(isolate); + } } +// TODO(ishell): just return v8::Intercepted. Handle PropertyCallbackArguments::CallIndexedDefiner( Handle interceptor, uint32_t index, const v8::PropertyDescriptor& desc) { DCHECK(!interceptor->is_named()); Isolate* isolate = this->isolate(); RCS_SCOPE(isolate, RuntimeCallCounterId::kIndexedDefinerCallback); - IndexedPropertyDefinerCallback f = - ToCData(interceptor->definer()); - Handle has_side_effects; - PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, has_side_effects); - f(index, desc, callback_info); - return GetReturnValue(isolate); + if (interceptor->has_new_callbacks_signature()) { + IndexedPropertyDefinerCallbackV2 f = + ToCData(interceptor->definer()); + Handle has_side_effects; + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, void, has_side_effects); + auto intercepted = f(index, desc, callback_info); + if (intercepted == v8::Intercepted::kNo) return {}; + // Non-empty handle indicates that the request was intercepted. 
+ return isolate->factory()->undefined_value(); + + } else { + IndexedPropertyDefinerCallback f = + ToCData(interceptor->definer()); + Handle has_side_effects; + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, has_side_effects); + f(index, desc, callback_info); + return GetReturnValue(isolate); + } } +// TODO(ishell): return Handle Handle PropertyCallbackArguments::CallIndexedDeleter( Handle interceptor, uint32_t index) { DCHECK(!interceptor->is_named()); Isolate* isolate = this->isolate(); RCS_SCOPE(isolate, RuntimeCallCounterId::kIndexedDeleterCallback); - IndexedPropertyDeleterCallback f = - ToCData(interceptor->deleter()); - PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Boolean, interceptor); - f(index, callback_info); - return GetReturnValue(isolate); + if (interceptor->has_new_callbacks_signature()) { + IndexedPropertyDeleterCallbackV2 f = + ToCData(interceptor->deleter()); + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Boolean, interceptor); + auto intercepted = f(index, callback_info); + if (intercepted == v8::Intercepted::kNo) return {}; + return GetReturnValueNoHoleCheck(isolate); + + } else { + IndexedPropertyDeleterCallback f = + ToCData(interceptor->deleter()); + PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Boolean, interceptor); + f(index, callback_info); + return GetReturnValue(isolate); + } } Handle PropertyCallbackArguments::CallPropertyEnumerator( Handle interceptor) { - // For now there is a single enumerator for indexed and named properties. + // Named and indexed enumerator callbacks have same signatures. + static_assert(std::is_same::value); IndexedPropertyEnumeratorCallback f = v8::ToCData(interceptor->enumerator()); - // TODO(cbruni): assert same type for indexed and named callback. Isolate* isolate = this->isolate(); PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Array, interceptor); f(callback_info); diff --git a/deps/v8/src/api/api-arguments.h b/deps/v8/src/api/api-arguments.h index 18f28ce7bdf8ae..179559d8b251a0 100644 --- a/deps/v8/src/api/api-arguments.h +++ b/deps/v8/src/api/api-arguments.h @@ -42,6 +42,9 @@ class CustomArguments : public CustomArgumentsBase { template Handle GetReturnValue(Isolate* isolate) const; + template + Handle GetReturnValueNoHoleCheck(Isolate* isolate) const; + inline Isolate* isolate() const { return reinterpret_cast((*slot_at(T::kIsolateIndex)).ptr()); } @@ -209,7 +212,7 @@ class FunctionCallbackArguments * and used if it's been set to anything inside the callback. * New style callbacks always use the return value. */ - inline Handle Call(Tagged handler); + inline Handle Call(Tagged function); private: inline Tagged holder() const; diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc index 0f1747a2a911ad..e3ff723b53e38d 100644 --- a/deps/v8/src/api/api-natives.cc +++ b/deps/v8/src/api/api-natives.cc @@ -282,106 +282,6 @@ MaybeHandle ConfigureInstance(Isolate* isolate, Handle obj, return obj; } -// Whether or not to cache every instance: when we materialize a getter or -// setter from an lazy AccessorPair, we rely on this cache to be able to always -// return the same getter or setter. However, objects will be cloned anyways, -// so it's not observable if we didn't cache an instance. Furthermore, a badly -// behaved embedder might create an unlimited number of objects, so we limit -// the cache for those cases. 
-enum class CachingMode { kLimited, kUnlimited }; - -MaybeHandle ProbeInstantiationsCache( - Isolate* isolate, Handle native_context, int serial_number, - CachingMode caching_mode) { - DCHECK_NE(serial_number, TemplateInfo::kDoNotCache); - if (serial_number == TemplateInfo::kUncached) { - return {}; - } - - if (serial_number < TemplateInfo::kFastTemplateInstantiationsCacheSize) { - Tagged fast_cache = - native_context->fast_template_instantiations_cache(); - Handle object{fast_cache->get(serial_number), isolate}; - if (IsTheHole(*object, isolate)) return {}; - return Handle::cast(object); - } - if (caching_mode == CachingMode::kUnlimited || - (serial_number < TemplateInfo::kSlowTemplateInstantiationsCacheSize)) { - Tagged slow_cache = - native_context->slow_template_instantiations_cache(); - InternalIndex entry = slow_cache->FindEntry(isolate, serial_number); - if (entry.is_found()) { - return handle(JSObject::cast(slow_cache->ValueAt(entry)), isolate); - } - } - return {}; -} - -void CacheTemplateInstantiation(Isolate* isolate, - Handle native_context, - Handle data, - CachingMode caching_mode, - Handle object) { - DCHECK_NE(TemplateInfo::kDoNotCache, data->serial_number()); - - int serial_number = data->serial_number(); - if (serial_number == TemplateInfo::kUncached) { - serial_number = isolate->heap()->GetNextTemplateSerialNumber(); - } - - if (serial_number < TemplateInfo::kFastTemplateInstantiationsCacheSize) { - Handle fast_cache = - handle(native_context->fast_template_instantiations_cache(), isolate); - Handle new_cache = - FixedArray::SetAndGrow(isolate, fast_cache, serial_number, object); - if (*new_cache != *fast_cache) { - native_context->set_fast_template_instantiations_cache(*new_cache); - } - data->set_serial_number(serial_number); - } else if (caching_mode == CachingMode::kUnlimited || - (serial_number < - TemplateInfo::kSlowTemplateInstantiationsCacheSize)) { - Handle cache = - handle(native_context->slow_template_instantiations_cache(), isolate); - auto new_cache = - SimpleNumberDictionary::Set(isolate, cache, serial_number, object); - if (*new_cache != *cache) { - native_context->set_slow_template_instantiations_cache(*new_cache); - } - data->set_serial_number(serial_number); - } else { - // we've overflowed the cache limit, no more caching - data->set_serial_number(TemplateInfo::kDoNotCache); - } -} - -void UncacheTemplateInstantiation(Isolate* isolate, - Handle native_context, - Handle data, - CachingMode caching_mode) { - int serial_number = data->serial_number(); - if (serial_number < 0) return; - - if (serial_number < TemplateInfo::kFastTemplateInstantiationsCacheSize) { - Tagged fast_cache = - native_context->fast_template_instantiations_cache(); - DCHECK(!IsUndefined(fast_cache->get(serial_number), isolate)); - fast_cache->set(serial_number, ReadOnlyRoots{isolate}.undefined_value(), - SKIP_WRITE_BARRIER); - data->set_serial_number(TemplateInfo::kUncached); - } else if (caching_mode == CachingMode::kUnlimited || - (serial_number < - TemplateInfo::kSlowTemplateInstantiationsCacheSize)) { - Handle cache = - handle(native_context->slow_template_instantiations_cache(), isolate); - InternalIndex entry = cache->FindEntry(isolate, serial_number); - DCHECK(entry.is_found()); - cache = SimpleNumberDictionary::DeleteEntry(isolate, cache, entry); - native_context->set_slow_template_instantiations_cache(*cache); - data->set_serial_number(TemplateInfo::kUncached); - } -} - bool IsSimpleInstantiation(Isolate* isolate, Tagged info, Tagged new_target) { DisallowGarbageCollection 
no_gc; @@ -412,8 +312,9 @@ MaybeHandle InstantiateObject(Isolate* isolate, // Fast path. Handle result; if (should_cache && info->is_cached()) { - if (ProbeInstantiationsCache(isolate, isolate->native_context(), - info->serial_number(), CachingMode::kLimited) + if (TemplateInfo::ProbeInstantiationsCache( + isolate, isolate->native_context(), info->serial_number(), + TemplateInfo::CachingMode::kLimited) .ToHandle(&result)) { return isolate->factory()->CopyJSObject(result); } @@ -457,8 +358,9 @@ MaybeHandle InstantiateObject(Isolate* isolate, JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject"); // Don't cache prototypes. if (should_cache) { - CacheTemplateInstantiation(isolate, isolate->native_context(), info, - CachingMode::kLimited, result); + TemplateInfo::CacheTemplateInstantiation( + isolate, isolate->native_context(), info, + TemplateInfo::CachingMode::kLimited, result); result = isolate->factory()->CopyJSObject(result); } } @@ -495,8 +397,9 @@ MaybeHandle InstantiateFunction( bool should_cache = data->should_cache(); if (should_cache && data->is_cached()) { Handle result; - if (ProbeInstantiationsCache(isolate, native_context, data->serial_number(), - CachingMode::kUnlimited) + if (TemplateInfo::ProbeInstantiationsCache( + isolate, native_context, data->serial_number(), + TemplateInfo::CachingMode::kUnlimited) .ToHandle(&result)) { return Handle::cast(result); } @@ -537,23 +440,24 @@ MaybeHandle InstantiateFunction( if (!data->needs_access_check() && IsUndefined(data->GetNamedPropertyHandler(), isolate) && IsUndefined(data->GetIndexedPropertyHandler(), isolate)) { - function_type = v8_flags.embedder_instance_types && data->HasInstanceType() - ? static_cast(data->InstanceType()) - : JS_API_OBJECT_TYPE; + function_type = v8_flags.embedder_instance_types ? data->GetInstanceType() + : JS_API_OBJECT_TYPE; + DCHECK(InstanceTypeChecker::IsJSApiObject(function_type)); } Handle function = ApiNatives::CreateApiFunction( isolate, native_context, data, prototype, function_type, maybe_name); if (should_cache) { // Cache the function. - CacheTemplateInstantiation(isolate, native_context, data, - CachingMode::kUnlimited, function); + TemplateInfo::CacheTemplateInstantiation( + isolate, native_context, data, TemplateInfo::CachingMode::kUnlimited, + function); } MaybeHandle result = ConfigureInstance(isolate, function, data); if (result.is_null()) { // Uncache on error. 
- UncacheTemplateInstantiation(isolate, native_context, data, - CachingMode::kUnlimited); + TemplateInfo::UncacheTemplateInstantiation( + isolate, native_context, data, TemplateInfo::CachingMode::kUnlimited); return MaybeHandle(); } data->set_published(true); diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index 9ca94b045c26c9..a7070c494c211b 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -566,13 +566,23 @@ Isolate* SnapshotCreator::GetIsolate() { } void SnapshotCreator::SetDefaultContext( - Local context, SerializeInternalFieldsCallback callback) { - impl_->SetDefaultContext(Utils::OpenHandle(*context), callback); + Local context, + SerializeInternalFieldsCallback internal_fields_serializer, + SerializeContextDataCallback context_data_serializer) { + impl_->SetDefaultContext( + Utils::OpenHandle(*context), + i::SerializeEmbedderFieldsCallback(internal_fields_serializer, + context_data_serializer)); } -size_t SnapshotCreator::AddContext(Local context, - SerializeInternalFieldsCallback callback) { - return impl_->AddContext(Utils::OpenHandle(*context), callback); +size_t SnapshotCreator::AddContext( + Local context, + SerializeInternalFieldsCallback internal_fields_serializer, + SerializeContextDataCallback context_data_serializer) { + return impl_->AddContext( + Utils::OpenHandle(*context), + i::SerializeEmbedderFieldsCallback(internal_fields_serializer, + context_data_serializer)); } size_t SnapshotCreator::AddData(i::Address object) { @@ -768,33 +778,36 @@ void HandleHelper::VerifyOnMainThread() { #if V8_STATIC_ROOTS_BOOL -// Initialize static root constants exposed in v8-internal.h. +// Check static root constants exposed in v8-internal.h. namespace { constexpr InstanceTypeChecker::TaggedAddressRange kStringMapRange = *InstanceTypeChecker::UniqueMapRangeOfInstanceTypeRange(FIRST_STRING_TYPE, LAST_STRING_TYPE); -constexpr Tagged_t kFirstStringMapPtr = kStringMapRange.first; -constexpr Tagged_t kLastStringMapPtr = kStringMapRange.second; } // namespace -#define EXPORTED_STATIC_ROOTS_MAPPING(V) \ +#define EXPORTED_STATIC_ROOTS_PTR_MAPPING(V) \ V(UndefinedValue, i::StaticReadOnlyRoot::kUndefinedValue) \ V(NullValue, i::StaticReadOnlyRoot::kNullValue) \ V(TrueValue, i::StaticReadOnlyRoot::kTrueValue) \ V(FalseValue, i::StaticReadOnlyRoot::kFalseValue) \ V(EmptyString, i::StaticReadOnlyRoot::kempty_string) \ V(TheHoleValue, i::StaticReadOnlyRoot::kTheHoleValue) \ - V(FirstStringMap, kFirstStringMapPtr) \ - V(LastStringMap, kLastStringMapPtr) + V(FirstStringMap, kStringMapRange.first) \ + V(LastStringMap, kStringMapRange.second) static_assert(std::is_same::value); -#define DEF_STATIC_ROOT(name, internal_value) \ - const Internals::Tagged_t Internals::StaticReadOnlyRoot::k##name = \ - internal_value; -EXPORTED_STATIC_ROOTS_MAPPING(DEF_STATIC_ROOT) -#undef DEF_STATIC_ROOT -#undef EXPORTED_STATIC_ROOTS_MAPPING +// Ensure they have the correct value. +#define CHECK_STATIC_ROOT(name, value) \ + static_assert(Internals::StaticReadOnlyRoot::k##name == value); +EXPORTED_STATIC_ROOTS_PTR_MAPPING(CHECK_STATIC_ROOT) +#undef CHECK_STATIC_ROOT +#define PLUS_ONE(...) 
+1 +static constexpr int kNumberOfCheckedStaticRoots = + 0 EXPORTED_STATIC_ROOTS_PTR_MAPPING(PLUS_ONE); +#undef EXPORTED_STATIC_ROOTS_PTR_MAPPING +static_assert(Internals::StaticReadOnlyRoot::kNumberOfExportedStaticRoots == + kNumberOfCheckedStaticRoots); #endif // V8_STATIC_ROOTS_BOOL @@ -1130,15 +1143,6 @@ void Context::SetAlignedPointerInEmbedderData(int index, void* value) { // --- T e m p l a t e --- -static void InitializeTemplate(i::Tagged that, int type, - bool do_not_cache) { - that->set_number_of_properties(0); - that->set_tag(type); - int serial_number = - do_not_cache ? i::TemplateInfo::kDoNotCache : i::TemplateInfo::kUncached; - that->set_serial_number(serial_number); -} - void Template::Set(v8::Local name, v8::Local value, v8::PropertyAttribute attribute) { auto templ = Utils::OpenHandle(this); @@ -1172,58 +1176,32 @@ void Template::SetAccessorProperty(v8::Local name, v8::Local getter, v8::Local setter, v8::PropertyAttribute attribute) { - Utils::ApiCheck( - getter.IsEmpty() || - !IsUndefined( - Utils::OpenDirectHandle(*getter)->call_code(kAcquireLoad)), - "v8::Template::SetAccessorProperty", "Getter must have a call handler"); - Utils::ApiCheck( - setter.IsEmpty() || - !IsUndefined( - Utils::OpenDirectHandle(*setter)->call_code(kAcquireLoad)), - "v8::Template::SetAccessorProperty", "Setter must have a call handler"); - auto templ = Utils::OpenHandle(this); auto i_isolate = templ->GetIsolateChecked(); + i::Handle i_getter; + if (!getter.IsEmpty()) { + i_getter = Utils::OpenHandle(*getter); + Utils::ApiCheck(i_getter->has_callback(i_isolate), + "v8::Template::SetAccessorProperty", + "Getter must have a call handler"); + } + i::Handle i_setter; + if (!setter.IsEmpty()) { + i_setter = Utils::OpenHandle(*setter); + Utils::ApiCheck(i_setter->has_callback(i_isolate), + "v8::Template::SetAccessorProperty", + "Setter must have a call handler"); + } ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); DCHECK(!name.IsEmpty()); DCHECK(!getter.IsEmpty() || !setter.IsEmpty()); i::HandleScope scope(i_isolate); i::ApiNatives::AddAccessorProperty( - i_isolate, templ, Utils::OpenHandle(*name), - Utils::OpenHandle(*getter, true), Utils::OpenHandle(*setter, true), + i_isolate, templ, Utils::OpenHandle(*name), i_getter, i_setter, static_cast(attribute)); } // --- F u n c t i o n T e m p l a t e --- -static void InitializeFunctionTemplate(i::Tagged info, - bool do_not_cache) { - InitializeTemplate(info, Consts::FUNCTION_TEMPLATE, do_not_cache); - info->set_flag(0, kRelaxedStore); -} - -namespace { -Local ObjectTemplateNew(i::Isolate* i_isolate, - v8::Local constructor, - bool do_not_cache) { - API_RCS_SCOPE(i_isolate, ObjectTemplate, New); - ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - i::Handle struct_obj = i_isolate->factory()->NewStruct( - i::OBJECT_TEMPLATE_INFO_TYPE, i::AllocationType::kOld); - auto obj = i::Handle::cast(struct_obj); - { - // Disallow GC until all fields of obj have acceptable types. - i::DisallowGarbageCollection no_gc; - i::Tagged raw = *obj; - InitializeTemplate(raw, Consts::OBJECT_TEMPLATE, do_not_cache); - raw->set_data(0); - if (!constructor.IsEmpty()) { - raw->set_constructor(*Utils::OpenDirectHandle(*constructor)); - } - } - return Utils::ToLocal(obj); -} -} // namespace Local FunctionTemplate::PrototypeTemplate() { auto self = Utils::OpenHandle(this); @@ -1233,11 +1211,13 @@ Local FunctionTemplate::PrototypeTemplate() { i_isolate); if (i::IsUndefined(*heap_obj, i_isolate)) { // Do not cache prototype objects. 
- Local result = - ObjectTemplateNew(i_isolate, Local(), true); + constexpr bool do_not_cache = true; + i::Handle proto_template = + i_isolate->factory()->NewObjectTemplateInfo( + i::Handle(), do_not_cache); i::FunctionTemplateInfo::SetPrototypeTemplate(i_isolate, self, - Utils::OpenHandle(*result)); - return result; + proto_template); + return Utils::ToLocal(proto_template); } return ToApiHandle(heap_obj, i_isolate); } @@ -1267,48 +1247,35 @@ static void EnsureNotPublished(i::DirectHandle info, "FunctionTemplate already instantiated"); } -Local FunctionTemplateNew( +i::Handle FunctionTemplateNew( i::Isolate* i_isolate, FunctionCallback callback, v8::Local data, v8::Local signature, int length, ConstructorBehavior behavior, bool do_not_cache, v8::Local cached_property_name = v8::Local(), SideEffectType side_effect_type = SideEffectType::kHasSideEffect, - const MemorySpan& c_function_overloads = {}, - uint8_t instance_type = 0, - uint8_t allowed_receiver_instance_type_range_start = 0, - uint8_t allowed_receiver_instance_type_range_end = 0) { - i::Handle struct_obj = i_isolate->factory()->NewStruct( - i::FUNCTION_TEMPLATE_INFO_TYPE, i::AllocationType::kOld); - auto obj = i::Handle::cast(struct_obj); + const MemorySpan& c_function_overloads = {}) { + i::Handle obj = + i_isolate->factory()->NewFunctionTemplateInfo(length, do_not_cache); { // Disallow GC until all fields of obj have acceptable types. i::DisallowGarbageCollection no_gc; i::Tagged raw = *obj; - InitializeFunctionTemplate(raw, do_not_cache); - raw->set_length(length); - raw->set_undetectable(false); - raw->set_needs_access_check(false); - raw->set_accept_any_receiver(true); if (!signature.IsEmpty()) { raw->set_signature(*Utils::OpenDirectHandle(*signature)); } - raw->set_cached_property_name( - cached_property_name.IsEmpty() - ? i::ReadOnlyRoots(i_isolate).the_hole_value() - : *Utils::OpenDirectHandle(*cached_property_name)); - if (behavior == ConstructorBehavior::kThrow) + if (!cached_property_name.IsEmpty()) { + raw->set_cached_property_name( + *Utils::OpenDirectHandle(*cached_property_name)); + } + if (behavior == ConstructorBehavior::kThrow) { raw->set_remove_prototype(true); - raw->SetInstanceType(instance_type); - raw->set_allowed_receiver_instance_type_range_start( - allowed_receiver_instance_type_range_start); - raw->set_allowed_receiver_instance_type_range_end( - allowed_receiver_instance_type_range_end); + } } if (callback != nullptr) { Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type, c_function_overloads); } - return Utils::ToLocal(obj); + return obj; } } // namespace @@ -1342,24 +1309,43 @@ Local FunctionTemplate::New( return Local(); } - if (instance_type != 0) { + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + i::Handle templ = FunctionTemplateNew( + i_isolate, callback, data, signature, length, behavior, false, + Local(), side_effect_type, + c_function ? 
MemorySpan{c_function, 1} + : MemorySpan{}); + + if (instance_type) { if (!Utils::ApiCheck( - instance_type >= i::Internals::kFirstJSApiObjectType && - instance_type <= i::Internals::kLastJSApiObjectType, + base::IsInRange(static_cast(instance_type), + i::Internals::kFirstEmbedderJSApiObjectType, + i::Internals::kLastEmbedderJSApiObjectType), "FunctionTemplate::New", "instance_type is outside the range of valid JSApiObject types")) { return Local(); } + templ->SetInstanceType(instance_type); + } + + if (allowed_receiver_instance_type_range_start || + allowed_receiver_instance_type_range_end) { + if (!Utils::ApiCheck(i::Internals::kFirstEmbedderJSApiObjectType <= + allowed_receiver_instance_type_range_start && + allowed_receiver_instance_type_range_start <= + allowed_receiver_instance_type_range_end && + allowed_receiver_instance_type_range_end <= + i::Internals::kLastEmbedderJSApiObjectType, + "FunctionTemplate::New", + "allowed receiver instance type range is outside the " + "range of valid JSApiObject types")) { + return Local(); + } + templ->SetAllowedReceiverInstanceTypeRange( + allowed_receiver_instance_type_range_start, + allowed_receiver_instance_type_range_end); } - - ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - return FunctionTemplateNew( - i_isolate, callback, data, signature, length, behavior, false, - Local(), side_effect_type, - c_function ? MemorySpan{c_function, 1} - : MemorySpan{}, - instance_type, allowed_receiver_instance_type_range_start, - allowed_receiver_instance_type_range_end); + return Utils::ToLocal(templ); } Local FunctionTemplate::NewWithCFunctionOverloads( @@ -1379,9 +1365,10 @@ Local FunctionTemplate::NewWithCFunctionOverloads( } ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - return FunctionTemplateNew(i_isolate, callback, data, signature, length, - behavior, false, Local(), - side_effect_type, c_function_overloads); + i::Handle templ = FunctionTemplateNew( + i_isolate, callback, data, signature, length, behavior, false, + Local(), side_effect_type, c_function_overloads); + return Utils::ToLocal(templ); } Local FunctionTemplate::NewWithCache( @@ -1391,9 +1378,10 @@ Local FunctionTemplate::NewWithCache( i::Isolate* i_isolate = reinterpret_cast(v8_isolate); API_RCS_SCOPE(i_isolate, FunctionTemplate, NewWithCache); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - return FunctionTemplateNew(i_isolate, callback, data, signature, length, - ConstructorBehavior::kAllow, false, cache_property, - side_effect_type); + i::Handle templ = FunctionTemplateNew( + i_isolate, callback, data, signature, length, ConstructorBehavior::kAllow, + false, cache_property, side_effect_type); + return Utils::ToLocal(templ); } Local Signature::New(Isolate* v8_isolate, @@ -1416,14 +1404,15 @@ void FunctionTemplate::SetCallHandler( i::Isolate* i_isolate = info->GetIsolateChecked(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); i::HandleScope scope(i_isolate); - i::Handle obj = i_isolate->factory()->NewCallHandlerInfo( - side_effect_type == SideEffectType::kHasNoSideEffect); - obj->set_owner_template(*info); - obj->set_callback(i_isolate, reinterpret_cast(callback)); + info->set_has_side_effects(side_effect_type != + SideEffectType::kHasNoSideEffect); + info->set_callback(i_isolate, reinterpret_cast(callback)); if (data.IsEmpty()) { data = v8::Undefined(reinterpret_cast(i_isolate)); } - obj->set_data(*Utils::OpenDirectHandle(*data)); + // "Release" callback and callback data fields. 
+ info->set_callback_data(*Utils::OpenDirectHandle(*data), kReleaseStore); + if (!c_function_overloads.empty()) { // Stores the data for a sequence of CFunction overloads into a single // FixedArray, as [address_0, signature_0, ... address_n-1, signature_n-1]. @@ -1447,7 +1436,6 @@ void FunctionTemplate::SetCallHandler( i::FunctionTemplateInfo::SetCFunctionOverloads(i_isolate, info, function_overloads); } - info->set_call_code(*obj, kReleaseStore); } namespace { @@ -1490,24 +1478,25 @@ i::Handle MakeAccessorInfo(i::Isolate* i_isolate, } // namespace Local FunctionTemplate::InstanceTemplate() { - auto handle = Utils::OpenHandle(this, true); - if (!Utils::ApiCheck(!handle.is_null(), + auto constructor = Utils::OpenHandle(this, true); + if (!Utils::ApiCheck(!constructor.is_null(), "v8::FunctionTemplate::InstanceTemplate()", "Reading from empty handle")) { return Local(); } - i::Isolate* i_isolate = handle->GetIsolateChecked(); + i::Isolate* i_isolate = constructor->GetIsolateChecked(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - if (i::IsUndefined(handle->GetInstanceTemplate(), i_isolate)) { - Local templ = - ObjectTemplate::New(i_isolate, ToApiHandle(handle)); - i::FunctionTemplateInfo::SetInstanceTemplate(i_isolate, handle, - Utils::OpenHandle(*templ)); - } - return Utils::ToLocal(i::direct_handle(i::ObjectTemplateInfo::cast( - handle->GetInstanceTemplate()), - i_isolate), - i_isolate); + auto maybe_templ = constructor->GetInstanceTemplate(); + if (!i::IsUndefined(maybe_templ, i_isolate)) { + return Utils::ToLocal( + i::direct_handle(i::ObjectTemplateInfo::cast(maybe_templ), i_isolate), + i_isolate); + } + constexpr bool do_not_cache = false; + i::Handle templ = + i_isolate->factory()->NewObjectTemplateInfo(constructor, do_not_cache); + i::FunctionTemplateInfo::SetInstanceTemplate(i_isolate, constructor, templ); + return Utils::ToLocal(templ); } void FunctionTemplate::SetLength(int length) { @@ -1554,12 +1543,14 @@ void FunctionTemplate::RemovePrototype() { Local ObjectTemplate::New( Isolate* v8_isolate, v8::Local constructor) { - return New(reinterpret_cast(v8_isolate), constructor); -} - -Local ObjectTemplate::New( - i::Isolate* i_isolate, v8::Local constructor) { - return ObjectTemplateNew(i_isolate, constructor, false); + auto i_isolate = reinterpret_cast(v8_isolate); + API_RCS_SCOPE(i_isolate, ObjectTemplate, New); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + constexpr bool do_not_cache = false; + i::Handle obj = + i_isolate->factory()->NewObjectTemplateInfo( + Utils::OpenDirectHandle(*constructor, true), do_not_cache); + return Utils::ToLocal(obj); } namespace { @@ -1708,7 +1699,8 @@ template CreateInterceptorInfo( i::Isolate* i_isolate, Getter getter, Setter setter, Query query, Descriptor descriptor, Deleter remover, Enumerator enumerator, - Definer definer, Local data, PropertyHandlerFlags flags) { + Definer definer, Local data, + base::Flags flags) { auto obj = i::Handle::cast(i_isolate->factory()->NewStruct( i::INTERCEPTOR_INFO_TYPE, i::AllocationType::kOld)); @@ -1730,13 +1722,12 @@ i::Handle CreateInterceptorInfo( SET_FIELD_WRAPPED(i_isolate, obj, set_definer, definer); } obj->set_can_intercept_symbols( - !(static_cast(flags) & - static_cast(PropertyHandlerFlags::kOnlyInterceptStrings))); - obj->set_non_masking(static_cast(flags) & - static_cast(PropertyHandlerFlags::kNonMasking)); - obj->set_has_no_side_effect( - static_cast(flags) & - static_cast(PropertyHandlerFlags::kHasNoSideEffect)); + !(flags & PropertyHandlerFlags::kOnlyInterceptStrings)); + 
obj->set_non_masking(flags & PropertyHandlerFlags::kNonMasking); + obj->set_has_no_side_effect(flags & PropertyHandlerFlags::kHasNoSideEffect); + + obj->set_has_new_callbacks_signature( + flags & PropertyHandlerFlags::kInternalNewCallbacksSignatures); if (data.IsEmpty()) { data = v8::Undefined(reinterpret_cast(i_isolate)); @@ -1750,7 +1741,8 @@ template CreateNamedInterceptorInfo( i::Isolate* i_isolate, Getter getter, Setter setter, Query query, Descriptor descriptor, Deleter remover, Enumerator enumerator, - Definer definer, Local data, PropertyHandlerFlags flags) { + Definer definer, Local data, + base::Flags flags) { auto interceptor = CreateInterceptorInfo(i_isolate, getter, setter, query, descriptor, remover, enumerator, definer, data, flags); @@ -1763,7 +1755,8 @@ template CreateIndexedInterceptorInfo( i::Isolate* i_isolate, Getter getter, Setter setter, Query query, Descriptor descriptor, Deleter remover, Enumerator enumerator, - Definer definer, Local data, PropertyHandlerFlags flags) { + Definer definer, Local data, + base::Flags flags) { auto interceptor = CreateInterceptorInfo(i_isolate, getter, setter, query, descriptor, remover, enumerator, definer, data, flags); @@ -1891,15 +1884,17 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback, i::HandleScope scope(i_isolate); auto cons = EnsureConstructor(i_isolate, this); EnsureNotPublished(cons, "v8::ObjectTemplate::SetCallAsFunctionHandler"); - i::Handle obj = - i_isolate->factory()->NewCallHandlerInfo(); - obj->set_owner_template(*Utils::OpenDirectHandle(this)); - obj->set_callback(i_isolate, reinterpret_cast(callback)); - if (data.IsEmpty()) { - data = v8::Undefined(reinterpret_cast(i_isolate)); - } - obj->set_data(*Utils::OpenDirectHandle(*data)); - i::FunctionTemplateInfo::SetInstanceCallHandler(i_isolate, cons, obj); + DCHECK_NOT_NULL(callback); + + // This template is just a container for callback and data values and thus + // it's not supposed to be instantiated. Don't cache it. + constexpr bool do_not_cache = true; + constexpr int length = 0; + i::Handle templ = + i_isolate->factory()->NewFunctionTemplateInfo(length, do_not_cache); + templ->set_is_object_template_call_handler(true); + Utils::ToLocal(templ)->SetCallHandler(callback, data); + i::FunctionTemplateInfo::SetInstanceCallHandler(i_isolate, cons, templ); } int ObjectTemplate::InternalFieldCount() const { @@ -2231,10 +2226,6 @@ std::vector Script::GetProducedCompileHints() const { CHECK(IsSmi(item)); result.push_back(i::Smi::ToInt(item)); } - // Clear the data; the embedder can still request more data later, but it'll - // have to keep track of the original data itself. 
- script->set_compiled_lazy_function_positions( - i::ReadOnlyRoots(i_isolate).undefined_value()); } return result; } @@ -2472,16 +2463,6 @@ MaybeLocal Module::Evaluate(Local context) { RETURN_ESCAPED(result); } -Local Module::CreateSyntheticModule( - Isolate* v8_isolate, Local module_name, - const std::vector>& export_names, - v8::Module::SyntheticModuleEvaluationSteps evaluation_steps) { - return CreateSyntheticModule( - v8_isolate, module_name, - MemorySpan>(export_names.begin(), export_names.end()), - evaluation_steps); -} - Local Module::CreateSyntheticModule( Isolate* v8_isolate, Local module_name, const MemorySpan>& export_names, @@ -2522,33 +2503,6 @@ Maybe Module::SetSyntheticModuleExport(Isolate* v8_isolate, return Just(true); } -std::vector, Local>> -Module::GetStalledTopLevelAwaitMessage(Isolate* isolate) { - auto i_isolate = reinterpret_cast(isolate); - auto self = Utils::OpenDirectHandle(this); - Utils::ApiCheck(i::IsSourceTextModule(*self), - "v8::Module::GetStalledTopLevelAwaitMessage", - "v8::Module::GetStalledTopLevelAwaitMessage must only be " - "called on a SourceTextModule"); - std::vector< - std::tuple, i::Handle>> - stalled_awaits = i::DirectHandle::cast(self) - ->GetStalledTopLevelAwaitMessages(i_isolate); - - std::vector, Local>> result; - size_t stalled_awaits_count = stalled_awaits.size(); - if (stalled_awaits_count == 0) { - return result; - } - result.reserve(stalled_awaits_count); - for (size_t i = 0; i < stalled_awaits_count; ++i) { - auto [module, message] = stalled_awaits[i]; - result.push_back(std::make_tuple(ToApiHandle(module), - ToApiHandle(message))); - } - return result; -} - std::pair, LocalVector> Module::GetStalledTopLevelAwaitMessages(Isolate* isolate) { auto i_isolate = reinterpret_cast(isolate); @@ -3801,10 +3755,17 @@ bool Value::IsTypedArray() const { i::JSTypedArray::cast(obj)->type() == i::kExternal##Type##Array; \ } -TYPED_ARRAYS(VALUE_IS_TYPED_ARRAY) - +TYPED_ARRAYS_BASE(VALUE_IS_TYPED_ARRAY) #undef VALUE_IS_TYPED_ARRAY +bool Value::IsFloat16Array() const { + Utils::ApiCheck(i::v8_flags.js_float16array, "Value::IsFloat16Array", + "Float16Array is not supported"); + auto obj = *Utils::OpenDirectHandle(this); + return i::IsJSTypedArray(obj) && + i::JSTypedArray::cast(obj)->type() == i::kExternalFloat16Array; +} + bool Value::IsDataView() const { auto obj = *Utils::OpenDirectHandle(this); return IsJSDataView(obj) || IsJSRabGsabDataView(obj); @@ -4331,10 +4292,19 @@ void v8::TypedArray::CheckCast(Value* that) { "v8::" #Type "Array::Cast()", "Value is not a " #Type "Array"); \ } -TYPED_ARRAYS(CHECK_TYPED_ARRAY_CAST) - +TYPED_ARRAYS_BASE(CHECK_TYPED_ARRAY_CAST) #undef CHECK_TYPED_ARRAY_CAST +void v8::Float16Array::CheckCast(Value* that) { + Utils::ApiCheck(i::v8_flags.js_float16array, "v8::Float16Array::Cast", + "Float16Array is not supported"); + auto obj = *Utils::OpenHandle(that); + Utils::ApiCheck( + i::IsJSTypedArray(obj) && + i::JSTypedArray::cast(obj)->type() == i::kExternalFloat16Array, + "v8::Float16Array::Cast()", "Value is not a Float16Array"); +} + void v8::DataView::CheckCast(Value* that) { auto obj = *Utils::OpenDirectHandle(that); Utils::ApiCheck(i::IsJSDataView(obj) || IsJSRabGsabDataView(obj), @@ -4861,6 +4831,7 @@ Local v8::Object::FindInstanceInPrototypeChain( i::PrototypeIterator iter(i_isolate, *self, i::kStartAtReceiver); i::Tagged tmpl_info = *Utils::OpenDirectHandle(*tmpl); + if (!IsJSObject(iter.GetCurrent())) return Local(); while (!tmpl_info->IsTemplateFor(iter.GetCurrent())) { iter.Advance(); if (iter.IsAtEnd()) return 
Local(); @@ -5451,7 +5422,7 @@ MaybeLocal Function::New(Local context, auto templ = FunctionTemplateNew(i_isolate, callback, data, Local(), length, behavior, true, Local(), side_effect_type); - return templ->GetFunction(context); + return Utils::ToLocal(templ)->GetFunction(context); } MaybeLocal Function::NewInstance(Local context, int argc, @@ -5477,15 +5448,12 @@ MaybeLocal Function::NewInstanceWithSideEffectType( if (should_set_has_no_side_effect) { CHECK(IsJSFunction(*self) && i::JSFunction::cast(*self)->shared()->IsApiFunction()); - i::Tagged obj = - i::JSFunction::cast(*self)->shared()->api_func_data()->call_code( - kAcquireLoad); - if (i::IsCallHandlerInfo(obj)) { - i::Tagged handler_info = - i::CallHandlerInfo::cast(obj); - if (handler_info->IsSideEffectCallHandlerInfo()) { + i::Tagged func_data = + i::JSFunction::cast(*self)->shared()->api_func_data(); + if (func_data->has_callback(i_isolate)) { + if (func_data->has_side_effects()) { i_isolate->debug()->IgnoreSideEffectsOnNextCallTo( - handle(handler_info, i_isolate)); + handle(func_data, i_isolate)); } } } @@ -6593,7 +6561,7 @@ struct InvokeBootstrapper { i::MaybeHandle maybe_global_proxy, v8::Local global_proxy_template, v8::ExtensionConfiguration* extensions, size_t context_snapshot_index, - v8::DeserializeInternalFieldsCallback embedder_fields_deserializer, + i::DeserializeEmbedderFieldsCallback embedder_fields_deserializer, v8::MicrotaskQueue* microtask_queue) { return i_isolate->bootstrapper()->CreateEnvironment( maybe_global_proxy, global_proxy_template, extensions, @@ -6608,7 +6576,7 @@ struct InvokeBootstrapper { i::MaybeHandle maybe_global_proxy, v8::Local global_proxy_template, v8::ExtensionConfiguration* extensions, size_t context_snapshot_index, - v8::DeserializeInternalFieldsCallback embedder_fields_deserializer, + i::DeserializeEmbedderFieldsCallback embedder_fields_deserializer, v8::MicrotaskQueue* microtask_queue) { USE(extensions); USE(context_snapshot_index); @@ -6622,7 +6590,7 @@ static i::Handle CreateEnvironment( i::Isolate* i_isolate, v8::ExtensionConfiguration* extensions, v8::MaybeLocal maybe_global_template, v8::MaybeLocal maybe_global_proxy, size_t context_snapshot_index, - v8::DeserializeInternalFieldsCallback embedder_fields_deserializer, + i::DeserializeEmbedderFieldsCallback embedder_fields_deserializer, v8::MicrotaskQueue* microtask_queue) { i::Handle result; @@ -6727,7 +6695,7 @@ Local NewContext( v8::Isolate* external_isolate, v8::ExtensionConfiguration* extensions, v8::MaybeLocal global_template, v8::MaybeLocal global_object, size_t context_snapshot_index, - v8::DeserializeInternalFieldsCallback embedder_fields_deserializer, + i::DeserializeEmbedderFieldsCallback embedder_fields_deserializer, v8::MicrotaskQueue* microtask_queue) { i::Isolate* i_isolate = reinterpret_cast(external_isolate); // TODO(jkummerow): This is for crbug.com/713699. 
Remove it if it doesn't @@ -6751,27 +6719,34 @@ Local v8::Context::New( v8::Isolate* external_isolate, v8::ExtensionConfiguration* extensions, v8::MaybeLocal global_template, v8::MaybeLocal global_object, - DeserializeInternalFieldsCallback internal_fields_deserializer, - v8::MicrotaskQueue* microtask_queue) { - return NewContext(external_isolate, extensions, global_template, - global_object, 0, internal_fields_deserializer, - microtask_queue); + v8::DeserializeInternalFieldsCallback internal_fields_deserializer, + v8::MicrotaskQueue* microtask_queue, + v8::DeserializeContextDataCallback context_callback_deserializer) { + return NewContext( + external_isolate, extensions, global_template, global_object, 0, + i::DeserializeEmbedderFieldsCallback(internal_fields_deserializer, + context_callback_deserializer), + microtask_queue); } MaybeLocal v8::Context::FromSnapshot( v8::Isolate* external_isolate, size_t context_snapshot_index, - v8::DeserializeInternalFieldsCallback embedder_fields_deserializer, + v8::DeserializeInternalFieldsCallback internal_fields_deserializer, v8::ExtensionConfiguration* extensions, MaybeLocal global_object, - v8::MicrotaskQueue* microtask_queue) { + v8::MicrotaskQueue* microtask_queue, + v8::DeserializeContextDataCallback context_callback_deserializer) { size_t index_including_default_context = context_snapshot_index + 1; if (!i::Snapshot::HasContextSnapshot( reinterpret_cast(external_isolate), index_including_default_context)) { return MaybeLocal(); } - return NewContext(external_isolate, extensions, MaybeLocal(), - global_object, index_including_default_context, - embedder_fields_deserializer, microtask_queue); + return NewContext( + external_isolate, extensions, MaybeLocal(), global_object, + index_including_default_context, + i::DeserializeEmbedderFieldsCallback(internal_fields_deserializer, + context_callback_deserializer), + microtask_queue); } MaybeLocal v8::Context::NewRemoteContext( @@ -6794,7 +6769,7 @@ MaybeLocal v8::Context::NewRemoteContext( "Global template needs to have access check handlers"); i::Handle global_proxy = CreateEnvironment( i_isolate, nullptr, global_template, global_object, 0, - DeserializeInternalFieldsCallback(), nullptr); + i::DeserializeEmbedderFieldsCallback(), nullptr); if (global_proxy.is_null()) { if (i_isolate->has_exception()) i_isolate->clear_exception(); return MaybeLocal(); @@ -6850,6 +6825,7 @@ bool IsJSReceiverSafeToFreeze(i::InstanceType obj_type) { /* Function types */ case i::BIGINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE: case i::BIGUINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE: + case i::FLOAT16_TYPED_ARRAY_CONSTRUCTOR_TYPE: case i::FLOAT32_TYPED_ARRAY_CONSTRUCTOR_TYPE: case i::FLOAT64_TYPED_ARRAY_CONSTRUCTOR_TYPE: case i::INT16_TYPED_ARRAY_CONSTRUCTOR_TYPE: @@ -7219,32 +7195,6 @@ void Context::SetAbortScriptExecution( } } -Local Context::GetContinuationPreservedEmbedderData() const { - auto context = Utils::OpenDirectHandle(this); - i::Isolate* i_isolate = context->GetIsolate(); - -#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA - return ToApiHandle( - i::direct_handle( - context->native_context()->continuation_preserved_embedder_data(), - i_isolate), - i_isolate); -#else // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA - return v8::Undefined(reinterpret_cast(i_isolate)); -#endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA -} - -void Context::SetContinuationPreservedEmbedderData(Local data) { -#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA - auto context = Utils::OpenDirectHandle(this); - i::Isolate* i_isolate = 
context->GetIsolate(); - if (data.IsEmpty()) - data = v8::Undefined(reinterpret_cast(i_isolate)); - context->native_context()->set_continuation_preserved_embedder_data( - i::HeapObject::cast(*Utils::OpenDirectHandle(*data))); -#endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA -} - void v8::Context::SetPromiseHooks(Local init_hook, Local before_hook, Local after_hook, @@ -9161,9 +9111,48 @@ static_assert(v8::TypedArray::kMaxByteLength == i::JSTypedArray::kMaxByteLength, return Utils::ToLocal##Type##Array(obj); \ } -TYPED_ARRAYS(TYPED_ARRAY_NEW) +TYPED_ARRAYS_BASE(TYPED_ARRAY_NEW) #undef TYPED_ARRAY_NEW +Local Float16Array::New(Local array_buffer, + size_t byte_offset, size_t length) { + Utils::ApiCheck(i::v8_flags.js_float16array, "v8::Float16Array::New", + "Float16Array is not supported"); + i::Isolate* i_isolate = Utils::OpenDirectHandle(*array_buffer)->GetIsolate(); + API_RCS_SCOPE(i_isolate, Float16Array, New); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + if (!Utils::ApiCheck( + length <= kMaxLength, + "v8::Float16Array::New(Local, size_t, size_t)", + "length exceeds max allowed value")) { + return Local(); + } + auto buffer = Utils::OpenHandle(*array_buffer); + i::Handle obj = i_isolate->factory()->NewJSTypedArray( + i::kExternalFloat16Array, buffer, byte_offset, length); + return Utils::ToLocalFloat16Array(obj); +} +Local Float16Array::New( + Local shared_array_buffer, size_t byte_offset, + size_t length) { + Utils::ApiCheck(i::v8_flags.js_float16array, "v8::Float16Array::New", + "Float16Array is not supported"); + i::Isolate* i_isolate = + Utils::OpenDirectHandle(*shared_array_buffer)->GetIsolate(); + API_RCS_SCOPE(i_isolate, Float16Array, New); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + if (!Utils::ApiCheck( + length <= kMaxLength, + "v8::Float16Array::New(Local, size_t, size_t)", + "length exceeds max allowed value")) { + return Local(); + } + auto buffer = Utils::OpenHandle(*shared_array_buffer); + i::Handle obj = i_isolate->factory()->NewJSTypedArray( + i::kExternalFloat16Array, buffer, byte_offset, length); + return Utils::ToLocalFloat16Array(obj); +} + // TODO(v8:11111): Support creating length tracking DataViews via the API. Local DataView::New(Local array_buffer, size_t byte_offset, size_t byte_length) { @@ -9619,7 +9608,7 @@ void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type, base::Optional stack_scope; if (type == kFullGarbageCollection) { stack_scope.emplace(reinterpret_cast(this)->heap(), - i::EmbedderStackStateScope::kExplicitInvocation, + i::EmbedderStackStateOrigin::kExplicitInvocation, stack_state); } RequestGarbageCollectionForTesting(type); @@ -11678,10 +11667,8 @@ inline void InvokeFunctionCallback( ApiCallbackExitFrame* frame = ApiCallbackExitFrame::cast(it.frame()); Tagged fti = FunctionTemplateInfo::cast(frame->target()); - Tagged call_handler_info = - CallHandlerInfo::cast(fti->call_code(kAcquireLoad)); if (!i_isolate->debug()->PerformSideEffectCheckForCallback( - handle(call_handler_info, i_isolate))) { + handle(fti, i_isolate))) { // Failed side effect check. return; } diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h index bbc42ef09680bd..40b419db777f96 100644 --- a/deps/v8/src/api/api.h +++ b/deps/v8/src/api/api.h @@ -48,15 +48,6 @@ class Script; class EphemeronTable; } // namespace debug -// Constants used in the implementation of the API. 
The most natural thing -// would usually be to place these with the classes that use them, but -// we want to keep them out of v8.h because it is an externally -// visible file. -class Consts { - public: - enum TemplateType { FUNCTION_TEMPLATE = 0, OBJECT_TEMPLATE = 1 }; -}; - template inline T ToCData(v8::internal::Tagged obj); @@ -160,6 +151,7 @@ class RegisteredExtension { V(Int16Array, JSTypedArray) \ V(Uint32Array, JSTypedArray) \ V(Int32Array, JSTypedArray) \ + V(Float16Array, JSTypedArray) \ V(Float32Array, JSTypedArray) \ V(Float64Array, JSTypedArray) \ V(DataView, JSDataViewOrRabGsabDataView) \ diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc index c4d9645fa6c462..c5ff60d671f19d 100644 --- a/deps/v8/src/asmjs/asm-parser.cc +++ b/deps/v8/src/asmjs/asm-parser.cc @@ -369,8 +369,8 @@ void AsmJsParser::ValidateModule() { uint32_t import_index = module_builder_->AddGlobalImport( global_import.import_name, global_import.value_type, false /* mutability */); - start->EmitWithI32V(kExprGlobalGet, import_index); - start->EmitWithI32V(kExprGlobalSet, VarIndex(global_import.var_info)); + start->EmitWithU32V(kExprGlobalGet, import_index); + start->EmitWithU32V(kExprGlobalSet, VarIndex(global_import.var_info)); } start->Emit(kExprEnd); FunctionSig::Builder b(zone(), 0, 0); @@ -961,7 +961,7 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count, } else { FAIL("Bad local variable definition"); } - current_function_builder_->EmitWithI32V(kExprGlobalGet, + current_function_builder_->EmitWithU32V(kExprGlobalGet, VarIndex(sinfo)); current_function_builder_->EmitSetLocal(info->index); } else if (sinfo->type->IsA(stdlib_fround_)) { @@ -1275,8 +1275,7 @@ void AsmJsParser::BreakStatement() { if (depth < 0) { FAIL("Illegal break"); } - current_function_builder_->Emit(kExprBr); - current_function_builder_->EmitI32V(depth); + current_function_builder_->EmitWithU32V(kExprBr, depth); SkipSemicolon(); } @@ -1292,7 +1291,7 @@ void AsmJsParser::ContinueStatement() { if (depth < 0) { FAIL("Illegal continue"); } - current_function_builder_->EmitWithI32V(kExprBr, depth); + current_function_builder_->EmitWithU32V(kExprBr, depth); SkipSemicolon(); } @@ -1337,9 +1336,9 @@ void AsmJsParser::SwitchStatement() { current_function_builder_->EmitGetLocal(tmp); current_function_builder_->EmitI32Const(c); current_function_builder_->Emit(kExprI32Eq); - current_function_builder_->EmitWithI32V(kExprBrIf, table_pos++); + current_function_builder_->EmitWithU32V(kExprBrIf, table_pos++); } - current_function_builder_->EmitWithI32V(kExprBr, table_pos++); + current_function_builder_->EmitWithU32V(kExprBr, table_pos++); while (!failed_ && Peek(TOK(case))) { current_function_builder_->Emit(kExprEnd); BareEnd(); @@ -1455,7 +1454,7 @@ AsmType* AsmJsParser::Identifier() { if (info->kind != VarKind::kGlobal) { FAILn("Undefined global variable"); } - current_function_builder_->EmitWithI32V(kExprGlobalGet, VarIndex(info)); + current_function_builder_->EmitWithU32V(kExprGlobalGet, VarIndex(info)); return info->type; } UNREACHABLE(); diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index 581156baf34e6d..39e3a8d5d59acb 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -552,32 +552,35 @@ bool Scope::IsReparsedMemberInitializerScope() const { #endif DeclarationScope* Scope::AsDeclarationScope() { - DCHECK(is_declaration_scope()); + // Here and below: if an attacker corrupts the in-sandox SFI::unique_id or + // fields of a Script object, we can get confused about which 
type of scope + // we're operating on. These CHECKs defend against that. + SBXCHECK(is_declaration_scope()); return static_cast(this); } const DeclarationScope* Scope::AsDeclarationScope() const { - DCHECK(is_declaration_scope()); + SBXCHECK(is_declaration_scope()); return static_cast(this); } ModuleScope* Scope::AsModuleScope() { - DCHECK(is_module_scope()); + SBXCHECK(is_module_scope()); return static_cast(this); } const ModuleScope* Scope::AsModuleScope() const { - DCHECK(is_module_scope()); + SBXCHECK(is_module_scope()); return static_cast(this); } ClassScope* Scope::AsClassScope() { - DCHECK(is_class_scope()); + SBXCHECK(is_class_scope()); return static_cast(this); } const ClassScope* Scope::AsClassScope() const { - DCHECK(is_class_scope()); + SBXCHECK(is_class_scope()); return static_cast(this); } diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h index f25bd3f9b57c43..9ed4c8f10263f0 100644 --- a/deps/v8/src/base/build_config.h +++ b/deps/v8/src/base/build_config.h @@ -72,10 +72,11 @@ constexpr int kPageSizeBits = 18; // to that size needs to be individually protectable via // {base::OS::SetPermission} and friends. #if (defined(V8_OS_MACOS) && defined(V8_HOST_ARCH_ARM64)) || \ - (defined(V8_OS_ANDROID) && defined(V8_HOST_ARCH_ARM64)) || \ + (defined(V8_OS_ANDROID) && \ + (defined(V8_HOST_ARCH_ARM64) || defined(V8_HOST_ARCH_X64))) || \ defined(V8_HOST_ARCH_LOONG64) || defined(V8_HOST_ARCH_MIPS64) || \ defined(V8_OS_IOS) -// Android on arm64 has experimental support for 16kB pages. +// Android 64 bit has experimental support for 16kB pages. // MacOS & iOS on arm64 uses 16kB pages. // LOONG64 and MIPS64 also use 16kB pages. constexpr int kMinimumOSPageSize = 16 * 1024; diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h index 82c0d04550402a..210885af3c3c0a 100644 --- a/deps/v8/src/base/macros.h +++ b/deps/v8/src/base/macros.h @@ -427,7 +427,18 @@ bool is_inbounds(float_t v) { #define IF_TARGET_ARCH_64_BIT(V, ...) EXPAND(V(__VA_ARGS__)) #else #define IF_TARGET_ARCH_64_BIT(V, ...) -#endif +#endif // V8_TARGET_ARCH_64_BIT + +// Defines IF_OFFICIAL_BUILD and IF_NO_OFFICIAL_BUILD, to be used in macro lists +// for elements that should only be there in official / non-official builds. +#ifdef OFFICIAL_BUILD +// EXPAND is needed to work around MSVC's broken __VA_ARGS__ expansion. +#define IF_OFFICIAL_BUILD(V, ...) EXPAND(V(__VA_ARGS__)) +#define IF_NO_OFFICIAL_BUILD(V, ...) +#else +#define IF_OFFICIAL_BUILD(V, ...) +#define IF_NO_OFFICIAL_BUILD(V, ...) EXPAND(V(__VA_ARGS__)) +#endif // OFFICIAL_BUILD #ifdef GOOGLE3 // Disable FRIEND_TEST macro in Google3. 
diff --git a/deps/v8/src/base/numbers/fast-dtoa.cc b/deps/v8/src/base/numbers/fast-dtoa.cc index 87b424c5812a7c..4dee33c98e7261 100644 --- a/deps/v8/src/base/numbers/fast-dtoa.cc +++ b/deps/v8/src/base/numbers/fast-dtoa.cc @@ -10,7 +10,6 @@ #include "src/base/numbers/cached-powers.h" #include "src/base/numbers/diy-fp.h" #include "src/base/numbers/double.h" -#include "src/base/v8-fallthrough.h" namespace v8 { namespace base { @@ -265,7 +264,7 @@ static inline void BiggestPowerTen(uint32_t number, int number_bits, *exponent = 9; break; } - V8_FALLTHROUGH; + [[fallthrough]]; case 29: case 28: case 27: @@ -274,7 +273,7 @@ static inline void BiggestPowerTen(uint32_t number, int number_bits, *exponent = 8; break; } - V8_FALLTHROUGH; + [[fallthrough]]; case 26: case 25: case 24: @@ -283,7 +282,7 @@ static inline void BiggestPowerTen(uint32_t number, int number_bits, *exponent = 7; break; } - V8_FALLTHROUGH; + [[fallthrough]]; case 23: case 22: case 21: @@ -293,7 +292,7 @@ static inline void BiggestPowerTen(uint32_t number, int number_bits, *exponent = 6; break; } - V8_FALLTHROUGH; + [[fallthrough]]; case 19: case 18: case 17: @@ -302,7 +301,7 @@ static inline void BiggestPowerTen(uint32_t number, int number_bits, *exponent = 5; break; } - V8_FALLTHROUGH; + [[fallthrough]]; case 16: case 15: case 14: @@ -311,7 +310,7 @@ static inline void BiggestPowerTen(uint32_t number, int number_bits, *exponent = 4; break; } - V8_FALLTHROUGH; + [[fallthrough]]; case 13: case 12: case 11: @@ -321,7 +320,7 @@ static inline void BiggestPowerTen(uint32_t number, int number_bits, *exponent = 3; break; } - V8_FALLTHROUGH; + [[fallthrough]]; case 9: case 8: case 7: @@ -330,7 +329,7 @@ static inline void BiggestPowerTen(uint32_t number, int number_bits, *exponent = 2; break; } - V8_FALLTHROUGH; + [[fallthrough]]; case 6: case 5: case 4: @@ -339,7 +338,7 @@ static inline void BiggestPowerTen(uint32_t number, int number_bits, *exponent = 1; break; } - V8_FALLTHROUGH; + [[fallthrough]]; case 3: case 2: case 1: @@ -348,7 +347,7 @@ static inline void BiggestPowerTen(uint32_t number, int number_bits, *exponent = 0; break; } - V8_FALLTHROUGH; + [[fallthrough]]; case 0: *power = 0; *exponent = -1; diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc index c0e3743410b394..57ef0b431260b1 100644 --- a/deps/v8/src/base/platform/platform-linux.cc +++ b/deps/v8/src/base/platform/platform-linux.cc @@ -218,7 +218,7 @@ std::unique_ptr> ParseProcSelfMaps( } fclose(fp); - if (!error && result->size()) return result; + if (!error && !result->empty()) return result; return nullptr; } diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h index b1b5d641985dfe..edaab3a7a6b42f 100644 --- a/deps/v8/src/base/small-vector.h +++ b/deps/v8/src/base/small-vector.h @@ -198,12 +198,12 @@ class SmallVector { end_ = begin_ + new_size; } - void resize_and_init(size_t new_size) { + void resize_and_init(size_t new_size, const T& initial_value = {}) { static_assert(std::is_trivially_destructible_v); if (new_size > capacity()) Grow(new_size); T* new_end = begin_ + new_size; if (new_end > end_) { - std::uninitialized_fill(end_, new_end, T{}); + std::uninitialized_fill(end_, new_end, initial_value); } end_ = new_end; } diff --git a/deps/v8/src/base/v8-fallthrough.h b/deps/v8/src/base/v8-fallthrough.h deleted file mode 100644 index a6dc6972d6d21d..00000000000000 --- a/deps/v8/src/base/v8-fallthrough.h +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 the V8 project authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_BASE_V8_FALLTHROUGH_H_ -#define V8_BASE_V8_FALLTHROUGH_H_ - -// When clang suggests inserting [[clang::fallthrough]], it first checks if -// it knows of a macro expanding to it, and if so suggests inserting the -// macro. This means that this macro must be used only in code internal -// to v8, so that v8's user code doesn't end up getting suggestions -// for V8_FALLTHROUGH instead of the user-specific fallthrough macro. -// So do not include this header in any of v8's public headers -- only -// use it in src/, not in include/. -#if defined(__clang__) -#define V8_FALLTHROUGH [[clang::fallthrough]] -#else -#define V8_FALLTHROUGH -#endif - -#endif // V8_BASE_V8_FALLTHROUGH_H_ diff --git a/deps/v8/src/baseline/baseline-batch-compiler.cc b/deps/v8/src/baseline/baseline-batch-compiler.cc index e97f4be9000c1a..f934a88bbe81c8 100644 --- a/deps/v8/src/baseline/baseline-batch-compiler.cc +++ b/deps/v8/src/baseline/baseline-batch-compiler.cc @@ -99,9 +99,9 @@ class BaselineBatchCompilerJob { handles_ = isolate->NewPersistentHandles(); tasks_.reserve(batch_size); for (int i = 0; i < batch_size; i++) { - MaybeObject maybe_sfi = task_queue->get(i); + Tagged maybe_sfi = task_queue->get(i); // TODO(victorgomes): Do I need to clear the value? - task_queue->set(i, HeapObjectReference::ClearedValue(isolate)); + task_queue->set(i, ClearedValue(isolate)); Tagged obj; // Skip functions where weak reference is no longer valid. if (!maybe_sfi.GetHeapObjectIfWeak(&obj)) continue; @@ -283,7 +283,7 @@ void BaselineBatchCompiler::EnqueueSFI(Tagged shared) { void BaselineBatchCompiler::Enqueue(Handle shared) { EnsureQueueCapacity(); - compilation_queue_->set(last_index_++, HeapObjectReference::Weak(*shared)); + compilation_queue_->set(last_index_++, MakeWeak(*shared)); } void BaselineBatchCompiler::InstallBatch() { @@ -315,9 +315,9 @@ void BaselineBatchCompiler::CompileBatch(Handle function) { &is_compiled_scope); } for (int i = 0; i < last_index_; i++) { - MaybeObject maybe_sfi = compilation_queue_->get(i); + Tagged maybe_sfi = compilation_queue_->get(i); MaybeCompileFunction(maybe_sfi); - compilation_queue_->set(i, HeapObjectReference::ClearedValue(isolate_)); + compilation_queue_->set(i, ClearedValue(isolate_)); } ClearBatch(); } @@ -368,7 +368,8 @@ bool BaselineBatchCompiler::ShouldCompileBatch( return false; } -bool BaselineBatchCompiler::MaybeCompileFunction(MaybeObject maybe_sfi) { +bool BaselineBatchCompiler::MaybeCompileFunction( + Tagged maybe_sfi) { Tagged heapobj; // Skip functions where the weak reference is no longer valid. if (!maybe_sfi.GetHeapObjectIfWeak(&heapobj)) return false; diff --git a/deps/v8/src/baseline/baseline-batch-compiler.h b/deps/v8/src/baseline/baseline-batch-compiler.h index adfe9dfe4200b9..94aa7b9b03295f 100644 --- a/deps/v8/src/baseline/baseline-batch-compiler.h +++ b/deps/v8/src/baseline/baseline-batch-compiler.h @@ -57,7 +57,7 @@ class BaselineBatchCompiler { // Tries to compile |maybe_sfi|. Returns false if compilation was not possible // (e.g. bytecode was fushed, weak handle no longer valid, ...). 
- bool MaybeCompileFunction(MaybeObject maybe_sfi); + bool MaybeCompileFunction(Tagged maybe_sfi); Isolate* isolate_; diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc index 58ea23043da452..b3b76b60ca0dca 100644 --- a/deps/v8/src/baseline/baseline-compiler.cc +++ b/deps/v8/src/baseline/baseline-compiler.cc @@ -59,11 +59,11 @@ namespace internal { namespace baseline { template -Handle BytecodeOffsetTableBuilder::ToBytecodeOffsetTable( +Handle BytecodeOffsetTableBuilder::ToBytecodeOffsetTable( IsolateT* isolate) { - if (bytes_.empty()) return isolate->factory()->empty_byte_array(); - Handle table = isolate->factory()->NewByteArray( - static_cast(bytes_.size()), AllocationType::kOld); + if (bytes_.empty()) return isolate->factory()->empty_trusted_byte_array(); + Handle table = + isolate->factory()->NewTrustedByteArray(static_cast(bytes_.size())); MemCopy(table->begin(), bytes_.data(), bytes_.size()); return table; } @@ -337,7 +337,7 @@ MaybeHandle BaselineCompiler::Build(LocalIsolate* local_isolate) { __ GetCode(local_isolate, &desc); // Allocate the bytecode offset table. - Handle bytecode_offset_table = + Handle bytecode_offset_table = bytecode_offset_table_builder_.ToBytecodeOffsetTable(local_isolate); Factory::CodeBuilder code_builder(local_isolate, desc, CodeKind::BASELINE); @@ -934,6 +934,16 @@ void BaselineCompiler::VisitGetKeyedProperty() { IndexAsTagged(1)); // slot } +void BaselineCompiler::VisitGetEnumeratedKeyedProperty() { + DCHECK(v8_flags.enable_enumerated_keyed_access_bytecode); + CallBuiltin( + RegisterOperand(0), // object + kInterpreterAccumulatorRegister, // key + RegisterOperand(1), // enum index + RegisterOperand(2), // cache type + IndexAsTagged(3)); // slot +} + void BaselineCompiler::VisitLdaModuleVariable() { BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_); Register scratch = scratch_scope.AcquireScratch(); diff --git a/deps/v8/src/baseline/baseline-compiler.h b/deps/v8/src/baseline/baseline-compiler.h index c06fdafddf44c9..ecbb5e68ddcfcc 100644 --- a/deps/v8/src/baseline/baseline-compiler.h +++ b/deps/v8/src/baseline/baseline-compiler.h @@ -37,7 +37,7 @@ class BytecodeOffsetTableBuilder { } template - Handle ToBytecodeOffsetTable(IsolateT* isolate); + Handle ToBytecodeOffsetTable(IsolateT* isolate); void Reserve(size_t size) { bytes_.reserve(size); } diff --git a/deps/v8/src/baseline/bytecode-offset-iterator.cc b/deps/v8/src/baseline/bytecode-offset-iterator.cc index f7a4d7ac66cba7..c0bed57ea2e1bc 100644 --- a/deps/v8/src/baseline/bytecode-offset-iterator.cc +++ b/deps/v8/src/baseline/bytecode-offset-iterator.cc @@ -12,8 +12,8 @@ namespace v8 { namespace internal { namespace baseline { -BytecodeOffsetIterator::BytecodeOffsetIterator(Handle mapping_table, - Handle bytecodes) +BytecodeOffsetIterator::BytecodeOffsetIterator( + Handle mapping_table, Handle bytecodes) : mapping_table_(mapping_table), data_start_address_(mapping_table_->begin()), data_length_(mapping_table_->length()), @@ -26,8 +26,8 @@ BytecodeOffsetIterator::BytecodeOffsetIterator(Handle mapping_table, Initialize(); } -BytecodeOffsetIterator::BytecodeOffsetIterator(Tagged mapping_table, - Tagged bytecodes) +BytecodeOffsetIterator::BytecodeOffsetIterator( + Tagged mapping_table, Tagged bytecodes) : data_start_address_(mapping_table->begin()), data_length_(mapping_table->length()), current_index_(0), diff --git a/deps/v8/src/baseline/bytecode-offset-iterator.h b/deps/v8/src/baseline/bytecode-offset-iterator.h index 
06d02207ebc4b7..91919f2d6c9d71 100644 --- a/deps/v8/src/baseline/bytecode-offset-iterator.h +++ b/deps/v8/src/baseline/bytecode-offset-iterator.h @@ -19,10 +19,10 @@ namespace baseline { class V8_EXPORT_PRIVATE BytecodeOffsetIterator { public: - explicit BytecodeOffsetIterator(Handle mapping_table, + explicit BytecodeOffsetIterator(Handle mapping_table, Handle bytecodes); // Non-handlified version for use when no GC can happen. - explicit BytecodeOffsetIterator(Tagged mapping_table, + explicit BytecodeOffsetIterator(Tagged mapping_table, Tagged bytecodes); ~BytecodeOffsetIterator(); @@ -77,7 +77,7 @@ class V8_EXPORT_PRIVATE BytecodeOffsetIterator { return base::VLQDecodeUnsigned(data_start_address_, ¤t_index_); } - Handle mapping_table_; + Handle mapping_table_; uint8_t* data_start_address_; int data_length_; int current_index_; diff --git a/deps/v8/src/bigint/bigint-internal.cc b/deps/v8/src/bigint/bigint-internal.cc index 35a9e5b3f2377f..ae0b202c65710e 100644 --- a/deps/v8/src/bigint/bigint-internal.cc +++ b/deps/v8/src/bigint/bigint-internal.cc @@ -52,7 +52,12 @@ void ProcessorImpl::Multiply(RWDigits Z, Digits X, Digits Y) { void ProcessorImpl::Divide(RWDigits Q, Digits A, Digits B) { A.Normalize(); B.Normalize(); - DCHECK(B.len() > 0); + // While callers are not required to normalize inputs, they must not + // provide divisors that normalize to zero. + // This must be a Release-mode CHECK because it is load bearing for + // security fuzzing: subsequent operations would perform illegal memory + // accesses if they attempted to work with zero divisors. + CHECK(B.len() > 0); int cmp = Compare(A, B); if (cmp < 0) return Q.Clear(); if (cmp == 0) { @@ -82,7 +87,12 @@ void ProcessorImpl::Divide(RWDigits Q, Digits A, Digits B) { void ProcessorImpl::Modulo(RWDigits R, Digits A, Digits B) { A.Normalize(); B.Normalize(); - DCHECK(B.len() > 0); + // While callers are not required to normalize inputs, they must not + // provide divisors that normalize to zero. + // This must be a Release-mode CHECK because it is load bearing for + // security fuzzing: subsequent operations would perform illegal memory + // accesses if they attempted to work with zero divisors. 
+ CHECK(B.len() > 0); int cmp = Compare(A, B); if (cmp < 0) { for (int i = 0; i < B.len(); i++) R[i] = B[i]; diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index 79124bd196a57f..ce2a0ac3b728a7 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -3167,7 +3167,7 @@ class RegisterAllocator { while (it != allocated_registers_.end()) { if (registerIsAvailable(**it)) { **it = no_reg; - allocated_registers_.erase(it); + it = allocated_registers_.erase(it); } else { it++; } @@ -3947,7 +3947,8 @@ void SwitchToTheCentralStackIfNeeded(MacroAssembler* masm, __ PrepareCallCFunction(2); __ Move(kCArgRegs[0], ER::isolate_address(masm->isolate())); __ Move(kCArgRegs[1], kOldSPRegister); - __ CallCFunction(ER::wasm_switch_to_the_central_stack(), 2); + __ CallCFunction(ER::wasm_switch_to_the_central_stack(), 2, + SetIsolateDataSlots::kNo); __ Move(central_stack_sp, kReturnRegister0); __ Pop(argv_input); __ Pop(target_input); @@ -3980,7 +3981,8 @@ void SwitchFromTheCentralStackIfNeeded(MacroAssembler* masm) { __ Push(kReturnRegister0, kReturnRegister1); __ PrepareCallCFunction(1); __ Move(kCArgRegs[0], ER::isolate_address(masm->isolate())); - __ CallCFunction(ER::wasm_switch_from_the_central_stack(), 1); + __ CallCFunction(ER::wasm_switch_from_the_central_stack(), 1, + SetIsolateDataSlots::kNo); __ Pop(kReturnRegister0, kReturnRegister1); } @@ -4120,7 +4122,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ mov(r0, Operand(0)); __ mov(r1, Operand(0)); __ Move(r2, ExternalReference::isolate_address(masm->isolate())); - __ CallCFunction(find_handler, 3); + __ CallCFunction(find_handler, 3, SetIsolateDataSlots::kNo); } // Retrieve the handler context, SP and FP. @@ -4283,7 +4285,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); topmost_script_having_context = CallApiCallbackGenericDescriptor:: TopmostScriptHavingContextRegister(); - callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + callback = + CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister(); holder = CallApiCallbackGenericDescriptor::HolderRegister(); break; @@ -4352,7 +4355,9 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // kData. switch (mode) { case CallApiCallbackMode::kGeneric: - __ ldr(scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset)); + __ ldr( + scratch2, + FieldMemOperand(callback, FunctionTemplateInfo::kCallbackDataOffset)); __ str(scratch2, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); break; @@ -4405,13 +4410,11 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // Target parameter. 
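The bigint Divide/Modulo hardening above hinges on what "normalizes to zero" means: normalization strips high zero digits, so an all-zero divisor ends up with length 0 and must be rejected before any digit is touched. A minimal standalone sketch of that invariant, using illustrative names rather than V8's actual Digits/RWDigits API:

#include <cstdint>
#include <vector>

using digit_t = uintptr_t;

// Strip leading (most significant) zero digits, as Normalize() does.
void Normalize(std::vector<digit_t>& digits) {
  while (!digits.empty() && digits.back() == 0) digits.pop_back();
}

// Callers need not pre-normalize, but the normalized divisor must be
// non-empty; the release-mode CHECK(B.len() > 0) enforces exactly this.
bool IsUsableDivisor(std::vector<digit_t> divisor) {
  Normalize(divisor);
  return !divisor.empty();
}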
static_assert(ApiCallbackExitFrameConstants::kTargetOffset == 2 * kSystemPointerSize); - __ ldr(scratch, - FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset)); - __ str(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + __ str(callback, MemOperand(sp, 0 * kSystemPointerSize)); __ ldr(api_function_address, - FieldMemOperand(callback, - CallHandlerInfo::kMaybeRedirectedCallbackOffset)); + FieldMemOperand( + callback, FunctionTemplateInfo::kMaybeRedirectedCallbackOffset)); __ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT); } else { @@ -4521,10 +4524,15 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { __ RecordComment( "Load address of v8::PropertyAccessorInfo::args_ array and name handle."); - // name_arg = Handle(&name), name value was pushed to GC-ed stack space. +#ifdef V8_ENABLE_DIRECT_LOCAL + // name_arg = Local(name), name value was pushed to GC-ed stack space. + __ mov(name_arg, scratch); +#else + // name_arg = Local(&name), name value was pushed to GC-ed stack space. __ mov(name_arg, sp); +#endif // property_callback_info_arg = v8::PCI::args_ (= &ShouldThrow) - __ add(property_callback_info_arg, name_arg, Operand(1 * kPointerSize)); + __ add(property_callback_info_arg, sp, Operand(1 * kPointerSize)); constexpr int kNameOnStackSize = 1; constexpr int kStackUnwindSpace = PCA::kArgsLength + kNameOnStackSize; diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index 5c607660fb913a..6f874055dfeb29 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -3678,7 +3678,7 @@ class RegisterAllocator { while (it != allocated_registers_.end()) { if (available_.IncludesAliasOf(**it)) { **it = no_reg; - allocated_registers_.erase(it); + it = allocated_registers_.erase(it); } else { it++; } @@ -4441,7 +4441,8 @@ void SwitchToTheCentralStackIfNeeded(MacroAssembler* masm, Register argc_input, __ Push(argc_input, target_input, argv_input, padreg); __ Mov(kCArgRegs[0], ER::isolate_address(masm->isolate())); __ Mov(kCArgRegs[1], kOldSPRegister); - __ CallCFunction(ER::wasm_switch_to_the_central_stack(), 2); + __ CallCFunction(ER::wasm_switch_to_the_central_stack(), 2, + SetIsolateDataSlots::kNo); __ Mov(central_stack_sp, kReturnRegister0); __ Pop(padreg, argv_input, target_input, argc_input); } @@ -4471,7 +4472,8 @@ void SwitchFromTheCentralStackIfNeeded(MacroAssembler* masm) { { __ Push(kReturnRegister0, kReturnRegister1); __ Mov(kCArgRegs[0], ER::isolate_address(masm->isolate())); - __ CallCFunction(ER::wasm_switch_from_the_central_stack(), 1); + __ CallCFunction(ER::wasm_switch_from_the_central_stack(), 1, + SetIsolateDataSlots::kNo); __ Pop(kReturnRegister1, kReturnRegister0); } @@ -4622,7 +4624,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ Mov(x0, 0); // argc. __ Mov(x1, 0); // argv. __ Mov(x2, ER::isolate_address(masm->isolate())); - __ CallCFunction(ER::Create(Runtime::kUnwindAndFindExceptionHandler), 3); + __ CallCFunction(ER::Create(Runtime::kUnwindAndFindExceptionHandler), 3, + SetIsolateDataSlots::kNo); } // Retrieve the handler context, SP and FP. 
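The allocated_registers_.erase(it) changes in the arm and arm64 RegisterAllocator hunks fix the usual erase-while-iterating pitfall: std::vector::erase invalidates the erased iterator, so the loop has to continue from the iterator that erase returns. A small self-contained illustration of the corrected idiom (the container and predicate here are stand-ins, not the builtin's real types):

#include <vector>

// Remove elements matching a predicate while walking the container.
void DeallocateAvailable(std::vector<int>& allocated,
                         bool (*is_available)(int)) {
  auto it = allocated.begin();
  while (it != allocated.end()) {
    if (is_available(*it)) {
      it = allocated.erase(it);  // erase() returns the next valid iterator
    } else {
      ++it;
    }
  }
}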
@@ -4772,7 +4775,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); topmost_script_having_context = CallApiCallbackGenericDescriptor:: TopmostScriptHavingContextRegister(); - callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + callback = + CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister(); holder = CallApiCallbackGenericDescriptor::HolderRegister(); break; @@ -4843,7 +4847,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, switch (mode) { case CallApiCallbackMode::kGeneric: __ LoadTaggedField( - scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset)); + scratch2, + FieldMemOperand(callback, FunctionTemplateInfo::kCallbackDataOffset)); __ Str(scratch2, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); break; @@ -4897,16 +4902,13 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // Target parameter. static_assert(ApiCallbackExitFrameConstants::kTargetOffset == 2 * kSystemPointerSize); - __ LoadTaggedField( - scratch, - FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset)); - __ Str(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + __ Str(callback, MemOperand(sp, 0 * kSystemPointerSize)); __ LoadExternalPointerField( api_function_address, FieldMemOperand(callback, - CallHandlerInfo::kMaybeRedirectedCallbackOffset), - kCallHandlerInfoCallbackTag); + FunctionTemplateInfo::kMaybeRedirectedCallbackOffset), + kFunctionTemplateInfoCallbackTag); __ EnterExitFrame(scratch, kApiStackSpace, StackFrame::API_CALLBACK_EXIT); } else { @@ -5041,8 +5043,14 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { __ RecordComment( "Load address of v8::PropertyAccessorInfo::args_ array and name handle."); - // name_arg = Handle(&name), name value was pushed to GC-ed stack space. +#ifdef V8_ENABLE_DIRECT_LOCAL + // name_arg = Local(name), name value was pushed to GC-ed stack space. + __ mov(name_arg, name); + USE(kNameStackIndex); +#else + // name_arg = Local(&name), name value was pushed to GC-ed stack space. 
__ Add(name_arg, sp, Operand(kNameStackIndex * kSystemPointerSize)); +#endif // property_callback_info_arg = v8::PCI::args_ (= &ShouldThrow) __ Add(property_callback_info_arg, sp, Operand(kPCAStackIndex * kSystemPointerSize)); diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq index d85dc1ab48319b..5d0edc48e9d328 100644 --- a/deps/v8/src/builtins/array-join.tq +++ b/deps/v8/src/builtins/array-join.tq @@ -443,6 +443,8 @@ transitioning ArrayJoin( if (IsElementsKindGreaterThan(kind, ElementsKind::UINT32_ELEMENTS)) { if (kind == ElementsKind::INT32_ELEMENTS) { loadFn = LoadJoinTypedElement; + } else if (kind == ElementsKind::FLOAT16_ELEMENTS) { + loadFn = LoadJoinTypedElement; } else if (kind == ElementsKind::FLOAT32_ELEMENTS) { loadFn = LoadJoinTypedElement; } else if (kind == ElementsKind::FLOAT64_ELEMENTS) { @@ -465,6 +467,8 @@ transitioning ArrayJoin( loadFn = LoadJoinTypedElement; } else if (kind == ElementsKind::RAB_GSAB_INT32_ELEMENTS) { loadFn = LoadJoinTypedElement; + } else if (kind == ElementsKind::RAB_GSAB_FLOAT16_ELEMENTS) { + loadFn = LoadJoinTypedElement; } else if (kind == ElementsKind::RAB_GSAB_FLOAT32_ELEMENTS) { loadFn = LoadJoinTypedElement; } else if (kind == ElementsKind::RAB_GSAB_FLOAT64_ELEMENTS) { diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index 090e2ee31ad20d..02812274b79e58 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -123,6 +123,7 @@ type int64 generates 'TNode' constexpr 'int64_t'; type uint64 generates 'TNode' constexpr 'uint64_t'; type intptr generates 'TNode' constexpr 'intptr_t'; type uintptr generates 'TNode' constexpr 'uintptr_t'; +type float16 generates 'TNode' constexpr 'uint16_t'; type float32 generates 'TNode' constexpr 'float'; type float64 generates 'TNode' constexpr 'double'; type bool generates 'TNode' constexpr 'bool'; @@ -327,6 +328,7 @@ extern enum ElementsKind extends int32 { INT16_ELEMENTS, UINT32_ELEMENTS, INT32_ELEMENTS, + FLOAT16_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS, UINT8_CLAMPED_ELEMENTS, @@ -338,6 +340,7 @@ extern enum ElementsKind extends int32 { RAB_GSAB_INT16_ELEMENTS, RAB_GSAB_UINT32_ELEMENTS, RAB_GSAB_INT32_ELEMENTS, + RAB_GSAB_FLOAT16_ELEMENTS, RAB_GSAB_FLOAT32_ELEMENTS, RAB_GSAB_FLOAT64_ELEMENTS, RAB_GSAB_UINT8_CLAMPED_ELEMENTS, @@ -482,6 +485,7 @@ extern enum MessageTemplate { kFlattenPastSafeLength, kStrictReadOnlyProperty, kInvalidUsingInForInLoop, + kIllegalInvocation, ... } @@ -1005,7 +1009,7 @@ macro Float64IsNaN(n: float64): bool { // The type of all tagged values that can safely be compared with TaggedEqual. 
@if(V8_ENABLE_WEBASSEMBLY) type TaggedWithIdentity = JSReceiver|FixedArrayBase|Oddball|Hole|Map|WeakCell| - Context|EmptyString|Symbol|WasmInternalFunction|WasmNull; + Context|EmptyString|Symbol|WasmFuncRef|WasmNull; @ifnot(V8_ENABLE_WEBASSEMBLY) type TaggedWithIdentity = JSReceiver|FixedArrayBase|Oddball|Hole|Map|WeakCell| Context|EmptyString|Symbol; @@ -1294,9 +1298,9 @@ extern macro IntPtrRoundUpToPowerOfTwo32(intptr): intptr; extern macro ChangeFloat32ToFloat64(float32): float64; extern macro RoundInt32ToFloat32(int32): float32; extern macro ChangeNumberToFloat64(Number): float64; -extern macro ChangeNumberToUint32(Number): uint32; extern macro ChangeTaggedNonSmiToInt32( implicit context: Context)(HeapObject): int32; +extern macro ChangeFloat16ToFloat64(float16): float64; extern macro ChangeFloat32ToTagged(float32): Number; extern macro ChangeTaggedToFloat64(implicit context: Context)(JSAny): float64; extern macro ChangeFloat64ToTagged(float64): Number; @@ -1310,6 +1314,8 @@ extern macro ChangeInt32ToInt64(int32): int64; // Sign-extends. extern macro ChangeUint32ToUint64(uint32): uint64; // Doesn't sign-extend. extern macro LoadNativeContext(Context): NativeContext; extern macro GetContinuationPreservedEmbedderData(): Object; +extern macro TruncateFloat64ToFloat16(float64): float16; +extern macro TruncateFloat32ToFloat16(float32): float16; extern macro TruncateFloat64ToFloat32(float64): float32; extern macro TruncateHeapNumberValueToWord32(HeapNumber): int32; extern macro LoadJSArrayElementsMap( @@ -1350,6 +1356,8 @@ extern macro PointerConstant(constexpr RawPtr): RawPtr; extern macro SingleCharacterStringConstant(constexpr string): String; extern macro Float64SilenceNaN(float64): float64; +extern macro BitcastFloat16ToUint32(float16): uint32; +extern macro BitcastUint32ToFloat16(uint32): float16; extern macro BitcastWordToTaggedSigned(intptr): Smi; extern macro BitcastWordToTaggedSigned(uintptr): Smi; extern macro BitcastWordToTagged(intptr): Object; diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc index 6b01c1716834bd..87c3526d20ace4 100644 --- a/deps/v8/src/builtins/builtins-api.cc +++ b/deps/v8/src/builtins/builtins-api.cc @@ -103,15 +103,11 @@ V8_WARN_UNUSED_RESULT MaybeHandle HandleApiCallHelper( } } - Tagged raw_call_data = fun_data->call_code(kAcquireLoad); - if (!IsUndefined(raw_call_data, isolate)) { - DCHECK(IsCallHandlerInfo(raw_call_data)); - Tagged call_data = CallHandlerInfo::cast(raw_call_data); - Tagged data_obj = call_data->data(); - + if (fun_data->has_callback(isolate)) { + Tagged data_obj = fun_data->callback_data(kAcquireLoad); FunctionCallbackArguments custom(isolate, data_obj, raw_holder, *new_target, argv, argc); - Handle result = custom.Call(call_data); + Handle result = custom.Call(*fun_data); RETURN_EXCEPTION_IF_EXCEPTION(isolate, Object); if (result.is_null()) { @@ -236,16 +232,18 @@ HandleApiCallAsFunctionOrConstructorDelegate(Isolate* isolate, Tagged handler = constructor->shared()->api_func_data()->GetInstanceCallHandler(); DCHECK(!IsUndefined(handler, isolate)); - Tagged call_data = CallHandlerInfo::cast(handler); + Tagged templ = FunctionTemplateInfo::cast(handler); + DCHECK(templ->is_object_template_call_handler()); + DCHECK(templ->has_callback(isolate)); // Get the data for the call and perform the callback. 
Tagged result; { HandleScope scope(isolate); FunctionCallbackArguments custom( - isolate, call_data->data(), obj, new_target, + isolate, templ->callback_data(kAcquireLoad), obj, new_target, args.address_of_first_argument(), args.length() - 1); - Handle result_handle = custom.Call(call_data); + Handle result_handle = custom.Call(templ); if (result_handle.is_null()) { result = ReadOnlyRoots(isolate).undefined_value(); } else { diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc index 2ce9660930c132..3b479510702a3c 100644 --- a/deps/v8/src/builtins/builtins-array-gen.cc +++ b/deps/v8/src/builtins/builtins-array-gen.cc @@ -1406,9 +1406,9 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { // Check that the {index} is within range for the {array}. We handle all // kinds of JSArray's here, so we do the computation on Uint32. - TNode index32 = ChangeNumberToUint32(index); + TNode index32 = ChangeNonNegativeNumberToUint32(index); TNode length32 = - ChangeNumberToUint32(LoadJSArrayLength(CAST(array))); + ChangeNonNegativeNumberToUint32(LoadJSArrayLength(CAST(array))); GotoIfNot(Uint32LessThan(index32, length32), &set_done); StoreJSArrayIteratorNextIndex( iterator, ChangeUint32ToTagged(Uint32Add(index32, Uint32Constant(1)))); diff --git a/deps/v8/src/builtins/builtins-atomics-synchronization.cc b/deps/v8/src/builtins/builtins-atomics-synchronization.cc index cd09c218b88911..a28af5409f9b33 100644 --- a/deps/v8/src/builtins/builtins-atomics-synchronization.cc +++ b/deps/v8/src/builtins/builtins-atomics-synchronization.cc @@ -265,7 +265,7 @@ BUILTIN(AtomicsConditionNotify) { Handle js_condition = Handle::cast(js_condition_obj); return *isolate->factory()->NewNumberFromUint( - js_condition->Notify(isolate, count)); + JSAtomicsCondition::Notify(isolate, js_condition, count)); } } // namespace internal diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc index 580a02d3449164..c3bd2c9081f23d 100644 --- a/deps/v8/src/builtins/builtins-call-gen.cc +++ b/deps/v8/src/builtins/builtins-call-gen.cc @@ -717,9 +717,8 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate( GotoIfNot(IsSetWord32( LoadMapBitField(receiver_map)), &receiver_done); - TNode function_template_info_flags = - LoadAndUntagToWord32ObjectField(function_template_info, - FunctionTemplateInfo::kFlagOffset); + TNode function_template_info_flags = LoadObjectField( + function_template_info, FunctionTemplateInfo::kFlagOffset); Branch(IsSetWord32( function_template_info_flags), &receiver_done, &receiver_needs_access_check); @@ -772,39 +771,37 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate( } } - TNode call_code = CAST(LoadObjectField( - function_template_info, FunctionTemplateInfo::kCallCodeOffset)); + TNode callback_data = LoadObjectField( + function_template_info, FunctionTemplateInfo::kCallbackDataOffset); // If the function doesn't have an associated C++ code to execute, just // return the receiver as would an empty function do (see // HandleApiCallHelper). { Label if_continue(this); - GotoIfNot(IsUndefined(call_code), &if_continue); + GotoIfNot(IsTheHole(callback_data), &if_continue); args.PopAndReturn(receiver); Bind(&if_continue); } // Perform the actual API callback invocation via CallApiCallback. 
- TNode call_handler_info = CAST(call_code); switch (mode) { case CallFunctionTemplateMode::kGeneric: TailCallBuiltin(Builtin::kCallApiCallbackGeneric, context, TruncateIntPtrToInt32(args.GetLengthWithoutReceiver()), - topmost_script_having_context, call_handler_info, holder); + topmost_script_having_context, function_template_info, + holder); break; case CallFunctionTemplateMode::kCheckAccess: case CallFunctionTemplateMode::kCheckAccessAndCompatibleReceiver: case CallFunctionTemplateMode::kCheckCompatibleReceiver: { TNode callback_address = - LoadCallHandlerInfoJsCallbackPtr(call_handler_info); - TNode call_data = - LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset); + LoadFunctionTemplateInfoJsCallbackPtr(function_template_info); TailCallBuiltin(Builtin::kCallApiCallbackOptimized, context, callback_address, TruncateIntPtrToInt32(args.GetLengthWithoutReceiver()), - call_data, holder); + callback_data, holder); break; } } diff --git a/deps/v8/src/builtins/builtins-call-gen.h b/deps/v8/src/builtins/builtins-call-gen.h index 951016ab9d38d1..c618f61bb17b0a 100644 --- a/deps/v8/src/builtins/builtins-call-gen.h +++ b/deps/v8/src/builtins/builtins-call-gen.h @@ -75,7 +75,6 @@ class CallOrConstructBuiltinsAssembler : public CodeStubAssembler { const LazyNode& feedback_vector, TNode slot); - private: TNode GetCompatibleReceiver(TNode receiver, TNode signature, TNode context); diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc index 6fea5c37e8c2f2..e5e6026ce61632 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.cc +++ b/deps/v8/src/builtins/builtins-collections-gen.cc @@ -2782,9 +2782,10 @@ TNode WeakCollectionsBuiltinsAssembler::ShouldShrink( TNode WeakCollectionsBuiltinsAssembler::ValueIndexFromKeyIndex( TNode key_index) { - return IntPtrAdd(key_index, - IntPtrConstant(EphemeronHashTable::ShapeT::kEntryValueIndex - - EphemeronHashTable::kEntryKeyIndex)); + return IntPtrAdd( + key_index, + IntPtrConstant(EphemeronHashTable::TodoShape::kEntryValueIndex - + EphemeronHashTable::kEntryKeyIndex)); } TF_BUILTIN(WeakMapConstructor, WeakCollectionsBuiltinsAssembler) { diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index 8cdba0d032eea4..cf1ec0d4ad31f0 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -162,9 +162,9 @@ namespace internal { ASM(JSConstructEntry, JSEntry) \ ASM(JSRunMicrotasksEntry, RunMicrotasksEntry) \ /* Call a JSValue. */ \ - ASM(JSEntryTrampoline, JSTrampoline) \ + ASM(JSEntryTrampoline, JSEntry) \ /* Construct a JSValue. 
*/ \ - ASM(JSConstructEntryTrampoline, JSTrampoline) \ + ASM(JSConstructEntryTrampoline, JSEntry) \ ASM(ResumeGeneratorTrampoline, ResumeGenerator) \ \ /* String helpers */ \ @@ -657,9 +657,11 @@ namespace internal { TFH(LoadSuperIC, LoadWithReceiverAndVector) \ TFH(LoadSuperICBaseline, LoadWithReceiverBaseline) \ TFH(KeyedLoadIC, KeyedLoadWithVector) \ + TFH(EnumeratedKeyedLoadIC, EnumeratedKeyedLoad) \ TFH(KeyedLoadIC_Megamorphic, KeyedLoadWithVector) \ TFH(KeyedLoadICTrampoline, KeyedLoad) \ TFH(KeyedLoadICBaseline, KeyedLoadBaseline) \ + TFH(EnumeratedKeyedLoadICBaseline, EnumeratedKeyedLoadBaseline) \ TFH(KeyedLoadICTrampoline_Megamorphic, KeyedLoad) \ TFH(StoreGlobalIC, StoreGlobalWithVector) \ TFH(StoreGlobalICTrampoline, StoreGlobal) \ @@ -1014,8 +1016,8 @@ namespace internal { IF_WASM(TFC, WasmToJsWrapperCSA, WasmToJSWrapper) \ IF_WASM(TFC, WasmToJsWrapperInvalidSig, WasmToJSWrapper) \ IF_WASM(ASM, WasmSuspend, WasmSuspend) \ - IF_WASM(ASM, WasmResume, WasmDummy) \ - IF_WASM(ASM, WasmReject, WasmDummy) \ + IF_WASM(ASM, WasmResume, WasmDummyWithJSLinkage) \ + IF_WASM(ASM, WasmReject, WasmDummyWithJSLinkage) \ IF_WASM(ASM, WasmTrapHandlerLandingPad, WasmDummy) \ IF_WASM(ASM, WasmCompileLazy, WasmDummy) \ IF_WASM(ASM, WasmLiftoffFrameSetup, WasmDummy) \ diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc index 107c710bad9b55..ae9594599810be 100644 --- a/deps/v8/src/builtins/builtins-handler-gen.cc +++ b/deps/v8/src/builtins/builtins-handler-gen.cc @@ -212,6 +212,7 @@ TF_BUILTIN(ElementsTransitionAndStore_NoTransitionHandleCOW, V(INT16_ELEMENTS) \ V(UINT32_ELEMENTS) \ V(INT32_ELEMENTS) \ + V(FLOAT16_ELEMENTS) \ V(FLOAT32_ELEMENTS) \ V(FLOAT64_ELEMENTS) \ V(UINT8_CLAMPED_ELEMENTS) \ @@ -223,6 +224,7 @@ TF_BUILTIN(ElementsTransitionAndStore_NoTransitionHandleCOW, V(RAB_GSAB_INT16_ELEMENTS) \ V(RAB_GSAB_UINT32_ELEMENTS) \ V(RAB_GSAB_INT32_ELEMENTS) \ + V(RAB_GSAB_FLOAT16_ELEMENTS) \ V(RAB_GSAB_FLOAT32_ELEMENTS) \ V(RAB_GSAB_FLOAT64_ELEMENTS) \ V(RAB_GSAB_UINT8_CLAMPED_ELEMENTS) \ diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc index da645619dc5a74..0bbcbd858887c1 100644 --- a/deps/v8/src/builtins/builtins-ic-gen.cc +++ b/deps/v8/src/builtins/builtins-ic-gen.cc @@ -53,6 +53,16 @@ void Builtins::Generate_KeyedLoadIC(compiler::CodeAssemblerState* state) { AccessorAssembler assembler(state); assembler.GenerateKeyedLoadIC(); } +void Builtins::Generate_EnumeratedKeyedLoadIC( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateEnumeratedKeyedLoadIC(); +} +void Builtins::Generate_EnumeratedKeyedLoadICBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateEnumeratedKeyedLoadICBaseline(); +} void Builtins::Generate_KeyedLoadIC_Megamorphic( compiler::CodeAssemblerState* state) { AccessorAssembler assembler(state); diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index 9b3c69a4dbf5ee..40c6a5b4135235 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -11,7 +11,7 @@ #include "src/codegen/macro-assembler-inl.h" #include "src/common/globals.h" #include "src/execution/frame-constants.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #include "src/ic/accessor-assembler.h" #include "src/ic/keyed-store-generic.h" #include "src/logging/counters.h" @@ -137,7 +137,7 @@ 
class WriteBarrierCodeStubAssembler : public CodeStubAssembler { } TNode IsPageFlagSet(TNode object, int mask) { - TNode header = PageHeaderFromAddress(object); + TNode header = MemoryChunkFromAddress(object); TNode flags = UncheckedCast( Load(MachineType::Pointer(), header, IntPtrConstant(MemoryChunkLayout::kFlagsOffset))); @@ -155,7 +155,7 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { void GetMarkBit(TNode object, TNode* cell, TNode* mask) { - TNode page = PageFromAddress(object); + TNode page = PageMetadataFromAddress(object); TNode bitmap = IntPtrAdd( page, IntPtrConstant(MemoryChunkLayout::kMarkingBitmapOffset)); @@ -165,10 +165,10 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { int shift = MarkingBitmap::kBitsPerCellLog2 + kTaggedSizeLog2 - MarkingBitmap::kBytesPerCellLog2; r0 = WordShr(object, IntPtrConstant(shift)); - r0 = WordAnd( - r0, IntPtrConstant( - (MemoryChunkHeader::GetAlignmentMaskForAssembler() >> shift) & - ~(MarkingBitmap::kBytesPerCell - 1))); + r0 = WordAnd(r0, + IntPtrConstant( + (MemoryChunk::GetAlignmentMaskForAssembler() >> shift) & + ~(MarkingBitmap::kBytesPerCell - 1))); *cell = IntPtrAdd(bitmap, Signed(r0)); } { @@ -187,12 +187,12 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { void InsertIntoRememberedSet(TNode object, TNode slot, SaveFPRegsMode fp_mode) { Label slow_path(this), next(this); - TNode page_header = PageHeaderFromAddress(object); - TNode page = PageFromPageHeader(page_header); + TNode chunk = MemoryChunkFromAddress(object); + TNode page = PageMetadataFromMemoryChunk(chunk); // Load address of SlotSet TNode slot_set = LoadSlotSet(page, &slow_path); - TNode slot_offset = IntPtrSub(slot, page_header); + TNode slot_offset = IntPtrSub(slot, chunk); // Load bucket TNode bucket = LoadBucket(slot_set, slot_offset, &slow_path); @@ -208,7 +208,7 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { CallCFunctionWithCallerSavedRegisters( function, MachineTypeOf::value, fp_mode, std::make_pair(MachineTypeOf::value, page), - std::make_pair(MachineTypeOf::value, slot)); + std::make_pair(MachineTypeOf::value, slot_offset)); Goto(&next); } @@ -218,7 +218,7 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { TNode LoadSlotSet(TNode page, Label* slow_path) { TNode slot_set = UncheckedCast( Load(MachineType::Pointer(), page, - IntPtrConstant(MemoryChunk::kOldToNewSlotSetOffset))); + IntPtrConstant(MutablePageMetadata::kOldToNewSlotSetOffset))); GotoIf(WordEqual(slot_set, IntPtrConstant(0)), slow_path); return slot_set; } diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc index 21f3997fa14d9e..cb0109e75798e8 100644 --- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc +++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc @@ -51,6 +51,10 @@ class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler { void RunPromiseHook(Runtime::FunctionId id, TNode context, TNode promise_or_capability, TNode promiseHookFlags); +#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA + void SetupContinuationPreservedEmbedderData(TNode microtask); + void ClearContinuationPreservedEmbedderData(); +#endif }; TNode MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue( @@ -115,6 +119,27 @@ void MicrotaskQueueBuiltinsAssembler::PrepareForContext( SetCurrentContext(native_context); } +#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA +void MicrotaskQueueBuiltinsAssembler::SetupContinuationPreservedEmbedderData( + 
TNode microtask) { + TNode continuation_preserved_embedder_data = LoadObjectField( + microtask, Microtask::kContinuationPreservedEmbedderDataOffset); + Label continuation_preserved_data_done(this); + // The isolate's continuation preserved embedder data is cleared at the start + // of RunMicrotasks and after each microtask, so it only needs to be set if + // it's not undefined. + GotoIf(IsUndefined(continuation_preserved_embedder_data), + &continuation_preserved_data_done); + SetContinuationPreservedEmbedderData(continuation_preserved_embedder_data); + Goto(&continuation_preserved_data_done); + BIND(&continuation_preserved_data_done); +} + +void MicrotaskQueueBuiltinsAssembler::ClearContinuationPreservedEmbedderData() { + SetContinuationPreservedEmbedderData(UndefinedConstant()); +} +#endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA + void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( TNode current_context, TNode microtask) { CSA_DCHECK(this, TaggedIsNotSmi(microtask)); @@ -152,6 +177,9 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( TNode native_context = LoadNativeContext(microtask_context); PrepareForContext(native_context, &done); +#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA + SetupContinuationPreservedEmbedderData(microtask); +#endif TNode callable = LoadObjectField(microtask, CallableTask::kCallableOffset); { @@ -160,6 +188,9 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( } RewindEnteredContext(saved_entered_context_count); SetCurrentContext(current_context); +#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA + ClearContinuationPreservedEmbedderData(); +#endif Goto(&done); } @@ -169,6 +200,9 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( LoadObjectField(microtask, CallbackTask::kCallbackOffset); const TNode microtask_data = LoadObjectField(microtask, CallbackTask::kDataOffset); +#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA + SetupContinuationPreservedEmbedderData(microtask); +#endif // If this turns out to become a bottleneck because of the calls // to C++ via CEntry, we can choose to speed them up using a @@ -185,6 +219,9 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( CallRuntime(Runtime::kRunMicrotaskCallback, current_context, microtask_callback, microtask_data); } +#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA + ClearContinuationPreservedEmbedderData(); +#endif Goto(&done); } @@ -202,7 +239,9 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( LoadObjectField(microtask, PromiseResolveThenableJobTask::kThenOffset); const TNode thenable = LoadObjectField( microtask, PromiseResolveThenableJobTask::kThenableOffset); - +#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA + SetupContinuationPreservedEmbedderData(microtask); +#endif RunAllPromiseHooks(PromiseHookType::kBefore, microtask_context, CAST(promise_to_resolve)); @@ -217,6 +256,9 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( RewindEnteredContext(saved_entered_context_count); SetCurrentContext(current_context); +#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA + ClearContinuationPreservedEmbedderData(); +#endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA Goto(&done); } @@ -236,27 +278,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset)); #ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA - TNode isolate_preserved_embedder_data = LoadObjectField( - microtask, PromiseReactionJobTask:: - 
kIsolateContinuationPreservedEmbedderDataOffset); - Label isolate_preserved_data_done(this); - GotoIf(IsUndefined(isolate_preserved_embedder_data), - &isolate_preserved_data_done); - SetContinuationPreservedEmbedderData(isolate_preserved_embedder_data); - Goto(&isolate_preserved_data_done); - BIND(&isolate_preserved_data_done); - - TNode context_preserved_embedder_data = LoadObjectField( - microtask, PromiseReactionJobTask:: - kContextContinuationPreservedEmbedderDataOffset); - Label context_preserved_data_done(this); - GotoIf(IsUndefined(context_preserved_embedder_data), - &context_preserved_data_done); - StoreContextElement(native_context, - Context::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX, - context_preserved_embedder_data); - Goto(&context_preserved_data_done); - BIND(&context_preserved_data_done); + SetupContinuationPreservedEmbedderData(microtask); #endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA // Run the promise before/debug hook if enabled. @@ -274,21 +296,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( promise_or_capability); #ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA - Label isolate_preserved_data_reset_done(this); - GotoIf(IsUndefined(isolate_preserved_embedder_data), - &isolate_preserved_data_reset_done); - SetContinuationPreservedEmbedderData(UndefinedConstant()); - Goto(&isolate_preserved_data_reset_done); - BIND(&isolate_preserved_data_reset_done); - - Label context_preserved_data_reset_done(this); - GotoIf(IsUndefined(context_preserved_embedder_data), - &context_preserved_data_reset_done); - StoreContextElement(native_context, - Context::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX, - UndefinedConstant()); - Goto(&context_preserved_data_reset_done); - BIND(&context_preserved_data_reset_done); + ClearContinuationPreservedEmbedderData(); #endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA RewindEnteredContext(saved_entered_context_count); @@ -312,27 +320,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset)); #ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA - TNode isolate_preserved_embedder_data = LoadObjectField( - microtask, PromiseReactionJobTask:: - kIsolateContinuationPreservedEmbedderDataOffset); - Label isolate_preserved_data_done(this); - GotoIf(IsUndefined(isolate_preserved_embedder_data), - &isolate_preserved_data_done); - SetContinuationPreservedEmbedderData(isolate_preserved_embedder_data); - Goto(&isolate_preserved_data_done); - BIND(&isolate_preserved_data_done); - - TNode context_preserved_embedder_data = LoadObjectField( - microtask, PromiseReactionJobTask:: - kContextContinuationPreservedEmbedderDataOffset); - Label context_preserved_data_done(this); - GotoIf(IsUndefined(context_preserved_embedder_data), - &context_preserved_data_done); - StoreContextElement(native_context, - Context::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX, - context_preserved_embedder_data); - Goto(&context_preserved_data_done); - BIND(&context_preserved_data_done); + SetupContinuationPreservedEmbedderData(microtask); #endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA // Run the promise before/debug hook if enabled. 
@@ -350,21 +338,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( promise_or_capability); #ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA - Label isolate_preserved_data_reset_done(this); - GotoIf(IsUndefined(isolate_preserved_embedder_data), - &isolate_preserved_data_reset_done); - SetContinuationPreservedEmbedderData(UndefinedConstant()); - Goto(&isolate_preserved_data_reset_done); - BIND(&isolate_preserved_data_reset_done); - - Label context_preserved_data_reset_done(this); - GotoIf(IsUndefined(context_preserved_embedder_data), - &context_preserved_data_reset_done); - StoreContextElement(native_context, - Context::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX, - UndefinedConstant()); - Goto(&context_preserved_data_reset_done); - BIND(&context_preserved_data_reset_done); + ClearContinuationPreservedEmbedderData(); #endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA RewindEnteredContext(saved_entered_context_count); diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc index e3befac11c7f92..7f54ca5aee7184 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.cc +++ b/deps/v8/src/builtins/builtins-regexp-gen.cc @@ -322,13 +322,11 @@ TNode RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( // - Receiver has no interceptors Label add_dictionary_property_slow(this, Label::kDeferred); TVARIABLE(IntPtrT, var_name_index); - Label add_name_entry_find_index(this), - add_name_entry_known_index(this, &var_name_index), + Label add_name_entry(this, &var_name_index), duplicate_name(this, &var_name_index), next(this); NameDictionaryLookup( CAST(properties), name, &duplicate_name, &var_name_index, - &add_name_entry_find_index, kFindExisting, - &add_name_entry_known_index); + &add_name_entry, kFindExistingOrInsertionIndex); BIND(&duplicate_name); GotoIf(IsUndefined(capture), &next); CSA_DCHECK(this, @@ -339,12 +337,7 @@ TNode RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( var_name_index.value(), capture); Goto(&next); - BIND(&add_name_entry_find_index); - FindInsertionEntry(CAST(properties), name, - &var_name_index); - Goto(&add_name_entry_known_index); - - BIND(&add_name_entry_known_index); + BIND(&add_name_entry); AddToDictionary(CAST(properties), name, capture, &add_dictionary_property_slow, var_name_index.value()); diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc index 42e594a0ce9252..1c0fca4db2c8e7 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.cc +++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc @@ -488,6 +488,10 @@ void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromNumeric( StoreElement(data_ptr, elements_kind, index, TruncateTaggedToWord32(context, value)); break; + case FLOAT16_ELEMENTS: + StoreElement(data_ptr, elements_kind, index, + TruncateFloat64ToFloat16(LoadHeapNumberValue(CAST(value)))); + break; case FLOAT32_ELEMENTS: StoreElement(data_ptr, elements_kind, index, TruncateFloat64ToFloat32(LoadHeapNumberValue(CAST(value)))); @@ -511,12 +515,13 @@ void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromPreparedValue( TNode context, TNode typed_array, TNode index, TNode prepared_value, ElementsKind elements_kind, Label* if_detached_or_out_of_bounds) { - static_assert( - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value, - "Only Word32T, Float32T, Float64T or BigInt values are allowed"); + static_assert(std::is_same::value || + std::is_same::value || + 
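The microtask-queue changes above replace the per-reaction save/restore blocks with the shared SetupContinuationPreservedEmbedderData and ClearContinuationPreservedEmbedderData helpers: the data is copied from the microtask onto the isolate only when present, and is unconditionally cleared after the task runs. A rough standalone model of that lifecycle, using invented stand-in types rather than V8 objects:

#include <functional>
#include <optional>
#include <string>

// Invented stand-ins for the isolate slot and a queued microtask.
struct FakeIsolate {
  std::optional<std::string> continuation_preserved_embedder_data;
};
struct FakeMicrotask {
  std::optional<std::string> continuation_preserved_embedder_data;
  std::function<void()> run;
};

void RunSingleMicrotask(FakeIsolate& isolate, const FakeMicrotask& task) {
  // The slot is already empty here (cleared after the previous microtask),
  // so it only needs to be written when this task actually carries data.
  if (task.continuation_preserved_embedder_data.has_value()) {
    isolate.continuation_preserved_embedder_data =
        task.continuation_preserved_embedder_data;
  }
  task.run();
  // Always reset afterwards, mirroring ClearContinuationPreservedEmbedderData().
  isolate.continuation_preserved_embedder_data.reset();
}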
std::is_same::value || + std::is_same::value || + std::is_same::value, + "Only Word32T, Float16T, Float32T, Float64T or BigInt values " + "are allowed"); // ToNumber/ToBigInt (or other functions called by the upper level) may // execute JavaScript code, which could detach the TypedArray's buffer or make // the TypedArray out of bounds. @@ -548,6 +553,14 @@ void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged( if_detached_or_out_of_bounds); break; } + case FLOAT16_ELEMENTS: { + auto prepared_value = PrepareValueForWriteToTypedArray( + value, elements_kind, context); + StoreJSTypedArrayElementFromPreparedValue(context, typed_array, index, + prepared_value, elements_kind, + if_detached_or_out_of_bounds); + break; + } case FLOAT32_ELEMENTS: { auto prepared_value = PrepareValueForWriteToTypedArray( value, elements_kind, context); diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc index d3a804e97e5af2..77e9ec1aa0788c 100644 --- a/deps/v8/src/builtins/builtins-wasm-gen.cc +++ b/deps/v8/src/builtins/builtins-wasm-gen.cc @@ -74,10 +74,10 @@ TNode WasmBuiltinsAssembler::LoadTablesFromInstanceData( WasmTrustedInstanceData::kTablesOffset); } -TNode WasmBuiltinsAssembler::LoadInternalFunctionsFromInstanceData( +TNode WasmBuiltinsAssembler::LoadFuncRefsFromInstanceData( TNode trusted_data) { - return LoadObjectField( - trusted_data, WasmTrustedInstanceData::kWasmInternalFunctionsOffset); + return LoadObjectField(trusted_data, + WasmTrustedInstanceData::kFuncRefsOffset); } TNode WasmBuiltinsAssembler::LoadManagedObjectMapsFromInstanceData( diff --git a/deps/v8/src/builtins/builtins-wasm-gen.h b/deps/v8/src/builtins/builtins-wasm-gen.h index 7dffb8f400af68..9d404787cc39e2 100644 --- a/deps/v8/src/builtins/builtins-wasm-gen.h +++ b/deps/v8/src/builtins/builtins-wasm-gen.h @@ -27,7 +27,7 @@ class WasmBuiltinsAssembler : public CodeStubAssembler { TNode LoadTablesFromInstanceData(TNode); - TNode LoadInternalFunctionsFromInstanceData( + TNode LoadFuncRefsFromInstanceData( TNode); TNode LoadManagedObjectMapsFromInstanceData( diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc index 6a909d15ee7128..88e989513ff594 100644 --- a/deps/v8/src/builtins/builtins.cc +++ b/deps/v8/src/builtins/builtins.cc @@ -216,6 +216,8 @@ const char* Builtins::NameForStackTrace(Isolate* isolate, Builtin builtin) { return "DataView.prototype.getBigInt64"; case Builtin::kDataViewPrototypeGetBigUint64: return "DataView.prototype.getBigUint64"; + case Builtin::kDataViewPrototypeGetFloat16: + return "DataView.prototype.getFloat16"; case Builtin::kDataViewPrototypeGetFloat32: return "DataView.prototype.getFloat32"; case Builtin::kDataViewPrototypeGetFloat64: @@ -236,6 +238,8 @@ const char* Builtins::NameForStackTrace(Isolate* isolate, Builtin builtin) { return "DataView.prototype.setBigInt64"; case Builtin::kDataViewPrototypeSetBigUint64: return "DataView.prototype.setBigUint64"; + case Builtin::kDataViewPrototypeSetFloat16: + return "DataView.prototype.setFloat16"; case Builtin::kDataViewPrototypeSetFloat32: return "DataView.prototype.setFloat32"; case Builtin::kDataViewPrototypeSetFloat64: @@ -471,17 +475,18 @@ CodeEntrypointTag Builtins::EntrypointTagFor(Builtin builtin) { Kind kind = Builtins::KindOf(builtin); switch (kind) { + case CPP: + case TFJ: + return kJSEntrypointTag; case BCH: return kBytecodeHandlerEntrypointTag; + case TFC: + case TFS: case TFH: - return kICHandlerEntrypointTag; case ASM: - // TODO(saelo) consider using this approach 
for the other kinds as well. return CallInterfaceDescriptorFor(builtin).tag(); - default: - // TODO(saelo): use more fine-grained tags here. - return kDefaultCodeEntrypointTag; } + UNREACHABLE(); } // static diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq index a79e6816e6f5c3..34e7934963e017 100644 --- a/deps/v8/src/builtins/convert.tq +++ b/deps/v8/src/builtins/convert.tq @@ -355,6 +355,13 @@ Convert(f: float64): float32 { Convert(n: Number): float32 { return Convert(ChangeNumberToFloat64(n)); } +Convert(n: Number): float16 { + return TruncateFloat64ToFloat16(ChangeNumberToFloat64(n)); +} + +Convert(n: float16): float64 { + return ChangeFloat16ToFloat64(n); +} Convert(n: int32): float32 { return RoundInt32ToFloat32(n); } diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq index 5358fad048b083..0dce66dfaae037 100644 --- a/deps/v8/src/builtins/data-view.tq +++ b/deps/v8/src/builtins/data-view.tq @@ -24,6 +24,8 @@ macro MakeDataViewGetterNameString(kind: constexpr ElementsKind): String { return 'DataView.prototype.getUint32'; } else if constexpr (kind == ElementsKind::INT32_ELEMENTS) { return 'DataView.prototype.getInt32'; + } else if constexpr (kind == ElementsKind::FLOAT16_ELEMENTS) { + return 'DataView.prototype.getFloat16'; } else if constexpr (kind == ElementsKind::FLOAT32_ELEMENTS) { return 'DataView.prototype.getFloat32'; } else if constexpr (kind == ElementsKind::FLOAT64_ELEMENTS) { @@ -50,6 +52,8 @@ macro MakeDataViewSetterNameString(kind: constexpr ElementsKind): String { return 'DataView.prototype.setUint32'; } else if constexpr (kind == ElementsKind::INT32_ELEMENTS) { return 'DataView.prototype.setInt32'; + } else if constexpr (kind == ElementsKind::FLOAT16_ELEMENTS) { + return 'DataView.prototype.setFloat16'; } else if constexpr (kind == ElementsKind::FLOAT32_ELEMENTS) { return 'DataView.prototype.setFloat32'; } else if constexpr (kind == ElementsKind::FLOAT64_ELEMENTS) { @@ -205,6 +209,23 @@ macro LoadDataView32( unreachable; } } +macro LoadDataViewFloat16( + buffer: JSArrayBuffer, offset: uintptr, + requestedLittleEndian: bool): Number { + const dataPointer: RawPtr = buffer.backing_store_ptr; + const b0: uint32 = LoadUint8(dataPointer, offset); + const b1: uint32 = LoadUint8(dataPointer, offset + 1); + let result: uint32; + + if (requestedLittleEndian) { + result = (b1 << 8) | b0; + } else { + result = (b0 << 8) | b1; + } + + const floatRes: float64 = Convert(BitcastUint32ToFloat16(result)); + return Convert(floatRes); +} macro LoadDataViewFloat64( buffer: JSArrayBuffer, offset: uintptr, @@ -449,6 +470,8 @@ transitioning macro DataViewGet( return LoadDataView32(buffer, bufferIndex, littleEndian, kind); } else if constexpr (kind == ElementsKind::INT32_ELEMENTS) { return LoadDataView32(buffer, bufferIndex, littleEndian, kind); + } else if constexpr (kind == ElementsKind::FLOAT16_ELEMENTS) { + return LoadDataViewFloat16(buffer, bufferIndex, littleEndian); } else if constexpr (kind == ElementsKind::FLOAT32_ELEMENTS) { return LoadDataView32(buffer, bufferIndex, littleEndian, kind); } else if constexpr (kind == ElementsKind::FLOAT64_ELEMENTS) { @@ -511,6 +534,15 @@ transitioning javascript builtin DataViewPrototypeGetInt32( context, receiver, offset, isLittleEndian, ElementsKind::INT32_ELEMENTS); } +transitioning javascript builtin DataViewPrototypeGetFloat16( + js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { + const offset: JSAny = arguments[0]; + const isLittleEndian: JSAny = arguments[1]; + 
return DataViewGet( + context, receiver, offset, isLittleEndian, + ElementsKind::FLOAT16_ELEMENTS); +} + transitioning javascript builtin DataViewPrototypeGetFloat32( js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { const offset: JSAny = arguments[0]; @@ -777,6 +809,11 @@ transitioning macro DataViewSet( StoreDataView16( buffer, bufferIndex, TruncateFloat64ToWord32(doubleValue), littleEndian); + } else if constexpr (kind == ElementsKind::FLOAT16_ELEMENTS) { + const floatValue: float16 = TruncateFloat64ToFloat16(doubleValue); + StoreDataView16( + buffer, bufferIndex, BitcastFloat16ToUint32(floatValue), + littleEndian); } else if constexpr ( kind == ElementsKind::UINT32_ELEMENTS || kind == ElementsKind::INT32_ELEMENTS) { @@ -857,6 +894,16 @@ transitioning javascript builtin DataViewPrototypeSetInt32( ElementsKind::INT32_ELEMENTS); } +transitioning javascript builtin DataViewPrototypeSetFloat16( + js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { + const offset: JSAny = arguments[0]; + const value: JSAny = arguments[1]; + const isLittleEndian: JSAny = arguments[2]; + return DataViewSet( + context, receiver, offset, value, isLittleEndian, + ElementsKind::FLOAT16_ELEMENTS); +} + transitioning javascript builtin DataViewPrototypeSetFloat32( js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { const offset: JSAny = arguments[0]; diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc index 7e29337e1bc8a8..1e6ddbaef78611 100644 --- a/deps/v8/src/builtins/ia32/builtins-ia32.cc +++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc @@ -4085,7 +4085,8 @@ void SwitchToTheCentralStackIfNeeded(MacroAssembler* masm, int edi_slot_index) { Immediate(ER::isolate_address(masm->isolate()))); __ mov(Operand(esp, 1 * kSystemPointerSize), kOldSPRegister); - __ CallCFunction(ER::wasm_switch_to_the_central_stack(), 2); + __ CallCFunction(ER::wasm_switch_to_the_central_stack(), 2, + SetIsolateDataSlots::kNo); __ mov(central_stack_sp, kReturnRegister0); __ pop(kRuntimeCallFunctionRegister); @@ -4131,7 +4132,8 @@ void SwitchFromTheCentralStackIfNeeded(MacroAssembler* masm) { __ PrepareCallCFunction(1, ecx); __ Move(Operand(esp, 0 * kSystemPointerSize), Immediate(ER::isolate_address(masm->isolate()))); - __ CallCFunction(ER::wasm_switch_from_the_central_stack(), 1); + __ CallCFunction(ER::wasm_switch_from_the_central_stack(), 1, + SetIsolateDataSlots::kNo); __ pop(kReturnRegister1); __ pop(kReturnRegister0); @@ -4277,7 +4279,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ mov(Operand(esp, 1 * kSystemPointerSize), Immediate(0)); // argv. __ Move(esi, Immediate(ER::isolate_address(masm->isolate()))); __ mov(Operand(esp, 2 * kSystemPointerSize), esi); - __ CallCFunction(find_handler, 3); + __ CallCFunction(find_handler, 3, SetIsolateDataSlots::kNo); } // Retrieve the handler context, SP and FP. 
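LoadDataViewFloat16 above assembles the two raw bytes into a 16-bit pattern according to the requested endianness and then widens the half-precision value to float64, which is what BitcastUint32ToFloat16 plus ChangeFloat16ToFloat64 do in Torque. A plain C++ sketch of the equivalent read path, assuming the backing store is ordinary memory and that bounds checks already happened; the helper names are invented:

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>

// Widen an IEEE 754 binary16 bit pattern to double (always exact).
double HalfBitsToDouble(uint16_t h) {
  int sign = (h >> 15) & 1;
  int exp = (h >> 10) & 0x1F;
  int frac = h & 0x3FF;
  double value;
  if (exp == 0) {                          // zero or subnormal: frac * 2^-24
    value = std::ldexp(frac, -24);
  } else if (exp == 0x1F) {                // infinity or NaN
    value = frac ? std::numeric_limits<double>::quiet_NaN()
                 : std::numeric_limits<double>::infinity();
  } else {                                 // normal: (1024 + frac) * 2^(exp - 25)
    value = std::ldexp(1024 + frac, exp - 25);
  }
  return sign ? -value : value;
}

// Equivalent of DataView.prototype.getFloat16(offset, littleEndian).
double GetFloat16(const uint8_t* data, size_t offset, bool little_endian) {
  uint16_t b0 = data[offset];
  uint16_t b1 = data[offset + 1];
  uint16_t bits = little_endian ? static_cast<uint16_t>((b1 << 8) | b0)
                                : static_cast<uint16_t>((b0 << 8) | b1);
  return HalfBitsToDouble(bits);
}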
@@ -4434,7 +4436,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); topmost_script_having_context = CallApiCallbackGenericDescriptor:: TopmostScriptHavingContextRegister(); - callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + callback = + CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister(); holder = CallApiCallbackGenericDescriptor::HolderRegister(); break; @@ -4494,7 +4497,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, __ PushRoot(RootIndex::kUndefinedValue); // kNewTarget switch (mode) { case CallApiCallbackMode::kGeneric: - __ push(FieldOperand(callback, CallHandlerInfo::kDataOffset)); + __ push( + FieldOperand(callback, FunctionTemplateInfo::kCallbackDataOffset)); break; case CallApiCallbackMode::kOptimizedNoProfiling: @@ -4548,13 +4552,13 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // Target parameter. static_assert(ApiCallbackExitFrameConstants::kTargetOffset == 2 * kSystemPointerSize); - __ push(FieldOperand(callback, CallHandlerInfo::kOwnerTemplateOffset)); + __ push(callback); __ PushReturnAddressFrom(argc); __ mov(api_function_address, FieldOperand(callback, - CallHandlerInfo::kMaybeRedirectedCallbackOffset)); + FunctionTemplateInfo::kMaybeRedirectedCallbackOffset)); __ EnterExitFrame(kApiArgc + kApiStackSpace, StackFrame::API_CALLBACK_EXIT, api_function_address); @@ -4692,7 +4696,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { static constexpr int kNameOnStackSize = 1; static constexpr int kStackUnwindSpace = PCA::kArgsLength + kNameOnStackSize; - // The API function takes a name handle and v8::PropertyCallbackInfo + // The API function takes a name local handle and v8::PropertyCallbackInfo // reference, allocate them in non-GCed space of the exit frame. 
static constexpr int kApiArgc = 2; static constexpr int kApiArg0Offset = 0 * kSystemPointerSize; @@ -4714,8 +4718,12 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { Operand info_object = ExitFrameStackSlotOperand(kApiArgsSize); __ mov(info_object, args_array); - __ RecordComment("Handle"); + __ RecordComment("Local"); +#ifdef V8_ENABLE_DIRECT_LOCAL + __ mov(args_array, Operand(args_array, -kSystemPointerSize)); +#else __ sub(args_array, Immediate(kSystemPointerSize)); +#endif __ mov(ExitFrameStackSlotOperand(kApiArg0Offset), args_array); args_array = no_reg; __ RecordComment("&v8::PropertyCallbackInfo::args_"); diff --git a/deps/v8/src/builtins/js-to-js.tq b/deps/v8/src/builtins/js-to-js.tq index 691271e9caf0c5..5c78db71f9af9b 100644 --- a/deps/v8/src/builtins/js-to-js.tq +++ b/deps/v8/src/builtins/js-to-js.tq @@ -4,8 +4,6 @@ namespace runtime { extern runtime IsWasmExternalFunction(NoContext, JSAny): Boolean; -extern runtime TierUpJSToJSWrapper( - NoContext, WasmApiFunctionRef, WasmFunctionData): JSAny; } // namespace runtime namespace wasm { @@ -77,12 +75,6 @@ transitioning javascript builtin JSToJSWrapper( UnsafeCast(target.shared_function_info.function_data); const ref = UnsafeCast(functionData.internal.ref); - dcheck(ref.wrapper_budget > 0); - ref.wrapper_budget = ref.wrapper_budget - 1; - if (ref.wrapper_budget == 0) { - runtime::TierUpJSToJSWrapper(kNoContext, ref, functionData); - } - const signaturePod = &ref.sig.bytes; const serializedSig = torque_internal::unsafe::NewConstSlice( signaturePod.object, signaturePod.offset, diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc index 7ac1b6d083deec..ed3b109840f352 100644 --- a/deps/v8/src/builtins/loong64/builtins-loong64.cc +++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc @@ -346,9 +346,8 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, #endif // !V8_JITLESS __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE)); - __ LoadTrustedPointerField( - bytecode, FieldMemOperand(data, InterpreterData::kBytecodeArrayOffset), - kBytecodeArrayIndirectPointerTag); + __ LoadProtectedPointerField( + bytecode, FieldMemOperand(data, InterpreterData::kBytecodeArrayOffset)); __ bind(&done); } @@ -1334,7 +1333,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ Move(a2, kInterpreterBytecodeArrayRegister); static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); __ ReplaceClosureCodeWithOptimizedCode(a2, closure); - __ JumpCodeObject(a2); + __ JumpCodeObject(a2, kJSEntrypointTag); __ bind(&install_baseline_code); __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode); @@ -1712,9 +1711,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ JumpIfObjectType(&builtin_trampoline, ne, t0, INTERPRETER_DATA_TYPE, kInterpreterDispatchTableRegister); - __ LoadTaggedField( + __ LoadProtectedPointerField( t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset)); - __ LoadCodeInstructionStart(t0, t0); + __ LoadCodeInstructionStart(t0, t0, kJSEntrypointTag); __ Branch(&trampoline_loaded); __ bind(&builtin_trampoline); @@ -1971,7 +1970,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, // Load deoptimization data from the code object. 
// = [#deoptimization_data_offset] - __ LoadTaggedField( + __ LoadProtectedPointerField( a1, MemOperand(maybe_target_code, Code::kDeoptimizationDataOrInterpreterDataOffset - kHeapObjectTag)); @@ -1979,11 +1978,12 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, // Load the OSR entrypoint offset from the deoptimization data. // = [#header_size + #osr_pc_offset] __ SmiUntagField(a1, - MemOperand(a1, FixedArray::OffsetOfElementAt( + MemOperand(a1, TrustedFixedArray::OffsetOfElementAt( DeoptimizationData::kOsrPcOffsetIndex) - kHeapObjectTag)); - __ LoadCodeInstructionStart(maybe_target_code, maybe_target_code); + __ LoadCodeInstructionStart(maybe_target_code, maybe_target_code, + kJSEntrypointTag); // Compute the target address = code_entry + osr_offset // = + @@ -3152,7 +3152,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ mov(a0, zero_reg); __ mov(a1, zero_reg); __ li(a2, ExternalReference::isolate_address(masm->isolate())); - __ CallCFunction(find_handler, 3); + __ CallCFunction(find_handler, 3, SetIsolateDataSlots::kNo); } // Retrieve the handler context, SP and FP. @@ -3286,7 +3286,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); topmost_script_having_context = CallApiCallbackGenericDescriptor:: TopmostScriptHavingContextRegister(); - callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + callback = + CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister(); holder = CallApiCallbackGenericDescriptor::HolderRegister(); break; @@ -3359,7 +3360,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, switch (mode) { case CallApiCallbackMode::kGeneric: __ LoadTaggedField( - scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset)); + scratch2, + FieldMemOperand(callback, FunctionTemplateInfo::kCallbackDataOffset)); __ St_d(scratch2, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); break; @@ -3412,16 +3414,13 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // Target parameter. static_assert(ApiCallbackExitFrameConstants::kTargetOffset == 2 * kSystemPointerSize); - __ LoadTaggedField( - scratch, - FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset)); - __ St_d(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + __ St_d(callback, MemOperand(sp, 0 * kSystemPointerSize)); __ LoadExternalPointerField( api_function_address, FieldMemOperand(callback, - CallHandlerInfo::kMaybeRedirectedCallbackOffset), - kCallHandlerInfoCallbackTag); + FunctionTemplateInfo::kMaybeRedirectedCallbackOffset), + kFunctionTemplateInfoCallbackTag); __ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT); } else { @@ -3556,8 +3555,14 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { __ RecordComment( "Load address of v8::PropertyAccessorInfo::args_ array and name handle."); - // name_arg = Handle(&name), name value was pushed to GC-ed stack space. +#ifdef V8_ENABLE_DIRECT_LOCAL + // name_arg = Local(name), name value was pushed to GC-ed stack space. + __ mov(name_arg, scratch); + USE(kNameStackIndex); +#else + // name_arg = Local(&name), name value was pushed to GC-ed stack space. 
__ Add_d(name_arg, sp, Operand(kNameStackIndex * kSystemPointerSize)); +#endif // property_callback_info_arg = v8::PCI::args_ (= &ShouldThrow) __ Add_d(property_callback_info_arg, sp, Operand(kPCAStackIndex * kSystemPointerSize)); @@ -3951,7 +3956,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ PrepareCallCFunction(3, 0, a4); __ CallCFunction(get_baseline_pc, 3, 0); } - __ LoadCodeInstructionStart(code_obj, code_obj); + __ LoadCodeInstructionStart(code_obj, code_obj, kJSEntrypointTag); __ Add_d(code_obj, code_obj, kReturnRegister0); __ Pop(kInterpreterAccumulatorRegister); diff --git a/deps/v8/src/builtins/math.tq b/deps/v8/src/builtins/math.tq index 5a64d533517ee3..edffab76c74490 100644 --- a/deps/v8/src/builtins/math.tq +++ b/deps/v8/src/builtins/math.tq @@ -282,6 +282,14 @@ transitioning javascript builtin MathFround( return Convert(x64); } +// ES6 #sec-math.f16round +transitioning javascript builtin MathF16round( + js-implicit context: NativeContext)(x: JSAny): Number { + const x16 = Convert(ToNumber_Inline(x)); + const x64 = Convert(x16); + return Convert(x64); +} + // ES6 #sec-math.imul transitioning javascript builtin MathImul( js-implicit context: NativeContext)(x: JSAny, y: JSAny): Number { diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index 311b0ddffa5217..10d6990961bfee 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -1277,7 +1277,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ Move(a2, kInterpreterBytecodeArrayRegister); static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); __ ReplaceClosureCodeWithOptimizedCode(a2, closure, t0, t1); - __ JumpCodeObject(a2); + __ JumpCodeObject(a2, kJSEntrypointTag); __ bind(&install_baseline_code); __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode); @@ -1653,7 +1653,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { Operand(INTERPRETER_DATA_TYPE)); __ Ld(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset)); - __ LoadCodeInstructionStart(t0, t0); + __ LoadCodeInstructionStart(t0, t0, kJSEntrypointTag); __ Branch(&trampoline_loaded); __ bind(&builtin_trampoline); @@ -1916,7 +1916,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, DeoptimizationData::kOsrPcOffsetIndex) - kHeapObjectTag)); - __ LoadCodeInstructionStart(maybe_target_code, maybe_target_code); + __ LoadCodeInstructionStart(maybe_target_code, maybe_target_code, + kJSEntrypointTag); // Compute the target address = code_entry + osr_offset // = + @@ -3268,7 +3269,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); topmost_script_having_context = CallApiCallbackGenericDescriptor:: TopmostScriptHavingContextRegister(); - callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + callback = + CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister(); holder = CallApiCallbackGenericDescriptor::HolderRegister(); break; @@ -3340,7 +3342,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // kData. 
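The new MathF16round builtin in math.tq converts ToNumber(x) to float16 and immediately widens it back, so the observable result is the nearest half-precision value expressed as a double. A hedged sketch of that rounding step; the helper below is illustrative rather than V8's TruncateFloat64ToFloat16 and assumes the default round-to-nearest-even floating-point mode:

#include <cmath>
#include <cstdint>

// Nearest IEEE 754 binary16 bit pattern for a double, ties to even.
uint16_t DoubleToHalfBits(double x) {
  uint16_t sign = std::signbit(x) ? 0x8000u : 0u;
  if (std::isnan(x)) return 0x7E00u;                  // canonical quiet NaN
  double a = std::fabs(x);
  if (std::isinf(x)) return sign | 0x7C00u;           // +/- infinity
  if (a < std::ldexp(1.0, -14)) {                     // zero and subnormal range
    return sign | static_cast<uint16_t>(std::nearbyint(std::ldexp(a, 24)));
  }
  int e = std::ilogb(a);                              // floor(log2(a))
  double m = std::nearbyint(std::ldexp(a, 10 - e));   // mantissa in [1024, 2048]
  if (m == 2048.0) { m = 1024.0; ++e; }               // rounding bumped the exponent
  if (e > 15) return sign | 0x7C00u;                  // overflow to infinity
  return sign |
         static_cast<uint16_t>(((e + 15) << 10) | (static_cast<int>(m) - 1024));
}

// For example, Math.f16round(1.337) yields 1.3369140625, i.e. 1369/1024,
// the closest half-precision value widened back to double.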
switch (mode) { case CallApiCallbackMode::kGeneric: - __ Ld(scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset)); + __ Ld(scratch2, FieldMemOperand( + callback, FunctionTemplateInfo::kCallbackDataOffset)); __ Sd(scratch2, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); break; @@ -3393,13 +3396,11 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // Target parameter. static_assert(ApiCallbackExitFrameConstants::kTargetOffset == 2 * kSystemPointerSize); - __ Ld(scratch, - FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset)); - __ Sd(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + __ Sd(callback, MemOperand(sp, 0 * kSystemPointerSize)); __ Ld(api_function_address, - FieldMemOperand(callback, - CallHandlerInfo::kMaybeRedirectedCallbackOffset)); + FieldMemOperand( + callback, FunctionTemplateInfo::kMaybeRedirectedCallbackOffset)); __ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT); } else { @@ -3533,8 +3534,14 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { __ RecordComment( "Load address of v8::PropertyAccessorInfo::args_ array and name handle."); - // name_arg = Handle(&name), name value was pushed to GC-ed stack space. +#ifdef V8_ENABLE_DIRECT_LOCAL + // name_arg = Local(name), name value was pushed to GC-ed stack space. + __ mov(name_arg, scratch); + USE(kNameStackIndex); +#else + // name_arg = Local(&name), name value was pushed to GC-ed stack space. __ Daddu(name_arg, sp, Operand(kNameStackIndex * kSystemPointerSize)); +#endif // property_callback_info_arg = v8::PCI::args_ (= &ShouldThrow) __ Daddu(property_callback_info_arg, sp, Operand(kPCAStackIndex * kSystemPointerSize)); @@ -3932,7 +3939,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ PrepareCallCFunction(3, 0, a4); __ CallCFunction(get_baseline_pc, 3, 0); } - __ LoadCodeInstructionStart(code_obj, code_obj); + __ LoadCodeInstructionStart(code_obj, code_obj, kJSEntrypointTag); __ Daddu(code_obj, code_obj, kReturnRegister0); __ Pop(kInterpreterAccumulatorRegister); diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index 62f7b35518b682..5d138cf817a131 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -3382,7 +3382,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ li(r3, Operand::Zero()); __ li(r4, Operand::Zero()); __ Move(r5, ExternalReference::isolate_address(masm->isolate())); - __ CallCFunction(find_handler, 3); + __ CallCFunction(find_handler, 3, SetIsolateDataSlots::kNo); } // Retrieve the handler context, SP and FP. 
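Several CEntry exception-propagation paths above now pass SetIsolateDataSlots::kNo to CallCFunction. As the per-architecture MacroAssembler::CallCFunction changes further down show, that flag controls whether the caller FP/PC are published into isolate data so the stack stays iterable during a fast C call. A minimal sketch of the pattern, using hypothetical stand-in types rather than the real MacroAssembler plumbing:

    #include <cstdint>

    enum class SetIsolateDataSlots { kNo, kYes };

    struct IsolateData {
      uintptr_t fast_c_call_caller_fp = 0;
      uintptr_t fast_c_call_caller_pc = 0;
    };

    void CallCFunction(IsolateData* isolate_data, uintptr_t caller_fp,
                       uintptr_t caller_pc, void (*target)(),
                       SetIsolateDataSlots slots = SetIsolateDataSlots::kYes) {
      if (slots == SetIsolateDataSlots::kYes) {
        // Publish the caller frame so the stack remains iterable during the call.
        isolate_data->fast_c_call_caller_fp = caller_fp;
        isolate_data->fast_c_call_caller_pc = caller_pc;
      }
      target();  // the actual C call
      if (slots == SetIsolateDataSlots::kYes) {
        isolate_data->fast_c_call_caller_fp = 0;  // clear afterwards
      }
    }

Passing kNo simply skips that bookkeeping for the exception-handler lookup call.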
@@ -3574,7 +3574,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); topmost_script_having_context = CallApiCallbackGenericDescriptor:: TopmostScriptHavingContextRegister(); - callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + callback = + CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister(); holder = CallApiCallbackGenericDescriptor::HolderRegister(); break; @@ -3646,7 +3647,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, switch (mode) { case CallApiCallbackMode::kGeneric: __ LoadTaggedField( - scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset), + scratch2, + FieldMemOperand(callback, FunctionTemplateInfo::kCallbackDataOffset), r0); __ StoreU64(scratch2, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); @@ -3707,16 +3709,13 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // Target parameter. static_assert(ApiCallbackExitFrameConstants::kTargetOffset == 2 * kSystemPointerSize); - __ LoadTaggedField( - scratch, - FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset), r0); - __ StoreU64(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + __ StoreU64(callback, MemOperand(sp, 0 * kSystemPointerSize)); __ LoadExternalPointerField( api_function_address, FieldMemOperand(callback, - CallHandlerInfo::kMaybeRedirectedCallbackOffset), - kCallHandlerInfoCallbackTag, no_reg, scratch); + FunctionTemplateInfo::kMaybeRedirectedCallbackOffset), + kFunctionTemplateInfoCallbackTag, no_reg, scratch); __ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT); } else { @@ -3834,8 +3833,14 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { "Load address of v8::PropertyAccessorInfo::args_ array and name handle."); // Load address of v8::PropertyAccessorInfo::args_ array and name handle. - // name_arg = Handle(&name), name value was pushed to GC-ed stack space. +#ifdef V8_ENABLE_DIRECT_LOCAL + // name_arg = Local(name), name value was pushed to GC-ed stack space. + __ mr(name_arg, scratch); +#else + // name_arg = Local(&name), name value was pushed to GC-ed stack space. 
__ mr(name_arg, sp); +#endif + // property_callback_info_arg = v8::PCI::args_ (= &ShouldThrow) __ addi(property_callback_info_arg, name_arg, Operand(1 * kSystemPointerSize)); diff --git a/deps/v8/src/builtins/promise-abstract-operations.tq b/deps/v8/src/builtins/promise-abstract-operations.tq index fdbc6faa9776e0..834007399ffdec 100644 --- a/deps/v8/src/builtins/promise-abstract-operations.tq +++ b/deps/v8/src/builtins/promise-abstract-operations.tq @@ -128,6 +128,10 @@ transitioning macro MorphAndEnqueuePromiseReaction( static_assert( kPromiseReactionPromiseOrCapabilityOffset == kPromiseReactionJobTaskPromiseOrCapabilityOffset); + @if(V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA) + static_assert( + kPromiseReactionContinuationPreservedEmbedderDataOffset == + kPromiseReactionJobTaskContinuationPreservedEmbedderDataOffset); } else { static_assert(reactionType == kPromiseReactionReject); *UnsafeConstCast(&promiseReaction.map) = @@ -141,6 +145,10 @@ transitioning macro MorphAndEnqueuePromiseReaction( static_assert( kPromiseReactionPromiseOrCapabilityOffset == kPromiseReactionJobTaskPromiseOrCapabilityOffset); + @if(V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA) + static_assert( + kPromiseReactionContinuationPreservedEmbedderDataOffset == + kPromiseReactionJobTaskContinuationPreservedEmbedderDataOffset); } } @@ -450,13 +458,11 @@ transitioning macro PerformPromiseThenImpl( // PromiseReaction holding both the onFulfilled and onRejected callbacks. // Once the {promise} is resolved we decide on the concrete handler to // push onto the microtask queue. - const handlerContext = ExtractHandlerContext(onFulfilled, onRejected); const promiseReactions = UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result); const reaction = NewPromiseReaction( - handlerContext, promiseReactions, resultPromiseOrCapability, - onFulfilled, onRejected); + promiseReactions, resultPromiseOrCapability, onFulfilled, onRejected); promise.reactions_or_result = reaction; } else { const reactionsOrResult = promise.reactions_or_result; diff --git a/deps/v8/src/builtins/promise-misc.tq b/deps/v8/src/builtins/promise-misc.tq index 6b167839f107b9..b769435d6386bc 100644 --- a/deps/v8/src/builtins/promise-misc.tq +++ b/deps/v8/src/builtins/promise-misc.tq @@ -77,19 +77,14 @@ macro NewPromiseFulfillReactionJobTask( promiseOrCapability: JSPromise|PromiseCapability| Undefined): PromiseFulfillReactionJobTask { @if(V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA) { - const isolateContinuationData = GetContinuationPreservedEmbedderData(); - const nativeContext = LoadNativeContext(handlerContext); return new PromiseFulfillReactionJobTask{ map: PromiseFulfillReactionJobTaskMapConstant(), + continuation_preserved_embedder_data: + GetContinuationPreservedEmbedderData(), argument, context: handlerContext, handler, - promise_or_capability: promiseOrCapability, - isolate_continuation_preserved_embedder_data: isolateContinuationData, - context_continuation_preserved_embedder_data: - *ContextSlot( - nativeContext, - ContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX) + promise_or_capability: promiseOrCapability }; } @@ -110,19 +105,14 @@ macro NewPromiseRejectReactionJobTask( promiseOrCapability: JSPromise|PromiseCapability| Undefined): PromiseRejectReactionJobTask { @if(V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA) { - const isolateContinuationData = GetContinuationPreservedEmbedderData(); - const nativeContext = LoadNativeContext(handlerContext); return new PromiseRejectReactionJobTask{ map: 
PromiseRejectReactionJobTaskMapConstant(), + continuation_preserved_embedder_data: + GetContinuationPreservedEmbedderData(), argument, context: handlerContext, handler, - promise_or_capability: promiseOrCapability, - isolate_continuation_preserved_embedder_data: isolateContinuationData, - context_continuation_preserved_embedder_data: - *ContextSlot( - nativeContext, - ContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX) + promise_or_capability: promiseOrCapability }; } @@ -305,30 +295,23 @@ transitioning macro NewJSPromise( } macro NewPromiseReaction( - implicit context: Context)(handlerContext: Context, - next: Zero|PromiseReaction, + implicit context: Context)(next: Zero|PromiseReaction, promiseOrCapability: JSPromise|PromiseCapability|Undefined, fulfillHandler: Callable|Undefined, rejectHandler: Callable|Undefined): PromiseReaction { @if(V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA) { - const isolateContinuationData = GetContinuationPreservedEmbedderData(); - const nativeContext = LoadNativeContext(handlerContext); return new PromiseReaction{ map: PromiseReactionMapConstant(), + continuation_preserved_embedder_data: + GetContinuationPreservedEmbedderData(), next: next, reject_handler: rejectHandler, fulfill_handler: fulfillHandler, - promise_or_capability: promiseOrCapability, - isolate_continuation_preserved_embedder_data: isolateContinuationData, - context_continuation_preserved_embedder_data: - *ContextSlot( - nativeContext, - ContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX) + promise_or_capability: promiseOrCapability }; } @ifnot(V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA) { - dcheck(IsContext(handlerContext)); return new PromiseReaction{ map: PromiseReactionMapConstant(), next: next, @@ -360,13 +343,27 @@ macro NewPromiseResolveThenableJobTask( // 1. Let job be a new Job abstract closure with no parameters that // captures promiseToResolve, thenable, and then... // 5. Return { [[Job]]: job, [[Realm]]: thenRealm }. 
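The promise-misc.tq edits above replace the separate isolate- and context-level continuation-data fields with a single continuation_preserved_embedder_data slot that is captured when a reaction or job task is allocated. A conceptual sketch of the lifecycle implied by that field, using hypothetical C++ types (the restore-around-the-handler step happens elsewhere in V8 and is only indicated here):

    #include <utility>

    struct EmbedderData { void* payload = nullptr; };

    struct Isolate {
      EmbedderData continuation_preserved_embedder_data;
    };

    struct PromiseReactionJobTask {
      EmbedderData continuation_preserved_embedder_data;  // snapshot taken at creation
      // argument, context, handler, promise_or_capability, ...
    };

    PromiseReactionJobTask NewJobTask(Isolate* isolate) {
      PromiseReactionJobTask task;
      // Equivalent of GetContinuationPreservedEmbedderData() in the macros above.
      task.continuation_preserved_embedder_data =
          isolate->continuation_preserved_embedder_data;
      return task;
    }

    void RunJobTask(Isolate* isolate, const PromiseReactionJobTask& task) {
      // Conceptually, the snapshot is swapped in around the handler call.
      EmbedderData saved =
          std::exchange(isolate->continuation_preserved_embedder_data,
                        task.continuation_preserved_embedder_data);
      // ... invoke the fulfill/reject handler ...
      isolate->continuation_preserved_embedder_data = saved;
    }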
- return new PromiseResolveThenableJobTask{ - map: PromiseResolveThenableJobTaskMapConstant(), - context: nativeContext, - promise_to_resolve: promiseToResolve, - thenable, - then - }; + @if(V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA) { + return new PromiseResolveThenableJobTask{ + map: PromiseResolveThenableJobTaskMapConstant(), + continuation_preserved_embedder_data: + GetContinuationPreservedEmbedderData(), + context: nativeContext, + promise_to_resolve: promiseToResolve, + thenable, + then + }; + } + + @ifnot(V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA) { + return new PromiseResolveThenableJobTask{ + map: PromiseResolveThenableJobTaskMapConstant(), + context: nativeContext, + promise_to_resolve: promiseToResolve, + thenable, + then + }; + } } struct InvokeThenOneArgFunctor { diff --git a/deps/v8/src/builtins/riscv/builtins-riscv.cc b/deps/v8/src/builtins/riscv/builtins-riscv.cc index 94593f08920567..9ef4d5f82df266 100644 --- a/deps/v8/src/builtins/riscv/builtins-riscv.cc +++ b/deps/v8/src/builtins/riscv/builtins-riscv.cc @@ -50,6 +50,7 @@ enum class ArgumentsElementType { void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc, Register scratch, Register scratch2, ArgumentsElementType element_type) { + ASM_CODE_COMMENT(masm); DCHECK(!AreAliased(array, argc, scratch)); Label loop, entry; __ SubWord(scratch, argc, Operand(kJSArgcReceiverSlots)); @@ -314,22 +315,50 @@ static void AssertCodeIsBaseline(MacroAssembler* masm, Register code, __ Assert(eq, AbortReason::kExpectedBaselineData, scratch, Operand(static_cast(CodeKind::BASELINE))); } + +// Equivalent of SharedFunctionInfo::GetData +static void GetSharedFunctionInfoData(MacroAssembler* masm, Register data, + Register sfi, Register scratch) { + ASM_CODE_COMMENT(masm); +#ifdef V8_ENABLE_SANDBOX + DCHECK(!AreAliased(data, scratch)); + DCHECK(!AreAliased(sfi, scratch)); + // Use trusted_function_data if non-empy, otherwise the regular function_data. + Label use_tagged_field, done; + __ Lwu(scratch, + FieldMemOperand(sfi, SharedFunctionInfo::kTrustedFunctionDataOffset)); + __ Branch(&use_tagged_field, eq, scratch, Operand(zero_reg)); + __ ResolveIndirectPointerHandle(data, scratch, kUnknownIndirectPointerTag); + __ Branch(&done); + __ bind(&use_tagged_field); + __ LoadTaggedField( + data, FieldMemOperand(sfi, SharedFunctionInfo::kFunctionDataOffset)); + __ bind(&done); +#else + __ LoadTaggedField( + data, FieldMemOperand(sfi, SharedFunctionInfo::kFunctionDataOffset)); +#endif // V8_ENABLE_SANDBOX +} // TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under // the more general dispatch. 
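The new riscv GetSharedFunctionInfoData helper above mirrors SharedFunctionInfo::GetData: with the sandbox enabled it prefers the trusted (indirect-pointer) function data when the handle is non-empty, and otherwise falls back to the tagged function_data field. A rough C++ rendering of that control flow, with hypothetical field names and a placeholder table lookup standing in for ResolveIndirectPointerHandle:

    #include <cstdint>

    using IndirectPointerHandle = uint32_t;
    constexpr IndirectPointerHandle kNullIndirectPointerHandle = 0;

    struct SharedFunctionInfo {
      IndirectPointerHandle trusted_function_data;  // handle into a pointer table
      uintptr_t function_data;                      // tagged, in-sandbox fallback
    };

    // Placeholder for the indirect-pointer table lookup the assembly performs.
    uintptr_t ResolveIndirectPointerHandle(IndirectPointerHandle handle) {
      return static_cast<uintptr_t>(handle);
    }

    uintptr_t GetSharedFunctionInfoData(const SharedFunctionInfo& sfi) {
    #ifdef V8_ENABLE_SANDBOX
      if (sfi.trusted_function_data != kNullIndirectPointerHandle) {
        return ResolveIndirectPointerHandle(sfi.trusted_function_data);
      }
    #endif
      return sfi.function_data;
    }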
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, - Register sfi_data, + Register sfi, + Register bytecode, Register scratch1, Label* is_baseline) { + DCHECK(!AreAliased(bytecode, scratch1)); ASM_CODE_COMMENT(masm); Label done; - __ GetObjectType(sfi_data, scratch1, scratch1); + Register data = bytecode; + GetSharedFunctionInfoData(masm, data, sfi, scratch1); + __ GetObjectType(data, scratch1, scratch1); #ifndef V8_JITLESS if (v8_flags.debug_code) { Label not_baseline; __ Branch(¬_baseline, ne, scratch1, Operand(CODE_TYPE)); - AssertCodeIsBaseline(masm, sfi_data, scratch1); + AssertCodeIsBaseline(masm, data, scratch1); __ Branch(is_baseline); __ bind(¬_baseline); } else { @@ -337,11 +366,9 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, } #endif // !V8_JITLESS - __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE), - Label::Distance::kNear); - __ LoadTaggedField( - sfi_data, - FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); + __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE)); + __ LoadProtectedPointerField( + bytecode, FieldMemOperand(data, InterpreterData::kBytecodeArrayOffset)); __ bind(&done); } @@ -429,13 +456,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Underlying function needs to have bytecode available. if (v8_flags.debug_code) { Label is_baseline; + Register sfi = a3; + Register bytecode = a3; __ LoadTaggedField( - a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedField( - a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset)); - GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline); - __ GetObjectType(a3, a3, a3); - __ Assert(eq, AbortReason::kMissingBytecodeArray, a3, + sfi, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); + GetSharedFunctionInfoBytecodeOrBaseline(masm, sfi, bytecode, t5, + &is_baseline); + __ GetObjectType(a3, a3, bytecode); + __ Assert(eq, AbortReason::kMissingBytecodeArray, bytecode, Operand(BYTECODE_ARRAY_TYPE)); __ bind(&is_baseline); } @@ -1112,16 +1140,14 @@ void Builtins::Generate_InterpreterEntryTrampoline( Register closure = a1; // Get the bytecode array from the function object and load it into // kInterpreterBytecodeArrayRegister. + Register sfi = a4; __ LoadTaggedField( - kScratchReg, - FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - ResetSharedFunctionInfoAge(masm, kScratchReg); - __ LoadTaggedField( - kInterpreterBytecodeArrayRegister, - FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset)); + sfi, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, sfi); + Label is_baseline; GetSharedFunctionInfoBytecodeOrBaseline( - masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline); + masm, sfi, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline); // The bytecode array could have been flushed from the shared function info, // if so, call into CompileLazy. 
@@ -1328,7 +1354,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ Move(a2, kInterpreterBytecodeArrayRegister); static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); __ ReplaceClosureCodeWithOptimizedCode(a2, closure); - __ JumpCodeObject(a2); + __ JumpCodeObject(a2, kJSEntrypointTag); __ bind(&install_baseline_code); __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode); @@ -1699,16 +1725,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ LoadWord(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); __ LoadTaggedField( t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedField( - t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset)); + GetSharedFunctionInfoData(masm, t0, t0, t1); __ GetObjectType(t0, kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister); __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister, Operand(INTERPRETER_DATA_TYPE), Label::Distance::kNear); - __ LoadTaggedField( + __ LoadProtectedPointerField( t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset)); - __ LoadCodeInstructionStart(t0, t0); + __ LoadCodeInstructionStart(t0, t0, kJSEntrypointTag); __ BranchShort(&trampoline_loaded); __ bind(&builtin_trampoline); @@ -1967,18 +1992,17 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, // Load deoptimization data from the code object. // = [#deoptimization_data_offset] - __ LoadTaggedField( - a1, MemOperand(a0, Code::kDeoptimizationDataOrInterpreterDataOffset - - kHeapObjectTag)); + __ LoadProtectedPointerField( + a1, FieldMemOperand(maybe_target_code, + Code::kDeoptimizationDataOrInterpreterDataOffset)); // Load the OSR entrypoint offset from the deoptimization data. // = [#header_size + #osr_pc_offset] - __ SmiUntagField(a1, - MemOperand(a1, FixedArray::OffsetOfElementAt( - DeoptimizationData::kOsrPcOffsetIndex) - - kHeapObjectTag)); + __ SmiUntagField( + a1, FieldMemOperand(a1, TrustedFixedArray::OffsetOfElementAt( + DeoptimizationData::kOsrPcOffsetIndex))); - __ LoadCodeInstructionStart(a0, a0); + __ LoadCodeInstructionStart(a0, a0, kJSEntrypointTag); // Compute the target address = code_entry + osr_offset // = + @@ -3085,7 +3109,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ Move(a0, zero_reg); __ Move(a1, zero_reg); __ li(a2, ExternalReference::isolate_address(masm->isolate())); - __ CallCFunction(find_handler, 3); + __ CallCFunction(find_handler, 3, SetIsolateDataSlots::kNo); } // Retrieve the handler context, SP and FP. @@ -3270,7 +3294,27 @@ void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) { } void Builtins::Generate_WasmTrapHandlerLandingPad(MacroAssembler* masm) { - __ Trap(); + // This builtin gets called from the WebAssembly trap handler when an + // out-of-bounds memory access happened or when a null reference gets + // dereferenced. This builtin then fakes a call from the instruction that + // triggered the signal to the runtime. This is done by setting a return + // address and then jumping to a builtin which will call further to the + // runtime. + // As the return address we use the fault address + 1. Using the fault address + // itself would cause problems with safepoints and source positions. + // + // The problem with safepoints is that a safepoint has to be registered at the + // return address, and that at most one safepoint should be registered at a + // location. 
However, there could already be a safepoint registered at the + // fault address if the fault address is the return address of a call. + // + // The problem with source positions is that the stack trace code looks for + // the source position of a call before the return address. The source + // position of the faulty memory access, however, is recorded at the fault + // address. Therefore the stack trace code would not find the source position + // if we used the fault address as the return address. + __ AddWord(ra, kWasmTrapHandlerFaultAddressRegister, 1); + __ TailCallBuiltin(Builtin::kWasmTrapHandlerThrowTrap); } void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { @@ -3330,7 +3374,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, topmost_script_having_context = CallApiCallbackGenericDescriptor:: TopmostScriptHavingContextRegister(); argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); - callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + callback = + CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister(); holder = CallApiCallbackGenericDescriptor::HolderRegister(); break; @@ -3402,7 +3447,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, switch (mode) { case CallApiCallbackMode::kGeneric: __ LoadTaggedField( - scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset)); + scratch2, + FieldMemOperand(callback, FunctionTemplateInfo::kCallbackDataOffset)); __ StoreWord(scratch2, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); break; @@ -3458,16 +3504,13 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // Target parameter. static_assert(ApiCallbackExitFrameConstants::kTargetOffset == 2 * kSystemPointerSize); - __ LoadTaggedField( - scratch, - FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset)); - __ StoreWord(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + __ StoreWord(callback, MemOperand(sp, 0 * kSystemPointerSize)); __ LoadExternalPointerField( api_function_address, FieldMemOperand(callback, - CallHandlerInfo::kMaybeRedirectedCallbackOffset), - kCallHandlerInfoCallbackTag); + FunctionTemplateInfo::kMaybeRedirectedCallbackOffset), + kFunctionTemplateInfoCallbackTag); __ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT); } else { @@ -3608,8 +3651,13 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { __ RecordComment( "Load address of v8::PropertyAccessorInfo::args_ array and name handle."); - // name_arg = Handle(&name), name value was pushed to GC-ed stack space. +#ifdef V8_ENABLE_DIRECT_LOCAL + // name_arg = Local(name), name value was pushed to GC-ed stack space. + __ Move(name_arg, scratch); +#else + // name_arg = Local(&name), name value was pushed to GC-ed stack space. __ Move(name_arg, sp); +#endif // property_callback_info_arg = v8::PCI::args_ (= &ShouldThrow) __ AddWord(property_callback_info_arg, name_arg, Operand(1 * kSystemPointerSize)); @@ -3913,9 +3961,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, ResetSharedFunctionInfoAge(masm, code_obj); } - __ LoadTaggedField( - code_obj, - FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); + GetSharedFunctionInfoData(masm, code_obj, code_obj, t2); // Check if we have baseline code. For OSR entry it is safe to assume we // always have baseline code. @@ -3957,10 +4003,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, Label install_baseline_code; // Check if feedback vector is valid. 
If not, call prepare for baseline to // allocate it. - UseScratchRegisterScope temps(masm); - Register type = temps.Acquire(); - __ GetObjectType(feedback_vector, type, type); - __ Branch(&install_baseline_code, ne, type, Operand(FEEDBACK_VECTOR_TYPE)); + { + UseScratchRegisterScope temps(masm); + Register type = temps.Acquire(); + __ GetObjectType(feedback_vector, type, type); + __ Branch(&install_baseline_code, ne, type, Operand(FEEDBACK_VECTOR_TYPE)); + } // Save BytecodeOffset from the stack frame. __ SmiUntag(kInterpreterBytecodeOffsetRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); @@ -4020,7 +4068,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ PrepareCallCFunction(3, 0, a4); __ CallCFunction(get_baseline_pc, 3, 0); } - __ LoadCodeInstructionStart(code_obj, code_obj); + __ LoadCodeInstructionStart(code_obj, code_obj, kJSEntrypointTag); __ AddWord(code_obj, code_obj, kReturnRegister0); __ Pop(kInterpreterAccumulatorRegister); diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc index d6b19ab4c471de..b6d38b35cc65b5 100644 --- a/deps/v8/src/builtins/s390/builtins-s390.cc +++ b/deps/v8/src/builtins/s390/builtins-s390.cc @@ -3297,7 +3297,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ mov(r2, Operand::Zero()); __ mov(r3, Operand::Zero()); __ Move(r4, ExternalReference::isolate_address(masm->isolate())); - __ CallCFunction(find_handler, 3); + __ CallCFunction(find_handler, 3, SetIsolateDataSlots::kNo); } // Retrieve the handler context, SP and FP. @@ -3479,7 +3479,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); topmost_script_having_context = CallApiCallbackGenericDescriptor:: TopmostScriptHavingContextRegister(); - callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + callback = + CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister(); holder = CallApiCallbackGenericDescriptor::HolderRegister(); break; @@ -3551,7 +3552,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, switch (mode) { case CallApiCallbackMode::kGeneric: __ LoadTaggedField( - scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset)); + scratch2, + FieldMemOperand(callback, FunctionTemplateInfo::kCallbackDataOffset)); __ StoreU64(scratch2, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); break; @@ -3612,14 +3614,12 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // Target parameter. static_assert(ApiCallbackExitFrameConstants::kTargetOffset == 2 * kSystemPointerSize); - __ LoadTaggedField( - scratch, - FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset)); - __ StoreU64(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + __ StoreU64(callback, MemOperand(sp, 0 * kSystemPointerSize)); - __ LoadU64(api_function_address, - FieldMemOperand( - callback, CallHandlerInfo::kMaybeRedirectedCallbackOffset)); + __ LoadU64( + api_function_address, + FieldMemOperand(callback, + FunctionTemplateInfo::kMaybeRedirectedCallbackOffset)); __ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT); } else { @@ -3733,8 +3733,13 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { __ Push(smi_zero, scratch); __ RecordComment( "Load address of v8::PropertyAccessorInfo::args_ array and name handle."); - // name_arg = Handle(&name), name value was pushed to GC-ed stack space. 
+#ifdef V8_ENABLE_DIRECT_LOCAL + // name_arg = Local(name), name value was pushed to GC-ed stack space. + __ mov(name_arg, scratch); +#else + // name_arg = Local(&name), name value was pushed to GC-ed stack space. __ mov(name_arg, sp); +#endif // property_callback_info_arg = v8::PCI::args_ (= &ShouldThrow) __ AddS64(property_callback_info_arg, name_arg, Operand(1 * kSystemPointerSize)); diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq index bcb293eb542437..4f49b737371dc7 100644 --- a/deps/v8/src/builtins/typed-array.tq +++ b/deps/v8/src/builtins/typed-array.tq @@ -14,6 +14,7 @@ type Uint16Elements extends ElementsKind; type Int16Elements extends ElementsKind; type Uint32Elements extends ElementsKind; type Int32Elements extends ElementsKind; +type Float16Elements extends ElementsKind; type Float32Elements extends ElementsKind; type Float64Elements extends ElementsKind; type Uint8ClampedElements extends ElementsKind; @@ -146,6 +147,8 @@ macro GetTypedArrayAccessor(elementsKindParam: ElementsKind): if (IsElementsKindGreaterThan(elementsKind, ElementsKind::UINT32_ELEMENTS)) { if (elementsKind == ElementsKind::INT32_ELEMENTS) { return GetTypedArrayAccessor(); + } else if (elementsKind == ElementsKind::FLOAT16_ELEMENTS) { + return GetTypedArrayAccessor(); } else if (elementsKind == ElementsKind::FLOAT32_ELEMENTS) { return GetTypedArrayAccessor(); } else if (elementsKind == ElementsKind::FLOAT64_ELEMENTS) { @@ -266,6 +269,9 @@ KindForArrayType(): constexpr ElementsKind { KindForArrayType(): constexpr ElementsKind { return ElementsKind::INT32_ELEMENTS; } +KindForArrayType(): constexpr ElementsKind { + return ElementsKind::FLOAT16_ELEMENTS; +} KindForArrayType(): constexpr ElementsKind { return ElementsKind::FLOAT32_ELEMENTS; } diff --git a/deps/v8/src/builtins/wasm-strings.tq b/deps/v8/src/builtins/wasm-strings.tq index fb103155db439f..7d0e88381db699 100644 --- a/deps/v8/src/builtins/wasm-strings.tq +++ b/deps/v8/src/builtins/wasm-strings.tq @@ -30,8 +30,8 @@ transitioning javascript builtin WebAssemblyStringFromWtf16Array( js-implicit context: Context)(...arguments): JSAny { const array = WasmCastToSpecialPrimitiveArray(context, arguments[0], SmiConstant(16)); - const start = ChangeNumberToUint32(ToInteger_Inline(arguments[1])); - const end = ChangeNumberToUint32(ToInteger_Inline(arguments[2])); + const start = NumberToUint32(ToNumber_Inline(arguments[1])); + const end = NumberToUint32(ToNumber_Inline(arguments[2])); return wasm::WasmStringNewWtf16Array(array, start, end); } @@ -42,8 +42,8 @@ transitioning javascript builtin WebAssemblyStringFromUtf8Array( js-implicit context: Context)(...arguments): JSAny { const array = WasmCastToSpecialPrimitiveArray(context, arguments[0], SmiConstant(8)); - const start = ChangeNumberToUint32(ToInteger_Inline(arguments[1])); - const end = ChangeNumberToUint32(ToInteger_Inline(arguments[2])); + const start = NumberToUint32(ToNumber_Inline(arguments[1])); + const end = NumberToUint32(ToNumber_Inline(arguments[2])); return wasm::WasmStringNewWtf8Array( start, end, array, SmiConstant(kLossyUtf8)); } @@ -54,7 +54,7 @@ transitioning javascript builtin WebAssemblyStringIntoUtf8Array( const string = Cast(arguments[0]) otherwise goto IllegalCast; const array = WasmCastToSpecialPrimitiveArray(context, arguments[1], SmiConstant(8)); - const start = ChangeNumberToUint32(ToInteger_Inline(arguments[2])); + const start = NumberToUint32(ToNumber_Inline(arguments[2])); return runtime::WasmStringEncodeWtf8Array( context, 
SmiConstant(kLossyUtf8), string, array, ChangeUint32ToTagged(start)); @@ -79,7 +79,7 @@ transitioning javascript builtin WebAssemblyStringToWtf16Array( const string = Cast(arguments[0]) otherwise goto IllegalCast; const array = WasmCastToSpecialPrimitiveArray(context, arguments[1], SmiConstant(16)); - const start = ChangeNumberToUint32(ToInteger_Inline(arguments[2])); + const start = NumberToUint32(ToNumber_Inline(arguments[2])); const written = wasm::WasmStringEncodeWtf16Array(string, array, start); return Convert(written); } label IllegalCast deferred { @@ -89,14 +89,14 @@ transitioning javascript builtin WebAssemblyStringToWtf16Array( transitioning javascript builtin WebAssemblyStringFromCharCode( js-implicit context: Context)(...arguments): JSAny { - const code = ChangeNumberToUint32(ToInteger_Inline(arguments[0])); + const code = NumberToUint32(ToNumber_Inline(arguments[0])); return StringFromSingleCharCode(%RawDownCast(code & 0xFFFF)); } transitioning javascript builtin WebAssemblyStringFromCodePoint( js-implicit context: Context)(...arguments): JSAny { - const code = ToInteger_Inline(arguments[0]); - const codeUint = ChangeNumberToUint32(code); + const code = ToNumber_Inline(arguments[0]); + const codeUint = NumberToUint32(code); if (codeUint <= 0xFFFF) { return StringFromSingleCharCode(%RawDownCast(codeUint)); } @@ -107,7 +107,7 @@ transitioning javascript builtin WebAssemblyStringCodePointAt( js-implicit context: Context)(...arguments): JSAny { try { const string = Cast(arguments[0]) otherwise goto IllegalCast; - const index = ChangeNumberToUint32(ToInteger_Inline(arguments[1])); + const index = NumberToUint32(ToNumber_Inline(arguments[1])); if (index >= Unsigned(string.length)) goto OOB; const code: int32 = string::LoadSurrogatePairAt( string, string.length_intptr, Signed(Convert(index)), @@ -124,7 +124,7 @@ transitioning javascript builtin WebAssemblyStringCharCodeAt( js-implicit context: Context)(...arguments): JSAny { try { const string = Cast(arguments[0]) otherwise goto IllegalCast; - const index = ChangeNumberToUint32(ToInteger_Inline(arguments[1])); + const index = NumberToUint32(ToNumber_Inline(arguments[1])); if (index >= Unsigned(string.length)) goto OOB; const code: char16 = StringCharCodeAt(string, Convert(index)); return SmiTag(code); @@ -171,8 +171,8 @@ transitioning javascript builtin WebAssemblyStringSubstring( js-implicit context: Context)(...arguments): JSAny { try { const string = Cast(arguments[0]) otherwise goto IllegalCast; - const start = ChangeNumberToUint32(ToInteger_Inline(arguments[1])); - const end = ChangeNumberToUint32(ToInteger_Inline(arguments[2])); + const start = NumberToUint32(ToNumber_Inline(arguments[1])); + const end = NumberToUint32(ToNumber_Inline(arguments[2])); return wasm::WasmStringViewWtf16Slice(string, start, end); } label IllegalCast deferred { Trap(context, MessageTemplate::kWasmTrapIllegalCast); diff --git a/deps/v8/src/builtins/wasm-to-js.tq b/deps/v8/src/builtins/wasm-to-js.tq index 87e43f6f7835c3..bb228e34f124d3 100644 --- a/deps/v8/src/builtins/wasm-to-js.tq +++ b/deps/v8/src/builtins/wasm-to-js.tq @@ -56,6 +56,7 @@ macro HandleF32Returns( @export transitioning macro WasmToJSWrapper(ref: WasmApiFunctionRef): WasmToJSResult { + dcheck(Is(ref)); // Spill the signature on the stack so that it can be read by the GC. This is // done in the very beginning before a GC could be triggered. // Caller FP + return address. 
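The wasm-strings.tq builtins above now route their index arguments through ToNumber_Inline and the new NumberToUint32 helper. Assuming these follow the usual ECMAScript truncate-and-wrap conversion for finite inputs (a simplification; the exact Torque Convert semantics are defined elsewhere), the effect can be sketched as:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // ToUint32-style conversion: truncate toward zero, then wrap modulo 2^32.
    uint32_t ToUint32(double x) {
      if (!std::isfinite(x)) return 0;                       // NaN, +/-inf -> 0
      double wrapped = std::fmod(std::trunc(x), 4294967296.0);  // 2^32
      if (wrapped < 0) wrapped += 4294967296.0;
      return static_cast<uint32_t>(wrapped);
    }

    int main() {
      std::printf("%u\n", ToUint32(-1.0));          // 4294967295
      std::printf("%u\n", ToUint32(4294967298.5));  // 2
      return 0;
    }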
diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq index 4179bcb7fd474e..f1dfe08960e9ff 100644 --- a/deps/v8/src/builtins/wasm.tq +++ b/deps/v8/src/builtins/wasm.tq @@ -3,6 +3,7 @@ // found in the LICENSE file. #include 'src/builtins/builtins-wasm-gen.h' +#include 'src/builtins/builtins-call-gen.h' namespace runtime { extern runtime WasmMemoryGrow(Context, WasmTrustedInstanceData, Smi, Smi): Smi; @@ -45,7 +46,7 @@ extern runtime WasmArrayCopy(Context, WasmArray, Smi, WasmArray, Smi, Smi): extern runtime WasmArrayNewSegment( Context, WasmTrustedInstanceData, Smi, Smi, Smi, Map): Object; extern runtime WasmStringNewSegmentWtf8( - Context, WasmTrustedInstanceData, Smi, Smi, Smi): String; + Context, WasmTrustedInstanceData, Smi, Smi, Smi, Smi): String; extern runtime WasmArrayInitSegment( Context, WasmTrustedInstanceData, Smi, WasmArray, Smi, Smi, Smi): JSAny; extern runtime WasmStringNewWtf8( @@ -82,6 +83,13 @@ extern macro Allocate(intptr): HeapObject; extern macro Allocate(intptr, constexpr AllocationFlag): HeapObject; } +macro NumberToInt32(input: Number): int32 { + return Convert(input); +} +macro NumberToUint32(input: Number): uint32 { + return Unsigned(Convert(input)); +} + namespace wasm { const kAnyType: constexpr int31 generates 'wasm::kWasmAnyRef.raw_bit_field()'; @@ -100,7 +108,7 @@ extern macro WasmBuiltinsAssembler::LoadContextFromInstanceData( WasmTrustedInstanceData): NativeContext; extern macro WasmBuiltinsAssembler::LoadTablesFromInstanceData( WasmTrustedInstanceData): FixedArray; -extern macro WasmBuiltinsAssembler::LoadInternalFunctionsFromInstanceData( +extern macro WasmBuiltinsAssembler::LoadFuncRefsFromInstanceData( WasmTrustedInstanceData): FixedArray; extern macro WasmBuiltinsAssembler::LoadManagedObjectMapsFromInstanceData( WasmTrustedInstanceData): FixedArray; @@ -118,18 +126,18 @@ builtin WasmInt32ToHeapNumber(val: int32): HeapNumber { } builtin WasmFuncRefToJS( - implicit context: Context)(val: WasmInternalFunction|WasmNull): JSFunction - |Null { + implicit context: Context)(val: WasmFuncRef|WasmNull): JSFunction|Null { typeswitch (val) { case (WasmNull): { return Null; } - case (func: WasmInternalFunction): { - const maybeExternal: Object = func.external; + case (func: WasmFuncRef): { + const internal: WasmInternalFunction = func.internal; + const maybeExternal: Object = internal.external; if (maybeExternal != Undefined) { return %RawDownCast(maybeExternal); } - tail runtime::WasmInternalFunctionCreateExternal(context, func); + tail runtime::WasmInternalFunctionCreateExternal(context, internal); } } } @@ -261,6 +269,7 @@ builtin WasmTableSet(tableIndex: intptr, index: uint32, value: Object): } } +// Returns WasmFuncRef or WasmNull, or throws an exception. 
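WasmFuncRefToJS above now takes a WasmFuncRef and dereferences its internal WasmInternalFunction before consulting the cached external wrapper; CallRefIC further down does the same for call_target and ref. A simplified structural sketch of that indirection, with hypothetical C++ structs rather than V8's real object layout:

    struct JSFunction;                    // JS-visible wrapper object

    struct WasmInternalFunction {
      void* call_target = nullptr;        // entry point used by wasm-to-wasm calls
      void* ref = nullptr;                // instance data or api-function ref
      JSFunction* external = nullptr;     // lazily created JS wrapper
    };

    struct WasmFuncRef {
      WasmInternalFunction* internal;     // the only field funcref users dereference
    };

    JSFunction* FuncRefToJS(const WasmFuncRef& funcref) {
      WasmInternalFunction* internal = funcref.internal;
      if (internal->external != nullptr) return internal->external;
      return nullptr;  // V8 instead tail-calls a runtime function to create the wrapper
    }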
builtin WasmTableGetFuncRef(tableIndex: intptr, index: uint32): Object { const trustedData: WasmTrustedInstanceData = LoadInstanceDataFromFrame(); const entryIndex: intptr = Signed(ChangeUint32ToWord(index)); @@ -274,16 +283,14 @@ builtin WasmTableGetFuncRef(tableIndex: intptr, index: uint32): Object { if (index >= entriesCount) goto IndexOutOfRange; const entries: FixedArray = table.entries; - const entry: Object = LoadFixedArrayElement(entries, entryIndex); + const entry: HeapObject = + UnsafeCast(LoadFixedArrayElement(entries, entryIndex)); - try { - const entryObject: HeapObject = - TaggedToHeapObject(entry) otherwise ReturnEntry; - if (IsTuple2Map(entryObject.map)) goto CallRuntime; - goto ReturnEntry; - } label ReturnEntry { - return entry; - } + dcheck(Is(entry) || Is(entry) || Is(entry)); + if (IsTuple2Map(entry.map)) goto CallRuntime; + if (Is(entry)) return entry; + dcheck(Is(entry)); + return entry; } label CallRuntime deferred { tail runtime::WasmFunctionTableGet( LoadContextFromInstanceData(trustedData), trustedData, @@ -302,8 +309,9 @@ builtin WasmFunctionTableGet(tableIndex: intptr, index: int32): Object { SmiFromIntPtr(tableIndex), SmiFromInt32(index)); } -builtin WasmTableSetFuncRef(tableIndex: intptr, index: uint32, value: Object): - Object { +builtin WasmTableSetFuncRef( + tableIndex: intptr, index: uint32, value: WasmFuncRef): Object { + dcheck(Is(value) || Is(value)); const trustedData: WasmTrustedInstanceData = LoadInstanceDataFromFrame(); const entryIndex: intptr = Signed(ChangeUint32ToWord(index)); try { @@ -327,14 +335,13 @@ builtin WasmTableSetFuncRef(tableIndex: intptr, index: uint32, value: Object): builtin WasmRefFunc(index: uint32): Object { const trustedData: WasmTrustedInstanceData = LoadInstanceDataFromFrame(); try { - const table: FixedArray = - LoadInternalFunctionsFromInstanceData(trustedData); - const functionIndex: intptr = Signed(ChangeUint32ToWord(index)); - const result: Object = LoadFixedArrayElement(table, functionIndex); - // {result} is either a funcref or nullptr. A Smi check is the fastest - // way to distinguish these two cases. - if (TaggedIsSmi(result)) goto CallRuntime; - return result; + const funcRefs: FixedArray = LoadFuncRefsFromInstanceData(trustedData); + const funcref: Object = funcRefs.objects[index]; + // {funcref} is either a WasmFuncRef or Smi::zero(). A Smi check is the + // fastest way to distinguish these two cases. + if (TaggedIsSmi(funcref)) goto CallRuntime; + dcheck(Is(funcref)); + return funcref; } label CallRuntime deferred { tail runtime::WasmRefFunc( LoadContextFromInstanceData(trustedData), trustedData, @@ -571,8 +578,8 @@ extern macro LoadWasmInternalFunctionInstructionStart(WasmInternalFunction): // // TODO(rstz): The counter might overflow if it exceeds the range of a Smi. // This can lead to incorrect inlining decisions. -macro UpdateIC( - vector: FixedArray, index: intptr, funcref: WasmInternalFunction): void { +macro UpdateIC(vector: FixedArray, index: intptr, funcref: WasmFuncRef): + void { const value = vector.objects[index]; if (value == funcref) { // Monomorphic hit. Check for this case first to maximize its performance. @@ -616,7 +623,7 @@ macro UpdateIC( newEntries.objects[newIndex + 1] = SmiConstant(1); vector.objects[index] = newEntries; } - } else if (Is(value)) { + } else if (Is(value)) { // Monomorphic miss. 
const newEntries = UnsafeCast(AllocateFixedArray( ElementsKind::PACKED_ELEMENTS, 4, AllocationFlag::kNone)); @@ -638,19 +645,20 @@ macro UpdateIC( // Liftoff uses the two returned values directly. struct TargetAndRef { target: RawPtr; - ref: WasmInstanceObject|WasmApiFunctionRef; + ref: WasmTrustedInstanceData|WasmApiFunctionRef; } -builtin CallRefIC( - vector: FixedArray, index: intptr, - funcref: WasmInternalFunction): TargetAndRef { +builtin CallRefIC(vector: FixedArray, index: intptr, funcref: WasmFuncRef): + TargetAndRef { + dcheck(Is(funcref)); UpdateIC(vector, index, funcref); - let target = funcref.call_target_ptr; + const internal = funcref.internal; + let target = internal.call_target_ptr; if (Signed(target) == IntPtrConstant(0)) { - target = LoadWasmInternalFunctionInstructionStart(funcref); + target = LoadWasmInternalFunctionInstructionStart(internal); } - return TargetAndRef{target: target, ref: funcref.ref}; + return TargetAndRef{target: target, ref: internal.ref}; } extern macro TryHasOwnProperty(HeapObject, Map, InstanceType, Name): never @@ -935,7 +943,7 @@ builtin WasmStringNewWtf16Array(array: WasmArray, start: uint32, end: uint32): // Torque's type checker for tail calls. builtin WasmStringFromDataSegment( segmentLength: uint32, arrayStart: uint32, arrayEnd: uint32, - segmentIndex: Smi, segmentOffset: Smi): JSAny { + segmentIndex: Smi, segmentOffset: Smi, variant: Smi): JSAny { const trustedData = LoadInstanceDataFromFrame(); try { const segmentOffsetU: uint32 = Unsigned(SmiToInt32(segmentOffset)); @@ -951,7 +959,7 @@ builtin WasmStringFromDataSegment( const smiLength = Convert(arrayLength) otherwise SegmentOOB; tail runtime::WasmStringNewSegmentWtf8( LoadContextFromInstanceData(trustedData), trustedData, segmentIndex, - smiOffset, smiLength); + smiOffset, smiLength, variant); } label SegmentOOB deferred { tail ThrowWasmTrapElementSegmentOutOfBounds(); } label ArrayOutOfBounds deferred { @@ -974,11 +982,11 @@ builtin WasmStringConst(index: uint32): String { } builtin WasmStringMeasureUtf8(string: String): int32 { const result = runtime::WasmStringMeasureUtf8(LoadContextFromFrame(), string); - return Signed(ChangeNumberToUint32(result)); + return NumberToInt32(result); } builtin WasmStringMeasureWtf8(string: String): int32 { const result = runtime::WasmStringMeasureWtf8(LoadContextFromFrame(), string); - return Signed(ChangeNumberToUint32(result)); + return NumberToInt32(result); } builtin WasmStringEncodeWtf8( string: String, offset: uint32, memory: Smi, utf8Variant: Smi): uint32 { @@ -986,7 +994,7 @@ builtin WasmStringEncodeWtf8( const result = runtime::WasmStringEncodeWtf8( LoadContextFromInstanceData(trustedData), trustedData, memory, utf8Variant, string, WasmUint32ToNumber(offset)); - return ChangeNumberToUint32(result); + return NumberToUint32(result); } builtin WasmStringEncodeWtf8Array( string: String, array: WasmArray, start: uint32, utf8Variant: Smi): uint32 { @@ -994,7 +1002,7 @@ builtin WasmStringEncodeWtf8Array( const result = runtime::WasmStringEncodeWtf8Array( LoadContextFromInstanceData(trustedData), utf8Variant, string, array, WasmUint32ToNumber(start)); - return ChangeNumberToUint32(result); + return NumberToUint32(result); } builtin WasmStringToUtf8Array(string: String): WasmArray { return runtime::WasmStringToUtf8Array(LoadContextFromFrame(), string); @@ -1092,7 +1100,7 @@ builtin WasmStringEqual(a: String, b: String): int32 { builtin WasmStringIsUSVSequence(str: String): int32 { if (IsOneByteStringInstanceType(str.instanceType)) return 1; const 
length = runtime::WasmStringMeasureUtf8(LoadContextFromFrame(), str); - if (Signed(ChangeNumberToUint32(length)) < 0) return 0; + if (NumberToInt32(length) < 0) return 0; return 1; } @@ -1422,4 +1430,24 @@ builtin WasmAnyConvertExtern(externObject: JSAny): JSAny { context, externObject, SmiConstant(kAnyType)); } +extern macro CallOrConstructBuiltinsAssembler::GetCompatibleReceiver( + JSReceiver, HeapObject, Context): JSReceiver; + +builtin WasmFastApiCallTypeCheckAndUpdateIC( + implicit context: Context)(data: WasmFastApiCallData, + receiver: JSAny): Smi { + try { + const rec = Cast(receiver) otherwise goto IllegalCast; + ModifyThreadInWasmFlag(0); + // We don't care about the actual compatible receiver; we just rely + // on this helper throwing an exception when there isn't one. + GetCompatibleReceiver(rec, data.signature, context); + ModifyThreadInWasmFlag(1); + data.cached_map = StrongToWeak(rec.map); + return 1; + } label IllegalCast { + const error = MessageTemplate::kIllegalInvocation; + runtime::WasmThrowTypeError(context, SmiConstant(error), Convert(0)); + } +} } // namespace wasm diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index 68d1f2c6641cef..b1b3a8bf4f1795 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -3996,7 +3996,8 @@ void SwitchToTheCentralStackIfNeeded(MacroAssembler* masm, __ Move(kCArgRegs[0], ER::isolate_address(masm->isolate())); __ Move(kCArgRegs[1], kOldSPRegister); __ PrepareCallCFunction(2); - __ CallCFunction(ER::wasm_switch_to_the_central_stack(), 2); + __ CallCFunction(ER::wasm_switch_to_the_central_stack(), 2, + SetIsolateDataSlots::kNo); __ movq(central_stack_sp, kReturnRegister0); __ popq(argc_input); @@ -4039,7 +4040,8 @@ void SwitchFromTheCentralStackIfNeeded(MacroAssembler* masm, __ Move(kCArgRegs[0], ER::isolate_address(masm->isolate())); __ PrepareCallCFunction(1); - __ CallCFunction(ER::wasm_switch_from_the_central_stack(), 1); + __ CallCFunction(ER::wasm_switch_from_the_central_stack(), 1, + SetIsolateDataSlots::kNo); __ popq(kReturnRegister1); __ popq(kReturnRegister0); @@ -4215,7 +4217,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ Move(kCArgRegs[1], 0); // argv. __ Move(kCArgRegs[2], ER::isolate_address(masm->isolate())); __ PrepareCallCFunction(3); - __ CallCFunction(find_handler, 3); + __ CallCFunction(find_handler, 3, SetIsolateDataSlots::kNo); } #ifdef V8_ENABLE_CET_SHADOW_STACK @@ -4366,7 +4368,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); topmost_script_having_context = CallApiCallbackGenericDescriptor:: TopmostScriptHavingContextRegister(); - callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + callback = + CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister(); holder = CallApiCallbackGenericDescriptor::HolderRegister(); break; @@ -4425,8 +4428,9 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, __ Push(kScratchRegister); // kNewTarget switch (mode) { case CallApiCallbackMode::kGeneric: - __ PushTaggedField(FieldOperand(callback, CallHandlerInfo::kDataOffset), - scratch2); + __ PushTaggedField( + FieldOperand(callback, FunctionTemplateInfo::kCallbackDataOffset), + scratch2); break; case CallApiCallbackMode::kOptimizedNoProfiling: @@ -4472,16 +4476,15 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // Target parameter. 
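The CallHandlerInfo to FunctionTemplateInfo edits repeated across the per-architecture CallApiCallbackImpl builtins above follow one object-model change: the callback data and the possibly redirected C entry point now live directly on FunctionTemplateInfo, so the builtin can store the template itself as the exit frame's target instead of first loading an owner template from a separate CallHandlerInfo. A hypothetical, simplified view of the fields involved:

    struct FunctionTemplateInfo {
      void* callback_data;              // read for FCA::kDataIndex
      void* maybe_redirected_callback;  // becomes api_function_address
    };

    void* LoadApiFunctionAddress(const FunctionTemplateInfo* callback) {
      return callback->maybe_redirected_callback;
    }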
static_assert(ApiCallbackExitFrameConstants::kTargetOffset == 2 * kSystemPointerSize); - __ PushTaggedField( - FieldOperand(callback, CallHandlerInfo::kOwnerTemplateOffset), - scratch2); + __ Push(callback); __ PushReturnAddressFrom(scratch); __ LoadExternalPointerField( api_function_address, - FieldOperand(callback, CallHandlerInfo::kMaybeRedirectedCallbackOffset), - kCallHandlerInfoCallbackTag, kScratchRegister); + FieldOperand(callback, + FunctionTemplateInfo::kMaybeRedirectedCallbackOffset), + kFunctionTemplateInfoCallbackTag, kScratchRegister); __ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT, api_function_address); @@ -4622,8 +4625,13 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { Operand info_object = ExitFrameStackSlotOperand(0); __ movq(info_object, args_array); - // name_arg = Handle(&name), name value was pushed to GC-ed stack space. +#ifdef V8_ENABLE_DIRECT_LOCAL + // name_arg = Local(name), name value was pushed to GC-ed stack space. + __ movq(name_arg, Operand(args_array, -kSystemPointerSize)); +#else + // name_arg = Local(&name), name value was pushed to GC-ed stack space. __ leaq(name_arg, Operand(args_array, -kSystemPointerSize)); +#endif // The context register (rsi) might overlap with property_callback_info_arg // but the context value has been saved in EnterExitFrame and thus it could // be used to pass arguments. @@ -4723,7 +4731,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, __ LoadAddress(arg5, ExternalReference::isolate_address(isolate)); __ movq(Operand(rsp, 4 * kSystemPointerSize), arg5); #else - // r8 is kCArgRegs[4] on Linux + // r8 is kCArgRegs[4] on Linux. __ LoadAddress(r8, ExternalReference::isolate_address(isolate)); #endif diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS index d476577f06dffa..5b17b9e31b4b14 100644 --- a/deps/v8/src/codegen/OWNERS +++ b/deps/v8/src/codegen/OWNERS @@ -5,9 +5,8 @@ ishell@chromium.org jgruber@chromium.org jkummerow@chromium.org leszeks@chromium.org -mslekova@chromium.org nicohartmann@chromium.org -tebbi@chromium.org victorgomes@chromium.org +dmercadier@chromium.org per-file compiler.*=marja@chromium.org diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h index 2142ca9119c4c3..11f90f093f7132 100644 --- a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h +++ b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h @@ -92,6 +92,21 @@ constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() { return r3; } +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::EnumIndexRegister() { + return r4; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::CacheTypeRegister() { + return r5; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::SlotRegister() { + return r2; +} + // static constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() { return kInterpreterAccumulatorRegister; @@ -347,7 +362,8 @@ CallApiCallbackGenericDescriptor::TopmostScriptHavingContextRegister() { return r1; } // static -constexpr Register CallApiCallbackGenericDescriptor::CallHandlerInfoRegister() { +constexpr Register +CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister() { return r3; } // static diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index 2310cd9e75b413..92215bd0fa6a0c 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ 
b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -22,7 +22,7 @@ #include "src/debug/debug.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frames-inl.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #include "src/init/bootstrapper.h" #include "src/logging/counters.h" #include "src/objects/objects-inl.h" @@ -441,8 +441,7 @@ void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code, } Operand MacroAssembler::ClearedValue() const { - return Operand( - static_cast(HeapObjectReference::ClearedValue(isolate()).ptr())); + return Operand(static_cast(i::ClearedValue(isolate()).ptr())); } void MacroAssembler::Call(Label* target) { bl(target); } @@ -2783,6 +2782,13 @@ int MacroAssembler::CallCFunction(Register function, int num_reg_arguments, IsolateData::fast_c_call_caller_pc_offset())); str(fp, MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset())); +#if DEBUG + // Reset Isolate::context field right before the fast C call such that the + // GC can visit this field unconditionally. This is necessary because + // CEntry sets it to kInvalidContext in debug build only. + mov(pc_scratch, Operand(Context::kNoContext)); + StoreRootRelative(IsolateData::context_offset(), pc_scratch); +#endif } else { DCHECK_NOT_NULL(isolate()); Register addr_scratch = r4; @@ -2794,7 +2800,15 @@ int MacroAssembler::CallCFunction(Register function, int num_reg_arguments, Move(addr_scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); str(fp, MemOperand(addr_scratch)); - +#if DEBUG + // Reset Isolate::context field right before the fast C call such that the + // GC can visit this field unconditionally. This is necessary because + // CEntry sets it to kInvalidContext in debug build only. + mov(pc_scratch, Operand(Context::kNoContext)); + str(pc_scratch, + ExternalReferenceAsOperand( + ExternalReference::context_address(isolate()), addr_scratch)); +#endif Pop(addr_scratch); } diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc index 4c76a7af2f1a23..712978f1ec69d0 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc @@ -523,8 +523,7 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch, // currently referring to this label. label->Unuse(); } else { - label->link_to(static_cast(reinterpret_cast(next_link) - - buffer_start_)); + label->link_to(static_cast(InstructionOffset(next_link))); } } else if (branch == next_link) { @@ -545,6 +544,26 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch, next_link = link->ImmPCOffsetTarget(); end_of_chain = (link == next_link); link->SetImmPCOffsetTarget(options(), label_veneer); + // {link} is now resolved; remove it from {unresolved_branches_} so + // we won't later try to process it again, which would fail because + // by walking the chain of its label's unresolved branch instructions, + // we won't find it: {prev_link} is now the end of that chain after + // its update above. + if (link->IsCondBranchImm() || link->IsCompareBranch()) { + static_assert(Instruction::ImmBranchRange(CondBranchType) == + Instruction::ImmBranchRange(CompareBranchType)); + int max_reachable_pc = static_cast(InstructionOffset(link)) + + Instruction::ImmBranchRange(CondBranchType); + unresolved_branches_.erase(max_reachable_pc); + } else if (link->IsTestBranch()) { + // Add 1 to account for branch type tag bit. 
+ int max_reachable_pc = static_cast(InstructionOffset(link)) + + Instruction::ImmBranchRange(TestBranchType) + + 1; + unresolved_branches_.erase(max_reachable_pc); + } else { + // Other branch types are not handled by veneers. + } link = next_link; } } else { @@ -4713,14 +4732,6 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, } } - // Update next_veneer_pool_check_ (tightly coupled with unresolved_branches_). - if (unresolved_branches_.empty()) { - next_veneer_pool_check_ = kMaxInt; - } else { - next_veneer_pool_check_ = - unresolved_branches_first_limit() - kVeneerDistanceCheckMargin; - } - // Reminder: We iterate in reverse order to avoid duplicate linked-list // iteration in RemoveBranchFromLabelLinkChain (which starts at the target // label, and iterates backwards through linked branch instructions). @@ -4733,6 +4744,16 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, RemoveBranchFromLabelLinkChain(branch, tasks[i].label_, veneer); } + // Update next_veneer_pool_check_ (tightly coupled with unresolved_branches_). + // This must happen after the calls to {RemoveBranchFromLabelLinkChain}, + // because that function can resolve additional branches. + if (unresolved_branches_.empty()) { + next_veneer_pool_check_ = kMaxInt; + } else { + next_veneer_pool_check_ = + unresolved_branches_first_limit() - kVeneerDistanceCheckMargin; + } + // Now emit the actual veneer and patch up the incoming branch. for (const FarBranchInfo& info : tasks) { diff --git a/deps/v8/src/codegen/arm64/decoder-arm64-inl.h b/deps/v8/src/codegen/arm64/decoder-arm64-inl.h index aed10853571c46..4b82e94b210561 100644 --- a/deps/v8/src/codegen/arm64/decoder-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/decoder-arm64-inl.h @@ -5,7 +5,6 @@ #ifndef V8_CODEGEN_ARM64_DECODER_ARM64_INL_H_ #define V8_CODEGEN_ARM64_DECODER_ARM64_INL_H_ -#include "src/base/v8-fallthrough.h" #include "src/codegen/arm64/decoder-arm64.h" namespace v8 { @@ -475,7 +474,7 @@ void Decoder::DecodeDataProcessing(Instruction* instr) { } break; } - V8_FALLTHROUGH; + [[fallthrough]]; } case 1: case 3: diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h index 0502dc16737acf..e8a51ae64c33f9 100644 --- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h @@ -85,6 +85,21 @@ constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() { return x3; } +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::EnumIndexRegister() { + return x4; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::CacheTypeRegister() { + return x5; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::SlotRegister() { + return x2; +} + // static constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() { return kInterpreterAccumulatorRegister; @@ -346,7 +361,8 @@ CallApiCallbackGenericDescriptor::TopmostScriptHavingContextRegister() { return x1; } // static -constexpr Register CallApiCallbackGenericDescriptor::CallHandlerInfoRegister() { +constexpr Register +CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister() { return x3; } // static diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h index 0eda579b9d12fb..29229dc6f60049 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h +++ 
b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h @@ -1136,6 +1136,8 @@ void MacroAssembler::Uxtw(const Register& rd, const Register& rn) { void MacroAssembler::InitializeRootRegister() { ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); Mov(kRootRegister, Operand(isolate_root)); + Fmov(fp_zero, 0.0); + #ifdef V8_COMPRESS_POINTERS LoadRootRelative(kPtrComprCageBaseRegister, IsolateData::cage_base_offset()); #endif diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index 636dcdc874382c..9553771259fbe2 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -19,7 +19,7 @@ #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frame-constants.h" #include "src/execution/frames-inl.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #include "src/init/bootstrapper.h" #include "src/logging/counters.h" #include "src/runtime/runtime.h" @@ -2091,13 +2091,20 @@ int MacroAssembler::CallCFunction(Register function, int num_of_reg_args, DCHECK(has_frame()); Label get_pc; + UseScratchRegisterScope temps(this); + // We're doing a C call, which means non-parameter caller-saved registers + // (x8-x17) will be clobbered and so are available to use as scratches. + // In the worst-case scenario, we'll need 2 scratch registers. We pick 3 + // registers minus the `function` register, in case `function` aliases with + // any of the registers. + temps.Include(CPURegList(64, {x8, x9, x10, function})); + temps.Exclude(function); if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { // Save the frame pointer and PC so that the stack layout remains iterable, // even without an ExitFrame which normally exists between JS and C frames. - Register pc_scratch = x4; - Register addr_scratch = x5; - Push(pc_scratch, addr_scratch); + UseScratchRegisterScope temps(this); + Register pc_scratch = temps.AcquireX(); Adr(pc_scratch, &get_pc); @@ -2107,7 +2114,16 @@ int MacroAssembler::CallCFunction(Register function, int num_of_reg_args, static_assert(IsolateData::fast_c_call_caller_pc_offset() == fp_offset + 8); Stp(fp, pc_scratch, MemOperand(kRootRegister, fp_offset)); + +#if DEBUG + // Reset Isolate::context field right before the fast C call such that the + // GC can visit this field unconditionally. This is necessary because + // CEntry sets it to kInvalidContext in debug build only. + static_assert(Context::kNoContext == 0); + StoreRootRelative(IsolateData::context_offset(), xzr); +#endif } else { + Register addr_scratch = temps.AcquireX(); DCHECK_NOT_NULL(isolate()); Mov(addr_scratch, ExternalReference::fast_c_call_caller_pc_address(isolate())); @@ -2115,9 +2131,16 @@ int MacroAssembler::CallCFunction(Register function, int num_of_reg_args, Mov(addr_scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); Str(fp, MemOperand(addr_scratch)); +#if DEBUG + // Reset Isolate::context field right before the fast C call such that the + // GC can visit this field unconditionally. This is necessary because + // CEntry sets it to kInvalidContext in debug build only. + static_assert(Context::kNoContext == 0); + Str(xzr, + ExternalReferenceAsOperand( + ExternalReference::context_address(isolate()), addr_scratch)); +#endif } - - Pop(addr_scratch, pc_scratch); } // Call directly. 
The function called cannot cause a GC, or allow preemption, @@ -2134,12 +2157,11 @@ int MacroAssembler::CallCFunction(Register function, int num_of_reg_args, IsolateData::fast_c_call_caller_fp_offset())); } else { DCHECK_NOT_NULL(isolate()); - Register addr_scratch = x5; - Push(addr_scratch, xzr); + UseScratchRegisterScope temps(this); + Register addr_scratch = temps.AcquireX(); Mov(addr_scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); Str(xzr, MemOperand(addr_scratch)); - Pop(xzr, addr_scratch); } } @@ -2841,8 +2863,7 @@ void MacroAssembler::JumpIfCodeIsTurbofanned(Register code, Register scratch, } Operand MacroAssembler::ClearedValue() const { - return Operand( - static_cast(HeapObjectReference::ClearedValue(isolate()).ptr())); + return Operand(static_cast(i::ClearedValue(isolate()).ptr())); } Operand MacroAssembler::ReceiverOperand() { return Operand(0); } @@ -3493,7 +3514,7 @@ void MacroAssembler::CheckPageFlag(const Register& object, int mask, ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register scratch = temps.AcquireX(); - And(scratch, object, ~MemoryChunkHeader::GetAlignmentMaskForAssembler()); + And(scratch, object, ~MemoryChunk::GetAlignmentMaskForAssembler()); Ldr(scratch, MemOperand(scratch, MemoryChunkLayout::kFlagsOffset)); if (cc == ne) { TestAndBranchIfAnySet(scratch, mask, condition_met); @@ -3698,9 +3719,10 @@ void MacroAssembler::ResolveTrustedPointerHandle(Register destination, Mov(handle, Operand(handle, LSR, kTrustedPointerHandleShift)); Ldr(destination, MemOperand(table, handle, LSL, kTrustedPointerTableEntrySizeLog2)); - // The LSB is used as marking bit by the trusted pointer table, so here we - // have to set it using a bitwise OR as it may or may not be set. - Orr(destination, destination, Immediate(kHeapObjectTag)); + // Untag the pointer and remove the marking bit in one operation. + Register tag_reg = handle; + Mov(tag_reg, Immediate(~(tag | kTrustedPointerTableMarkBit))); + And(destination, destination, tag_reg); } void MacroAssembler::ResolveCodePointerHandle(Register destination, diff --git a/deps/v8/src/codegen/bailout-reason.cc b/deps/v8/src/codegen/bailout-reason.cc index f4573fbe9c17a5..9a26f3c112bda6 100644 --- a/deps/v8/src/codegen/bailout-reason.cc +++ b/deps/v8/src/codegen/bailout-reason.cc @@ -11,7 +11,11 @@ namespace internal { #define ERROR_MESSAGES_TEXTS(C, T) T, const char* GetBailoutReason(BailoutReason reason) { - DCHECK_LT(reason, BailoutReason::kLastErrorMessage); + // Currently, the BailoutReason is read from the SharedFunctionInfo object + // inside the sandbox and must therefore be considered untrusted. As such, it + // needs to be validated here. + static_assert(std::is_unsigned_v>); + SBXCHECK_LT(reason, BailoutReason::kLastErrorMessage); DCHECK_GE(reason, BailoutReason::kNoReason); static const char* error_messages_[] = { BAILOUT_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)}; diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc index 82774268e409e7..5f336e2744f09c 100644 --- a/deps/v8/src/codegen/code-stub-assembler.cc +++ b/deps/v8/src/codegen/code-stub-assembler.cc @@ -18,8 +18,8 @@ #include "src/execution/frames-inl.h" #include "src/execution/frames.h" #include "src/execution/protectors.h" -#include "src/heap/heap-inl.h" // For MemoryChunk. TODO(jkummerow): Drop. -#include "src/heap/memory-chunk.h" +#include "src/heap/heap-inl.h" // For MutablePageMetadata. TODO(jkummerow): Drop. 
+#include "src/heap/mutable-page.h" #include "src/logging/counters.h" #include "src/numbers/integer-literal-inl.h" #include "src/objects/api-callbacks.h" @@ -37,6 +37,7 @@ #include "src/objects/property-descriptor-object.h" #include "src/objects/tagged-field.h" #include "src/roots/roots.h" +#include "third_party/v8/codegen/fp16-inl.h" namespace v8 { namespace internal { @@ -213,7 +214,7 @@ void CodeStubAssembler::FailAssert( } } std::string files_and_lines_text = stream.str(); - if (files_and_lines_text.size() != 0) { + if (!files_and_lines_text.empty()) { SNPrintF(chars, "%s%s", message, files_and_lines_text.c_str()); message = chars.begin(); } @@ -1313,6 +1314,10 @@ TNode CodeStubAssembler::TruncateIntPtrToInt32(TNode value) { return ReinterpretCast(value); } +TNode CodeStubAssembler::TruncateWord64ToWord32(TNode value) { + return TruncateInt64ToInt32(ReinterpretCast(value)); +} + TNode CodeStubAssembler::TaggedIsSmi(TNode a) { static_assert(kSmiTagMask < kMaxUInt32); return Word32Equal( @@ -1915,10 +1920,9 @@ TNode CodeStubAssembler::ResolveTrustedPointerHandle( TNode offset = ChangeUint32ToWord(Word32Shl( index, UniqueUint32Constant(kTrustedPointerTableEntrySizeLog2))); TNode value = Load(table, offset); - // The LSB is used as marking bit by the code pointer table, so here we have - // to set it using a bitwise OR as it may or may not be set. - value = - UncheckedCast(WordOr(value, UintPtrConstant(kHeapObjectTag))); + // Untag the pointer and remove the marking bit in one operation. + value = UncheckedCast( + WordAnd(value, UintPtrConstant(~(tag | kTrustedPointerTableMarkBit)))); return UncheckedCast(BitcastWordToTagged(value)); } @@ -1950,20 +1954,6 @@ TNode CodeStubAssembler::LoadCodeEntrypointViaCodePointerField( } #endif // V8_ENABLE_SANDBOX -TNode CodeStubAssembler::LoadProtectedPointerFromObject( - TNode object, int offset) { -#ifdef V8_ENABLE_SANDBOX - TNode trusted_cage_base = LoadPointerFromRootRegister( - IntPtrConstant(IsolateData::trusted_cage_base_offset())); - TNode offset_from_cage_base = - ChangeUint32ToWord(LoadObjectField(object, offset)); - TNode pointer = - UncheckedCast(WordOr(trusted_cage_base, offset_from_cage_base)); - return UncheckedCast(BitcastWordToTagged(pointer)); -#else - return LoadObjectField(object, offset); -#endif -} TNode CodeStubAssembler::LoadFromParentFrame(int offset) { TNode frame_pointer = LoadParentFramePointer(); @@ -2667,10 +2657,12 @@ TNode CodeStubAssembler::LoadArrayElement(TNode array, TNode index_node, int additional_offset) { // TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants? - static_assert(std::is_same::value || - std::is_same::value || - std::is_same::value, - "Only Smi, UintPtrT or IntPtrT indices are allowed"); + static_assert( + std::is_same::value || + std::is_same::value || + std::is_same::value || + std::is_same::value, + "Only Smi, UintPtrT, IntPtrT or TaggedIndex indices are allowed"); CSA_DCHECK(this, IntPtrGreaterThanOrEqual(ParameterToIntPtr(index_node), IntPtrConstant(0))); DCHECK(IsAligned(additional_offset, kTaggedSize)); @@ -2703,10 +2695,12 @@ TNode CodeStubAssembler::LoadFixedArrayElement( TNode object, TNode index, int additional_offset, CheckBounds check_bounds) { // TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants? 
- static_assert(std::is_same::value || - std::is_same::value || - std::is_same::value, - "Only Smi, UintPtrT or IntPtrT indexes are allowed"); + static_assert( + std::is_same::value || + std::is_same::value || + std::is_same::value || + std::is_same::value, + "Only Smi, UintPtrT, IntPtrT or TaggedIndex indexes are allowed"); CSA_DCHECK(this, IsFixedArraySubclass(object)); CSA_DCHECK(this, IsNotWeakFixedArraySubclass(object)); @@ -2722,6 +2716,10 @@ template V8_EXPORT_PRIVATE TNode CodeStubAssembler::LoadFixedArrayElement(TNode, TNode, int, CheckBounds); template V8_EXPORT_PRIVATE TNode +CodeStubAssembler::LoadFixedArrayElement(TNode, + TNode, int, + CheckBounds); +template V8_EXPORT_PRIVATE TNode CodeStubAssembler::LoadFixedArrayElement(TNode, TNode, int, CheckBounds); @@ -2767,6 +2765,19 @@ TNode CodeStubAssembler::LoadPropertyArrayElement( additional_offset)); } +void CodeStubAssembler::FixedArrayBoundsCheck(TNode array, + TNode index, + int additional_offset) { + if (!v8_flags.fixed_array_bounds_checks) return; + DCHECK(IsAligned(additional_offset, kTaggedSize)); + // IntPtrAdd does constant-folding automatically. + TNode effective_index = + IntPtrAdd(TaggedIndexToIntPtr(index), + IntPtrConstant(additional_offset / kTaggedSize)); + CSA_CHECK(this, UintPtrLessThan(effective_index, + LoadAndUntagFixedArrayBaseLength(array))); +} + TNode CodeStubAssembler::LoadPropertyArrayLength( TNode object) { TNode value = LoadAndUntagToWord32ObjectField( @@ -3005,6 +3016,9 @@ TNode CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( return ChangeUint32ToTagged(Load(data_pointer, offset)); case INT32_ELEMENTS: return ChangeInt32ToTagged(Load(data_pointer, offset)); + case FLOAT16_ELEMENTS: + return AllocateHeapNumberWithValue( + ChangeFloat16ToFloat64(Load(data_pointer, offset))); case FLOAT32_ELEMENTS: return AllocateHeapNumberWithValue( ChangeFloat32ToFloat64(Load(data_pointer, offset))); @@ -3492,8 +3506,8 @@ TNode CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray( this, Word32Equal(DecodeWord32(code_flags), Int32Constant(static_cast(CodeKind::BASELINE)))); #endif // DEBUG - TNode baseline_data = LoadProtectedPointerFromObject( - code, Code::kDeoptimizationDataOrInterpreterDataOffset); + TNode baseline_data = CAST(LoadProtectedPointerField( + code, Code::kDeoptimizationDataOrInterpreterDataOffset)); var_result = baseline_data; } Goto(&check_for_interpreter_data); @@ -3501,7 +3515,7 @@ TNode CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray( BIND(&check_for_interpreter_data); GotoIfNot(HasInstanceType(var_result.value(), INTERPRETER_DATA_TYPE), &done); - TNode bytecode_array = CAST(LoadProtectedPointerFromObject( + TNode bytecode_array = CAST(LoadProtectedPointerField( CAST(var_result.value()), InterpreterData::kBytecodeArrayOffset)); var_result = bytecode_array; Goto(&done); @@ -3595,12 +3609,11 @@ void CodeStubAssembler::UnsafeStoreObjectFieldNoWriteBarrier( void CodeStubAssembler::StoreSharedObjectField(TNode object, TNode offset, TNode value) { - CSA_DCHECK( - this, - WordNotEqual( - WordAnd(LoadBasicMemoryChunkFlags(object), - IntPtrConstant(BasicMemoryChunk::IN_WRITABLE_SHARED_SPACE)), - IntPtrConstant(0))); + CSA_DCHECK(this, + WordNotEqual( + WordAnd(LoadBasicMemoryChunkFlags(object), + IntPtrConstant(MemoryChunk::IN_WRITABLE_SHARED_SPACE)), + IntPtrConstant(0))); int const_offset; if (TryToInt32Constant(offset, &const_offset)) { StoreObjectField(object, const_offset, value); @@ -5060,7 +5073,7 @@ TNode CodeStubAssembler::ExtractToFixedArray( #ifndef V8_ENABLE_SINGLE_GENERATION 
#ifdef DEBUG TNode object_word = BitcastTaggedToWord(to_elements); - TNode object_page_header = PageHeaderFromAddress(object_word); + TNode object_page_header = MemoryChunkFromAddress(object_word); TNode page_flags = Load( object_page_header, IntPtrConstant(MemoryChunkLayout::kFlagsOffset)); CSA_DCHECK( @@ -5461,7 +5474,7 @@ void CodeStubAssembler::JumpIfPointersFromHereAreInteresting( TNode object, Label* interesting) { Label finished(this); TNode object_word = BitcastTaggedToWord(object); - TNode object_page_header = PageHeaderFromAddress(object_word); + TNode object_page_header = MemoryChunkFromAddress(object_word); TNode page_flags = UncheckedCast( Load(MachineType::IntPtr(), object_page_header, IntPtrConstant(MemoryChunkLayout::kFlagsOffset))); @@ -6380,7 +6393,6 @@ TNode CodeStubAssembler::TryFloat64ToSmi(TNode value, TNode value64 = ChangeInt32ToFloat64(value32); Label if_int32(this); - GotoIfNot(Float64Equal(value, value64), not_smi); GotoIfNot(Word32Equal(value32, Int32Constant(0)), &if_int32); Branch(Int32LessThan(UncheckedCast(Float64ExtractHighWord32(value)), @@ -6400,6 +6412,203 @@ TNode CodeStubAssembler::TryFloat64ToSmi(TNode value, } } +TNode CodeStubAssembler::TruncateFloat64ToFloat16( + TNode value) { + // This is a verbatim CSA implementation of DoubleToFloat16. + // + // The 64-bit and 32-bit paths are implemented separately, but the algorithm + // is the same in both cases. The 32-bit version requires manual pairwise + // operations. + + if (Is64()) { + TVARIABLE(Uint16T, out); + TNode signed_in = BitcastFloat64ToInt64(value); + + // Take the absolute value of the input. + TNode sign = Word64And(signed_in, Uint64Constant(kFP64SignMask)); + TNode in = Word64Xor(signed_in, sign); + + Label if_infinity_or_nan(this), if_finite(this), done(this); + Branch(Uint64GreaterThanOrEqual(in, + Uint64Constant(kFP16InfinityAndNaNInfimum)), + &if_infinity_or_nan, &if_finite); + + BIND(&if_infinity_or_nan); + { + // Result is infinity or NaN. + out = Select( + Uint64GreaterThan(in, Uint64Constant(kFP64Infinity)), + [=] { return Uint16Constant(kFP16qNaN); }, // NaN->qNaN + [=] { return Uint16Constant(kFP16Infinity); }); // Inf->Inf + Goto(&done); + } + + BIND(&if_finite); + { + // Result is a (de)normalized number or zero. + + Label if_denormal(this), not_denormal(this); + Branch(Uint64LessThan(in, Uint64Constant(kFP16DenormalThreshold)), + &if_denormal, ¬_denormal); + + BIND(&if_denormal); + { + // Result is a denormal or zero. Use the magic value and FP addition to + // align 10 mantissa bits at the bottom of the float. Depends on FP + // addition being round-to-nearest-even. + TNode temp = Float64Add( + BitcastInt64ToFloat64(ReinterpretCast(in)), + Float64Constant(base::bit_cast(kFP64To16DenormalMagic))); + out = ReinterpretCast(TruncateWord64ToWord32( + Uint64Sub(ReinterpretCast(BitcastFloat64ToInt64(temp)), + Uint64Constant(kFP64To16DenormalMagic)))); + Goto(&done); + } + + BIND(¬_denormal); + { + // Result is not a denormal. + + // Remember if the result mantissa will be odd before rounding. + TNode mant_odd = ReinterpretCast(Word64And( + Word64Shr(in, Int64Constant(kFP64MantissaBits - kFP16MantissaBits)), + Uint64Constant(1))); + + // Update the exponent and round to nearest even. + // + // Rounding to nearest even is handled in two parts. First, adding + // kFP64To16RebiasExponentAndRound has the effect of rebiasing the + // exponent and that if any of the lower 41 bits of the mantissa are + // set, the 11th mantissa bit from the front becomes set. 
Second, adding + // mant_odd ensures ties are rounded to even. + TNode temp1 = + Uint64Add(ReinterpretCast(in), + Uint64Constant(kFP64To16RebiasExponentAndRound)); + TNode temp2 = Uint64Add(temp1, mant_odd); + + out = ReinterpretCast(TruncateWord64ToWord32(Word64Shr( + temp2, Int64Constant(kFP64MantissaBits - kFP16MantissaBits)))); + + Goto(&done); + } + } + + BIND(&done); + return ReinterpretCast( + Word32Or(TruncateWord64ToWord32(Word64Shr(sign, Int64Constant(48))), + out.value())); + } else { + TVARIABLE(Uint16T, out); + TNode signed_in_hi_word = Float64ExtractHighWord32(value); + TNode in_lo_word = Float64ExtractLowWord32(value); + + // Take the absolute value of the input. + TNode sign = Word32And( + signed_in_hi_word, Uint64HighWordConstantNoLowWord(kFP64SignMask)); + TNode in_hi_word = Word32Xor(signed_in_hi_word, sign); + + Label if_infinity_or_nan(this), if_finite(this), done(this); + Branch(Uint32GreaterThanOrEqual( + in_hi_word, + Uint64HighWordConstantNoLowWord(kFP16InfinityAndNaNInfimum)), + &if_infinity_or_nan, &if_finite); + + BIND(&if_infinity_or_nan); + { + // Result is infinity or NaN. + out = Select( + Uint32GreaterThan(in_hi_word, + Uint64HighWordConstantNoLowWord(kFP64Infinity)), + [=] { return Uint16Constant(kFP16qNaN); }, // NaN->qNaN + [=] { return Uint16Constant(kFP16Infinity); }); // Inf->Inf + Goto(&done); + } + + BIND(&if_finite); + { + // Result is a (de)normalized number or zero. + + Label if_denormal(this), not_denormal(this); + Branch(Uint32LessThan(in_hi_word, Uint64HighWordConstantNoLowWord( + kFP16DenormalThreshold)), + &if_denormal, ¬_denormal); + + BIND(&if_denormal); + { + // Result is a denormal or zero. Use the magic value and FP addition to + // align 10 mantissa bits at the bottom of the float. Depends on FP + // addition being round-to-nearest-even. + TNode double_in = Float64InsertHighWord32( + Float64InsertLowWord32(Float64Constant(0), in_lo_word), in_hi_word); + TNode temp = Float64Add( + double_in, + Float64Constant(base::bit_cast(kFP64To16DenormalMagic))); + out = ReinterpretCast(Projection<0>(Int32PairSub( + Float64ExtractLowWord32(temp), Float64ExtractHighWord32(temp), + Uint64LowWordConstant(kFP64To16DenormalMagic), + Uint64HighWordConstant(kFP64To16DenormalMagic)))); + + Goto(&done); + } + + BIND(¬_denormal); + { + // Result is not a denormal. + + // Remember if the result mantissa will be odd before rounding. + TNode mant_odd = ReinterpretCast(Word32And( + Word32Shr(in_hi_word, Int32Constant(kFP64MantissaBits - + kFP16MantissaBits - 32)), + Uint32Constant(1))); + + // Update the exponent and round to nearest even. + // + // Rounding to nearest even is handled in two parts. First, adding + // kFP64To16RebiasExponentAndRound has the effect of rebiasing the + // exponent and that if any of the lower 41 bits of the mantissa are + // set, the 11th mantissa bit from the front becomes set. Second, adding + // mant_odd ensures ties are rounded to even. 
+ TNode> temp1 = Int32PairAdd( + in_lo_word, in_hi_word, + Uint64LowWordConstant(kFP64To16RebiasExponentAndRound), + Uint64HighWordConstant(kFP64To16RebiasExponentAndRound)); + TNode> temp2 = + Int32PairAdd(Projection<0>(temp1), Projection<1>(temp1), mant_odd, + Int32Constant(0)); + + out = ReinterpretCast((Word32Shr( + Projection<1>(temp2), + Int32Constant(kFP64MantissaBits - kFP16MantissaBits - 32)))); + + Goto(&done); + } + } + + BIND(&done); + return ReinterpretCast( + Word32Or(Word32Shr(sign, Int32Constant(16)), out.value())); + } +} + +TNode CodeStubAssembler::BitcastFloat16ToUint32( + TNode value) { + return ReinterpretCast(value); +} + +TNode CodeStubAssembler::BitcastUint32ToFloat16( + TNode value) { + return ReinterpretCast(value); +} + +TNode CodeStubAssembler::RoundInt32ToFloat16(TNode value) { + return TruncateFloat32ToFloat16(RoundInt32ToFloat32(value)); +} + +TNode CodeStubAssembler::ChangeFloat16ToFloat64( + TNode value) { + return ChangeFloat32ToFloat64(ChangeFloat16ToFloat32(value)); +} + TNode CodeStubAssembler::ChangeFloat32ToTagged(TNode value) { Label not_smi(this), done(this); TVARIABLE(Number, var_result); @@ -6574,7 +6783,9 @@ TNode CodeStubAssembler::ToThisString(TNode context, return CAST(var_value.value()); } -TNode CodeStubAssembler::ChangeNumberToUint32(TNode value) { +// This has platform-specific and ill-defined behavior for negative inputs. +TNode CodeStubAssembler::ChangeNonNegativeNumberToUint32( + TNode value) { TVARIABLE(Uint32T, var_result); Label if_smi(this), if_heapnumber(this, Label::kDeferred), done(this); Branch(TaggedIsSmi(value), &if_smi, &if_heapnumber); @@ -7533,6 +7744,10 @@ TNode CodeStubAssembler::IsString(TNode object) { #endif } +TNode CodeStubAssembler::IsStringWrapper(TNode object) { + return IsStringWrapperElementsKind(LoadMap(object)); +} + TNode CodeStubAssembler::IsSeqOneByteString(TNode object) { return IsSeqOneByteStringInstanceType(LoadInstanceType(object)); } @@ -7867,7 +8082,7 @@ TNode CodeStubAssembler::IsNumberArrayIndex(TNode number) { TNode CodeStubAssembler::LoadBasicMemoryChunkFlags( TNode object) { TNode object_word = BitcastTaggedToWord(object); - TNode page_header = PageHeaderFromAddress(object_word); + TNode page_header = MemoryChunkFromAddress(object_word); return UncheckedCast( Load(MachineType::Pointer(), page_header, IntPtrConstant(MemoryChunkLayout::kFlagsOffset))); @@ -9454,8 +9669,7 @@ TNode CodeStubAssembler::NameToIndexHashTableLookup( template void CodeStubAssembler::NameDictionaryLookup( TNode dictionary, TNode unique_name, Label* if_found, - TVariable* var_name_index, Label* if_not_found_no_insertion_index, - LookupMode mode, Label* if_not_found_with_insertion_index) { + TVariable* var_name_index, Label* if_not_found, LookupMode mode) { static_assert(std::is_same::value || std::is_same::value || std::is_same::value, @@ -9463,13 +9677,8 @@ void CodeStubAssembler::NameDictionaryLookup( DCHECK_IMPLIES(var_name_index != nullptr, MachineType::PointerRepresentation() == var_name_index->rep()); DCHECK_IMPLIES(mode == kFindInsertionIndex, if_found == nullptr); - DCHECK_IMPLIES(if_not_found_with_insertion_index != nullptr, - var_name_index != nullptr); Comment("NameDictionaryLookup"); CSA_DCHECK(this, IsUniqueName(unique_name)); - if (if_not_found_with_insertion_index == nullptr) { - if_not_found_with_insertion_index = if_not_found_no_insertion_index; - } Label if_not_computed(this, Label::kDeferred); @@ -9503,17 +9712,19 @@ void CodeStubAssembler::NameDictionaryLookup( TNode current = 
CAST(UnsafeLoadFixedArrayElement(dictionary, index)); - GotoIf(TaggedEqual(current, undefined), if_not_found_with_insertion_index); - if (mode == kFindExisting) { - if (Dictionary::ShapeT::kMatchNeedsHoleCheck) { - GotoIf(TaggedEqual(current, TheHoleConstant()), &next_probe); - } - current = LoadName(current); - GotoIf(TaggedEqual(current, unique_name), if_found); - } else { - DCHECK_EQ(kFindInsertionIndex, mode); - GotoIf(TaggedEqual(current, TheHoleConstant()), - if_not_found_with_insertion_index); + GotoIf(TaggedEqual(current, undefined), if_not_found); + switch (mode) { + case kFindInsertionIndex: + GotoIf(TaggedEqual(current, TheHoleConstant()), if_not_found); + break; + case kFindExisting: + case kFindExistingOrInsertionIndex: + if (Dictionary::TodoShape::kMatchNeedsHoleCheck) { + GotoIf(TaggedEqual(current, TheHoleConstant()), &next_probe); + } + current = LoadName(current); + GotoIf(TaggedEqual(current, unique_name), if_found); + break; } Goto(&next_probe); @@ -9532,46 +9743,8 @@ void CodeStubAssembler::NameDictionaryLookup( // memory features turned on. To minimize affecting the fast path, the // forwarding index branch defers both fetching the actual hash value and // the dictionary lookup to the runtime. - using ER = ExternalReference; // To avoid super long lines below. - ER func_ref; - if constexpr (std::is_same::value) { - func_ref = - mode == kFindExisting - ? ER::name_dictionary_lookup_forwarded_string() - : ER::name_dictionary_find_insertion_entry_forwarded_string(); - } else if constexpr (std::is_same::value) { - func_ref = - mode == kFindExisting - ? ER::global_dictionary_lookup_forwarded_string() - : ER::global_dictionary_find_insertion_entry_forwarded_string(); - } else { - auto ref0 = ER::name_to_index_hashtable_lookup_forwarded_string(); - auto ref1 = - ER::name_to_index_hashtable_find_insertion_entry_forwarded_string(); - func_ref = mode == kFindExisting ? ref0 : ref1; - } - const TNode function = ExternalConstant(func_ref); - const TNode isolate_ptr = - ExternalConstant(ER::isolate_address(isolate())); - TNode entry = UncheckedCast(CallCFunction( - function, MachineType::IntPtr(), - std::make_pair(MachineType::Pointer(), isolate_ptr), - std::make_pair(MachineType::TaggedPointer(), dictionary), - std::make_pair(MachineType::TaggedPointer(), unique_name))); - - if (var_name_index) *var_name_index = EntryToIndex(entry); - if (mode == kFindExisting) { - GotoIf(IntPtrEqual(entry, - IntPtrConstant(InternalIndex::NotFound().raw_value())), - if_not_found_no_insertion_index); - Goto(if_found); - } else { - CSA_DCHECK( - this, - WordNotEqual(entry, - IntPtrConstant(InternalIndex::NotFound().raw_value()))); - Goto(if_not_found_with_insertion_index); - } + NameDictionaryLookupWithForwardIndex(dictionary, unique_name, if_found, + var_name_index, if_not_found, mode); } } @@ -9580,11 +9753,66 @@ template V8_EXPORT_PRIVATE void CodeStubAssembler::NameDictionaryLookup(TNode, TNode, Label*, TVariable*, - Label*, LookupMode, - Label*); + Label*, LookupMode); template V8_EXPORT_PRIVATE void CodeStubAssembler::NameDictionaryLookup< GlobalDictionary>(TNode, TNode, Label*, - TVariable*, Label*, LookupMode, Label*); + TVariable*, Label*, LookupMode); + +template +void CodeStubAssembler::NameDictionaryLookupWithForwardIndex( + TNode dictionary, TNode unique_name, Label* if_found, + TVariable* var_name_index, Label* if_not_found, LookupMode mode) { + using ER = ExternalReference; // To avoid super long lines below. 
+ ER func_ref; + if constexpr (std::is_same::value) { + func_ref = mode == kFindInsertionIndex + ? ER::name_dictionary_find_insertion_entry_forwarded_string() + : ER::name_dictionary_lookup_forwarded_string(); + } else if constexpr (std::is_same::value) { + func_ref = + mode == kFindInsertionIndex + ? ER::global_dictionary_find_insertion_entry_forwarded_string() + : ER::global_dictionary_lookup_forwarded_string(); + } else { + auto ref0 = + ER::name_to_index_hashtable_find_insertion_entry_forwarded_string(); + auto ref1 = ER::name_to_index_hashtable_lookup_forwarded_string(); + func_ref = mode == kFindInsertionIndex ? ref0 : ref1; + } + const TNode function = ExternalConstant(func_ref); + const TNode isolate_ptr = + ExternalConstant(ER::isolate_address(isolate())); + TNode entry = UncheckedCast( + CallCFunction(function, MachineType::IntPtr(), + std::make_pair(MachineType::Pointer(), isolate_ptr), + std::make_pair(MachineType::TaggedPointer(), dictionary), + std::make_pair(MachineType::TaggedPointer(), unique_name))); + + if (var_name_index) *var_name_index = EntryToIndex(entry); + switch (mode) { + case kFindInsertionIndex: + CSA_DCHECK( + this, + WordNotEqual(entry, + IntPtrConstant(InternalIndex::NotFound().raw_value()))); + Goto(if_not_found); + break; + case kFindExisting: + GotoIf(IntPtrEqual(entry, + IntPtrConstant(InternalIndex::NotFound().raw_value())), + if_not_found); + Goto(if_found); + break; + case kFindExistingOrInsertionIndex: + GotoIfNot(IntPtrEqual(entry, IntPtrConstant( + InternalIndex::NotFound().raw_value())), + if_found); + NameDictionaryLookupWithForwardIndex(dictionary, unique_name, if_found, + var_name_index, if_not_found, + kFindInsertionIndex); + break; + } +} TNode CodeStubAssembler::ComputeSeededHash(TNode key) { const TNode function_addr = @@ -9604,13 +9832,12 @@ TNode CodeStubAssembler::ComputeSeededHash(TNode key) { template <> void CodeStubAssembler::NameDictionaryLookup( TNode dictionary, TNode unique_name, - Label* if_found, TVariable* var_name_index, - Label* if_not_found_no_insertion_index, LookupMode mode, - Label* if_not_found_with_insertion_index) { - // TODO(pthier): Support path for not found with valid insertion index for + Label* if_found, TVariable* var_name_index, Label* if_not_found, + LookupMode mode) { + // TODO(pthier): Support mode kFindExistingOrInsertionIndex for // SwissNameDictionary. 
SwissNameDictionaryFindEntry(dictionary, unique_name, if_found, - var_name_index, if_not_found_no_insertion_index); + var_name_index, if_not_found); } void CodeStubAssembler::NumberDictionaryLookup( @@ -11958,6 +12185,7 @@ MachineRepresentation ElementsKindToMachineRepresentation(ElementsKind kind) { return MachineRepresentation::kWord8; case UINT16_ELEMENTS: case INT16_ELEMENTS: + case FLOAT16_ELEMENTS: return MachineRepresentation::kWord16; case UINT32_ELEMENTS: case INT32_ELEMENTS: @@ -12074,7 +12302,8 @@ void CodeStubAssembler::StoreElementTypedArray(TNode elements, static_assert(std::is_same::value || std::is_same::value, "Only RawPtrT or FixedArrayBase elements are allowed"); - static_assert(std::is_same::value || + static_assert(std::is_same::value || + std::is_same::value || std::is_same::value || std::is_same::value || std::is_same::value, @@ -12124,7 +12353,8 @@ void CodeStubAssembler::StoreElement(TNode elements, ElementsKind kind, std::is_same::value, "Only Smi, IntPtrT or UintPtrT indices are allowed"); static_assert( - std::is_same::value || + std::is_same::value || + std::is_same::value || std::is_same::value || std::is_same::value || std::is_same::value || @@ -12151,6 +12381,8 @@ template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement(TNode, ElementsKind, TNode, TNode); +template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement( + TNode, ElementsKind, TNode, TNode); TNode CodeStubAssembler::Int32ToUint8Clamped( TNode int32_value) { @@ -12224,6 +12456,8 @@ TNode CodeStubAssembler::PrepareValueForWriteToTypedArray( LoadObjectField(heap_object, offsetof(HeapNumber, value_)); if (elements_kind == UINT8_CLAMPED_ELEMENTS) { var_result = Float64ToUint8Clamped(value); + } else if (elements_kind == FLOAT16_ELEMENTS) { + var_result = ReinterpretCast(TruncateFloat64ToFloat16(value)); } else { var_result = TruncateFloat64ToWord32(value); } @@ -12235,6 +12469,8 @@ TNode CodeStubAssembler::PrepareValueForWriteToTypedArray( TNode value = SmiToInt32(CAST(var_input.value())); if (elements_kind == UINT8_CLAMPED_ELEMENTS) { var_result = Int32ToUint8Clamped(value); + } else if (elements_kind == FLOAT16_ELEMENTS) { + var_result = ReinterpretCast(RoundInt32ToFloat16(value)); } else { var_result = value; } @@ -12251,6 +12487,54 @@ TNode CodeStubAssembler::PrepareValueForWriteToTypedArray( return var_result.value(); } +template <> +TNode CodeStubAssembler::PrepareValueForWriteToTypedArray( + TNode input, ElementsKind elements_kind, TNode context) { + DCHECK(IsTypedArrayElementsKind(elements_kind)); + CHECK_EQ(elements_kind, FLOAT16_ELEMENTS); + + TVARIABLE(Float16T, var_result); + TVARIABLE(Object, var_input, input); + Label done(this, &var_result), if_smi(this), if_heapnumber_or_oddball(this), + convert(this), loop(this, &var_input); + Goto(&loop); + BIND(&loop); + GotoIf(TaggedIsSmi(var_input.value()), &if_smi); + // We can handle both HeapNumber and Oddball here, since Oddball has the + // same layout as the HeapNumber for the HeapNumber::value field. This + // way we can also properly optimize stores of oddballs to typed arrays. 
+ TNode heap_object = CAST(var_input.value()); + GotoIf(IsHeapNumber(heap_object), &if_heapnumber_or_oddball); + STATIC_ASSERT_FIELD_OFFSETS_EQUAL(offsetof(HeapNumber, value_), + offsetof(Oddball, to_number_raw_)); + Branch(HasInstanceType(heap_object, ODDBALL_TYPE), &if_heapnumber_or_oddball, + &convert); + + BIND(&if_heapnumber_or_oddball); + { + TNode value = + LoadObjectField(heap_object, offsetof(HeapNumber, value_)); + var_result = TruncateFloat64ToFloat16(value); + Goto(&done); + } + + BIND(&if_smi); + { + TNode value = SmiToInt32(CAST(var_input.value())); + var_result = RoundInt32ToFloat16(value); + Goto(&done); + } + + BIND(&convert); + { + var_input = CallBuiltin(Builtin::kNonNumberToNumber, context, input); + Goto(&loop); + } + + BIND(&done); + return var_result.value(); +} + template <> TNode CodeStubAssembler::PrepareValueForWriteToTypedArray( TNode input, ElementsKind elements_kind, TNode context) { @@ -12424,6 +12708,26 @@ void CodeStubAssembler::EmitElementStoreTypedArrayUpdateValue( } } +template <> +void CodeStubAssembler::EmitElementStoreTypedArrayUpdateValue( + TNode value, ElementsKind elements_kind, + TNode converted_value, TVariable* maybe_converted_value) { + Label dont_allocate_heap_number(this), end(this); + GotoIf(TaggedIsSmi(value), &dont_allocate_heap_number); + GotoIf(IsHeapNumber(CAST(value)), &dont_allocate_heap_number); + { + *maybe_converted_value = + AllocateHeapNumberWithValue(ChangeFloat16ToFloat64(converted_value)); + Goto(&end); + } + BIND(&dont_allocate_heap_number); + { + *maybe_converted_value = value; + Goto(&end); + } + BIND(&end); +} + template <> void CodeStubAssembler::EmitElementStoreTypedArrayUpdateValue( TNode value, ElementsKind elements_kind, @@ -12604,6 +12908,12 @@ void CodeStubAssembler::EmitElementStore( elements_kind, store_mode, bailout, context, maybe_converted_value); break; + case FLOAT16_ELEMENTS: + case RAB_GSAB_FLOAT16_ELEMENTS: + EmitElementStoreTypedArray(typed_array, intptr_key, value, + elements_kind, store_mode, bailout, + context, maybe_converted_value); + break; default: UNREACHABLE(); } @@ -12815,7 +13125,7 @@ void CodeStubAssembler::TrapAllocationMemento(TNode object, TNode object_word = BitcastTaggedToWord(object); // TODO(v8:11641): Skip TrapAllocationMemento when allocation-site // tracking is disabled. - TNode object_page_header = PageHeaderFromAddress(object_word); + TNode object_page_header = MemoryChunkFromAddress(object_word); { TNode page_flags = Load( object_page_header, IntPtrConstant(MemoryChunkLayout::kFlagsOffset)); @@ -12835,11 +13145,11 @@ void CodeStubAssembler::TrapAllocationMemento(TNode object, TNode memento_last_word = IntPtrAdd( object_word, IntPtrConstant(kMementoLastWordOffset - kHeapObjectTag)); TNode memento_last_word_page_header = - PageHeaderFromAddress(memento_last_word); + MemoryChunkFromAddress(memento_last_word); TNode new_space_top = Load(new_space_top_address); TNode new_space_top_page_header = - PageHeaderFromAddress(new_space_top); + MemoryChunkFromAddress(new_space_top); // If the object is in new space, we need to check whether respective // potential memento object is on the same page as the current top. 
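For readers following the TruncateFloat64ToFloat16 port above: the comments describe the standard trick of rebiasing the exponent and then adding the result mantissa's own parity bit so that ties round to even. A minimal scalar sketch of the same conversion follows. It is not the V8 DoubleToFloat16 implementation; the bit-layout constants (1 sign, 5 exponent, 10 mantissa bits, bias 15) are just the IEEE 754 binary16 parameters, and the denormal path deliberately uses a plain ldexp-based fallback instead of the magic-constant addition used in the optimized code.

#include <cmath>
#include <cstdint>
#include <cstring>

// Sketch: double -> IEEE 754 binary16 with round-to-nearest-even.
// Mirrors the idea in the CSA code above (shift the mantissa down, then use
// the result's own parity bit to break ties), but handles specials and
// denormals with ordinary branches for clarity.
static uint16_t DoubleToHalfSketch(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));

  const uint16_t sign = static_cast<uint16_t>((bits >> 48) & 0x8000u);
  const int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;  // unbiased
  const uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);

  if (std::isnan(value)) return static_cast<uint16_t>(sign | 0x7E00u);  // quiet NaN
  if (std::isinf(value) || exponent > 15) {
    return static_cast<uint16_t>(sign | 0x7C00u);  // +/-infinity or overflow
  }

  if (exponent >= -14) {
    // Normal half-precision result: keep the top 10 mantissa bits and round
    // to nearest, ties to even.
    uint64_t shifted = mantissa >> 42;
    const uint64_t remainder = mantissa & ((uint64_t{1} << 42) - 1);
    const uint64_t halfway = uint64_t{1} << 41;
    if (remainder > halfway || (remainder == halfway && (shifted & 1))) {
      ++shifted;  // a tie rounds up exactly when the result would be odd
    }
    // A mantissa carry spills into the exponent field, which is the desired
    // behavior (including rounding up to infinity at the top of the range).
    return static_cast<uint16_t>(sign + ((exponent + 15) << 10) + shifted);
  }

  // Denormal or zero result: scale so that one half-precision denormal step
  // becomes one integer step, then round. Good enough for a sketch.
  const double scaled = std::ldexp(std::fabs(value), 24);  // 2^24 = 2^(14+10)
  const uint64_t q = static_cast<uint64_t>(std::nearbyint(scaled));
  return static_cast<uint16_t>(sign | q);
}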
@@ -12874,22 +13184,24 @@ void CodeStubAssembler::TrapAllocationMemento(TNode object, Comment("] TrapAllocationMemento"); } -TNode CodeStubAssembler::PageHeaderFromAddress( +TNode CodeStubAssembler::MemoryChunkFromAddress( TNode address) { DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL); - return WordAnd( - address, - IntPtrConstant(~MemoryChunkHeader::GetAlignmentMaskForAssembler())); + return WordAnd(address, + IntPtrConstant(~MemoryChunk::GetAlignmentMaskForAssembler())); } -TNode CodeStubAssembler::PageFromPageHeader(TNode address) { +TNode CodeStubAssembler::PageMetadataFromMemoryChunk( + TNode address) { DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL); - return address; + return Load(address, + IntPtrConstant(MemoryChunkLayout::kMetadataOffset)); } -TNode CodeStubAssembler::PageFromAddress(TNode address) { +TNode CodeStubAssembler::PageMetadataFromAddress( + TNode address) { DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL); - return PageFromPageHeader(PageHeaderFromAddress(address)); + return PageMetadataFromMemoryChunk(MemoryChunkFromAddress(address)); } TNode CodeStubAssembler::CreateAllocationSiteInFeedbackVector( @@ -15943,14 +16255,14 @@ TNode CodeStubAssembler::RabGsabElementsKindToElementByteSize( int32_t elements_kinds[] = { RAB_GSAB_UINT8_ELEMENTS, RAB_GSAB_UINT8_CLAMPED_ELEMENTS, RAB_GSAB_INT8_ELEMENTS, RAB_GSAB_UINT16_ELEMENTS, - RAB_GSAB_INT16_ELEMENTS, RAB_GSAB_UINT32_ELEMENTS, - RAB_GSAB_INT32_ELEMENTS, RAB_GSAB_FLOAT32_ELEMENTS, - RAB_GSAB_FLOAT64_ELEMENTS, RAB_GSAB_BIGINT64_ELEMENTS, - RAB_GSAB_BIGUINT64_ELEMENTS}; + RAB_GSAB_INT16_ELEMENTS, RAB_GSAB_FLOAT16_ELEMENTS, + RAB_GSAB_UINT32_ELEMENTS, RAB_GSAB_INT32_ELEMENTS, + RAB_GSAB_FLOAT32_ELEMENTS, RAB_GSAB_FLOAT64_ELEMENTS, + RAB_GSAB_BIGINT64_ELEMENTS, RAB_GSAB_BIGUINT64_ELEMENTS}; Label* elements_kind_labels[] = {&elements_8, &elements_8, &elements_8, - &elements_16, &elements_16, &elements_32, - &elements_32, &elements_32, &elements_64, - &elements_64, &elements_64}; + &elements_16, &elements_16, &elements_16, + &elements_32, &elements_32, &elements_32, + &elements_64, &elements_64, &elements_64}; const size_t kTypedElementsKindCount = LAST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND - FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1; @@ -16396,7 +16708,7 @@ TNode CodeStubAssembler::GetSharedFunctionInfoCode( CSA_DCHECK(this, Word32Equal(data_type, Int32Constant(INTERPRETER_DATA_TYPE))); { - TNode trampoline = CAST(LoadProtectedPointerFromObject( + TNode trampoline = CAST(LoadProtectedPointerField( CAST(sfi_data), InterpreterData::kInterpreterTrampolineOffset)); sfi_code = trampoline; } @@ -16637,7 +16949,8 @@ void CodeStubAssembler::PrintToStream(const char* prefix, HeapConstantNoHole(string), SmiConstant(stream)); } // CallRuntime only accepts Objects, so do an UncheckedCast to object. - // DebugPrint explicitly checks whether the tagged value is a MaybeObject. + // DebugPrint explicitly checks whether the tagged value is a + // Tagged. TNode arg = UncheckedCast(tagged_value); CallRuntime(Runtime::kDebugPrint, NoContextConstant(), arg, SmiConstant(stream)); @@ -17701,9 +18014,9 @@ void CodeStubAssembler::SharedValueBarrier( // trivially shared. 
CSA_DCHECK(this, BoolConstant(ReadOnlyHeap::IsReadOnlySpaceShared())); TNode page_flags = LoadBasicMemoryChunkFlags(CAST(value)); - GotoIf(WordNotEqual(WordAnd(page_flags, - IntPtrConstant(BasicMemoryChunk::READ_ONLY_HEAP)), - IntPtrConstant(0)), + GotoIf(WordNotEqual( + WordAnd(page_flags, IntPtrConstant(MemoryChunk::READ_ONLY_HEAP)), + IntPtrConstant(0)), &skip_barrier); // Fast path: Check if the HeapObject is already shared. @@ -17717,12 +18030,11 @@ void CodeStubAssembler::SharedValueBarrier( BIND(&check_in_shared_heap); { - Branch( - WordNotEqual( - WordAnd(page_flags, - IntPtrConstant(BasicMemoryChunk::IN_WRITABLE_SHARED_SPACE)), - IntPtrConstant(0)), - &skip_barrier, &slow); + Branch(WordNotEqual( + WordAnd(page_flags, + IntPtrConstant(MemoryChunk::IN_WRITABLE_SHARED_SPACE)), + IntPtrConstant(0)), + &skip_barrier, &slow); } // Slow path: Call out to runtime to share primitives and to throw on @@ -17740,8 +18052,8 @@ void CodeStubAssembler::SharedValueBarrier( this, WordNotEqual( WordAnd(LoadBasicMemoryChunkFlags(CAST(var_shared_value->value())), - IntPtrConstant(BasicMemoryChunk::READ_ONLY_HEAP | - BasicMemoryChunk::IN_WRITABLE_SHARED_SPACE)), + IntPtrConstant(MemoryChunk::READ_ONLY_HEAP | + MemoryChunk::IN_WRITABLE_SHARED_SPACE)), IntPtrConstant(0))); Goto(&done); } diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h index b57ec139dd3452..b082bb8dc7bf34 100644 --- a/deps/v8/src/codegen/code-stub-assembler.h +++ b/deps/v8/src/codegen/code-stub-assembler.h @@ -33,6 +33,7 @@ #include "src/objects/swiss-name-dictionary.h" #include "src/objects/tagged-index.h" #include "src/objects/tagged.h" +#include "src/objects/templates.h" #include "src/roots/roots.h" #include "torque-generated/exported-macros-assembler.h" @@ -72,6 +73,8 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; V(SetIteratorProtector, set_iterator_protector, SetIteratorProtector) \ V(StringIteratorProtector, string_iterator_protector, \ StringIteratorProtector) \ + V(StringWrapperToPrimitiveProtector, string_wrapper_to_primitive_protector, \ + StringWrapperToPrimitiveProtector) \ V(TypedArraySpeciesProtector, typed_array_species_protector, \ TypedArraySpeciesProtector) \ V(AsyncFunctionAwaitRejectSharedFun, async_function_await_reject_shared_fun, \ @@ -402,6 +405,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode ParameterToIntPtr(TNode value) { return Signed(value); } + TNode ParameterToIntPtr(TNode value) { + return TaggedIndexToIntPtr(value); + } TNode ParameterToTagged(TNode value) { return value; } @@ -1036,8 +1042,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler single_char[0])); } + TNode TruncateFloat32ToFloat16(TNode value); + TNode TruncateFloat64ToFloat16(TNode value); + TNode TruncateWordToInt32(TNode value); TNode TruncateIntPtrToInt32(TNode value); + TNode TruncateWord64ToWord32(TNode value); // Check a value for smi-ness TNode TaggedIsSmi(TNode a); @@ -1253,19 +1263,27 @@ class V8_EXPORT_PRIVATE CodeStubAssembler CodeEntrypointTag tag); #endif - TNode LoadProtectedPointerFromObject( - TNode object, int offset); + TNode LoadProtectedPointerField(TNode object, + TNode offset) { + return CAST(LoadProtectedPointerFromObject( + object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)))); + } + TNode LoadProtectedPointerField(TNode object, + int offset) { + return CAST(LoadProtectedPointerFromObject( + object, IntPtrConstant(offset - kHeapObjectTag))); + } TNode LoadForeignForeignAddressPtr(TNode object) { return 
LoadExternalPointerFromObject(object, Foreign::kForeignAddressOffset, kForeignForeignAddressTag); } - TNode LoadCallHandlerInfoJsCallbackPtr( - TNode object) { + TNode LoadFunctionTemplateInfoJsCallbackPtr( + TNode object) { return LoadExternalPointerFromObject( - object, CallHandlerInfo::kMaybeRedirectedCallbackOffset, - kCallHandlerInfoCallbackTag); + object, FunctionTemplateInfo::kMaybeRedirectedCallbackOffset, + kFunctionTemplateInfoCallbackTag); } TNode LoadExternalStringResourcePtr(TNode object) { @@ -1293,6 +1311,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } #if V8_ENABLE_WEBASSEMBLY + // Returns WasmApiFunctionRef or WasmTrustedInstanceData. + TNode LoadRefFromWasmInternalFunction( + TNode object) { + TNode ref = LoadTrustedPointerFromObject( + object, WasmInternalFunction::kIndirectRefOffset, + kUnknownIndirectPointerTag); + CSA_DCHECK(this, + Word32Or(HasInstanceType(ref, WASM_TRUSTED_INSTANCE_DATA_TYPE), + HasInstanceType(ref, WASM_API_FUNCTION_REF_TYPE))); + return CAST(ref); + } + TNode LoadWasmInternalFunctionCallTargetPtr( TNode object) { return LoadExternalPointerFromObject( @@ -1547,7 +1577,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode LoadFixedArrayBaseLength(TNode array); template TNode LoadArrayCapacity(TNode array) { - return LoadObjectField(array, Array::ShapeT::kCapacityOffset); + return LoadObjectField(array, Array::Shape::kCapacityOffset); } // Load the length of a fixed array base instance. TNode LoadAndUntagFixedArrayBaseLength(TNode array); @@ -1642,7 +1672,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void DispatchMaybeObject(TNode maybe_object, Label* if_smi, Label* if_cleared, Label* if_weak, Label* if_strong, TVariable* extracted); - // See MaybeObject for semantics of these functions. + // See Tagged for semantics of these functions. TNode IsStrong(TNode value); TNode IsStrong(TNode value); TNode GetHeapObjectIfStrong(TNode value, @@ -1697,6 +1727,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler FixedArrayBoundsCheck(array, Signed(index), additional_offset); } + void FixedArrayBoundsCheck(TNode array, + TNode index, int additional_offset); + void FixedArrayBoundsCheck(TNode array, TNode index, + int additional_offset) { + FixedArrayBoundsCheck(UncheckedCast(array), index, + additional_offset); + } + // Array is any array-like type that has a fixed header followed by // tagged elements. 
template @@ -2768,13 +2806,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode TryHeapNumberToSmi(TNode number, Label* not_smi); TNode TryFloat32ToSmi(TNode number, Label* not_smi); TNode TryFloat64ToSmi(TNode number, Label* not_smi); + + TNode BitcastFloat16ToUint32(TNode value); + TNode BitcastUint32ToFloat16(TNode value); + TNode RoundInt32ToFloat16(TNode value); + + TNode ChangeFloat16ToFloat64(TNode value); + TNode ChangeFloat16ToFloat32(TNode value); TNode ChangeFloat32ToTagged(TNode value); TNode ChangeFloat64ToTagged(TNode value); TNode ChangeInt32ToTagged(TNode value); TNode ChangeInt32ToTaggedNoOverflow(TNode value); TNode ChangeUint32ToTagged(TNode value); TNode ChangeUintPtrToTagged(TNode value); - TNode ChangeNumberToUint32(TNode value); + TNode ChangeNonNegativeNumberToUint32(TNode value); TNode ChangeNumberToFloat64(TNode value); TNode ChangeTaggedNonSmiToInt32(TNode context, @@ -3000,6 +3045,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsSpecialReceiverMap(TNode map); TNode IsStringInstanceType(TNode instance_type); TNode IsString(TNode object); + TNode IsStringWrapper(TNode object); TNode IsSeqOneByteString(TNode object); TNode IsSymbolInstanceType(TNode instance_type); @@ -3533,33 +3579,37 @@ class V8_EXPORT_PRIVATE CodeStubAssembler template void SetNameDictionaryFlags(TNode, TNode flags); - enum LookupMode { kFindExisting, kFindInsertionIndex }; + enum LookupMode { + kFindExisting, + kFindInsertionIndex, + kFindExistingOrInsertionIndex + }; template TNode LoadName(TNode key); // Looks up an entry in a NameDictionaryBase successor. - // For {mode} == kFindExisting: - // If the entry is found control goes to {if_found} and {var_name_index} - // contains an index of the key field of the entry found. - // If the key is not found and {if_not_found_with_insertion_index} is - // provided, control goes to {if_not_found_with_insertion_index} and - // {var_name_index} contains the index of the key field to insert the given - // name at. - // Otherwise control goes to {if_not_found_no_insertion_index}. - // For {mode} == kFindInsertionIndex: - // {if_not_found_no_insertion_index} and {if_not_found_with_insertion_index} - // are treated equally. If {if_not_found_with_insertion_index} is provided, - // control goes to {if_not_found_with_insertion_index}, otherwise control - // goes to {if_not_found_no_insertion_index}. In both cases {var_name_index} - // contains the index of the key field to insert the given name at. + // If the entry is found control goes to {if_found} and {var_name_index} + // contains an index of the key field of the entry found. + // If the key is not found control goes to {if_not_found}. If mode is + // {kFindExisting}, {var_name_index} might contain garbage, otherwise + // {var_name_index} contains the index of the key field to insert the given + // name at. template void NameDictionaryLookup(TNode dictionary, TNode unique_name, Label* if_found, TVariable* var_name_index, - Label* if_not_found_no_insertion_index, - LookupMode mode = kFindExisting, - Label* if_not_found_with_insertion_index = nullptr); + Label* if_not_found, + LookupMode mode = kFindExisting); + // Slow lookup for unique_names with forwarding index. + // Both resolving the actual hash and the lookup are handled via runtime. 
+ template + void NameDictionaryLookupWithForwardIndex(TNode dictionary, + TNode unique_name, + Label* if_found, + TVariable* var_name_index, + Label* if_not_found, + LookupMode mode = kFindExisting); TNode ComputeSeededHash(TNode key); @@ -3915,13 +3965,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void TrapAllocationMemento(TNode object, Label* memento_found); - // Helpers to look up MemoryChunk/Page metadata for a given address. - // Equivalent to MemoryChunkHeader::FromAddress(). - TNode PageHeaderFromAddress(TNode address); - // Equivalent to MemoryChunkHeader::MemoryChunk(). - TNode PageFromPageHeader(TNode address); - // Equivalent to BasicMemoryChunk::FromAddress(). - TNode PageFromAddress(TNode address); + // Helpers to look up Page metadata for a given address. + // Equivalent to MemoryChunk::FromAddress(). + TNode MemoryChunkFromAddress(TNode address); + // Equivalent to MemoryChunk::MutablePageMetadata(). + TNode PageMetadataFromMemoryChunk(TNode address); + // Equivalent to MemoryChunkMetadata::FromAddress(). + TNode PageMetadataFromAddress(TNode address); // Store a weak in-place reference into the FeedbackVector. TNode StoreWeakReferenceInFeedbackVector( diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc index ef0153c6697d8f..49afab11b6c6e4 100644 --- a/deps/v8/src/codegen/compiler.cc +++ b/deps/v8/src/codegen/compiler.cc @@ -1796,7 +1796,7 @@ class MergeAssumptionChecker final : public ObjectVisitor { void VisitPointers(Tagged host, MaybeObjectSlot start, MaybeObjectSlot end) override { for (MaybeObjectSlot current = start; current != end; ++current) { - MaybeObject maybe_obj = current.load(cage_base_); + Tagged maybe_obj = current.load(cage_base_); Tagged obj; bool is_weak = maybe_obj.IsWeak(); if (maybe_obj.GetHeapObject(&obj)) { @@ -1946,8 +1946,6 @@ void BackgroundCompileTask::Run( // Update the character stream's runtime call stats. info.character_stream()->set_runtime_call_stats(info.runtime_call_stats()); - // Parser needs to stay alive for finalizing the parsing on the main - // thread. Parser parser(isolate, &info, script_); if (flags().is_toplevel()) { parser.InitializeEmptyScopeChain(&info); @@ -2132,7 +2130,7 @@ void BackgroundMergeTask::BeginMergeInBackground(LocalIsolate* isolate, { DisallowGarbageCollection no_gc; - MaybeObject maybe_old_toplevel_sfi = + Tagged maybe_old_toplevel_sfi = old_script->shared_function_infos()->get(kFunctionLiteralIdTopLevel); if (maybe_old_toplevel_sfi.IsWeak()) { Tagged old_toplevel_sfi = SharedFunctionInfo::cast( @@ -2148,11 +2146,13 @@ void BackgroundMergeTask::BeginMergeInBackground(LocalIsolate* isolate, new_script->shared_function_infos()->length()); for (int i = 0; i < old_script->shared_function_infos()->length(); ++i) { DisallowGarbageCollection no_gc; - MaybeObject maybe_new_sfi = new_script->shared_function_infos()->get(i); + Tagged maybe_new_sfi = + new_script->shared_function_infos()->get(i); if (maybe_new_sfi.IsWeak()) { Tagged new_sfi = SharedFunctionInfo::cast(maybe_new_sfi.GetHeapObjectAssumeWeak()); - MaybeObject maybe_old_sfi = old_script->shared_function_infos()->get(i); + Tagged maybe_old_sfi = + old_script->shared_function_infos()->get(i); if (maybe_old_sfi.IsWeak()) { // The old script and the new script both have SharedFunctionInfos for // this function literal. 
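The script-merging code above manipulates weak slots in old_script->shared_function_infos(): entries are written with MakeWeak and read back via IsWeak / GetHeapObjectAssumeWeak. As a rough mental model (the concrete bit patterns below are assumptions for illustration, not V8's actual tagging constants), a weak reference is a strong tagged pointer with an extra low bit set, so converting between the two never touches the payload:

#include <cstdint>

// Toy model of strong/weak reference tagging. The real scheme lives in V8's
// tagged-pointer headers; the tag values here are assumed for the sketch.
using Address = uintptr_t;

constexpr Address kStrongTag = 0b01;  // assumed: strong heap reference
constexpr Address kWeakTag = 0b11;    // assumed: weak heap reference
constexpr Address kTagMask = 0b11;

inline Address MakeWeak(Address strong_ref) {
  // Only the tag bit changes; the object's address is unchanged.
  return strong_ref | kWeakTag;
}

inline bool IsWeak(Address maybe_object) {
  return (maybe_object & kTagMask) == kWeakTag;
}

inline Address GetHeapObjectAssumeWeak(Address maybe_object) {
  // Strip the weak bit to recover the strong reference. Callers, like the SFI
  // merge above, must already know the slot holds a live weak reference,
  // because a cleared weak slot would not decode to a valid object.
  return (maybe_object & ~kTagMask) | kStrongTag;
}

In the merge above, each slot of shared_function_infos() is such a weak slot: a live entry decodes to a SharedFunctionInfo, and writes go back through MakeWeak so the GC remains free to clear the slot later.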
@@ -2225,8 +2225,9 @@ Handle BackgroundMergeTask::CompleteMergeInForeground( for (Handle new_sfi : used_new_sfis_) { DisallowGarbageCollection no_gc; DCHECK_GE(new_sfi->function_literal_id(), 0); - MaybeObject maybe_old_sfi = old_script->shared_function_infos()->get( - new_sfi->function_literal_id()); + Tagged maybe_old_sfi = + old_script->shared_function_infos()->get( + new_sfi->function_literal_id()); if (maybe_old_sfi.IsWeak()) { // The old script's SFI didn't exist during the background work, but // does now. This means a re-merge is necessary so that any pointers to @@ -2235,9 +2236,8 @@ Handle BackgroundMergeTask::CompleteMergeInForeground( SharedFunctionInfo::cast(maybe_old_sfi.GetHeapObjectAssumeWeak()); forwarder.Forward(*new_sfi, old_sfi); } else { - old_script->shared_function_infos()->set( - new_sfi->function_literal_id(), - MaybeObject::MakeWeak(MaybeObject::FromObject(*new_sfi))); + old_script->shared_function_infos()->set(new_sfi->function_literal_id(), + MakeWeak(*new_sfi)); } } @@ -2259,7 +2259,7 @@ Handle BackgroundMergeTask::CompleteMergeInForeground( forwarder.IterateAndForwardPointers(); } - MaybeObject maybe_toplevel_sfi = + Tagged maybe_toplevel_sfi = old_script->shared_function_infos()->get(kFunctionLiteralIdTopLevel); CHECK(maybe_toplevel_sfi.IsWeak()); Handle result = handle( @@ -2309,8 +2309,7 @@ MaybeHandle BackgroundCompileTask::FinalizeScript( maybe_result = result; script = handle(Script::cast(result->script()), isolate); DCHECK(Object::StrictEquals(script->source(), *source)); - DCHECK(isolate->factory()->script_list()->Contains( - MaybeObject::MakeWeak(MaybeObject::FromObject(*script)))); + DCHECK(isolate->factory()->script_list()->Contains(MakeWeak(*script))); } else { Script::SetSource(isolate, script, source); script->set_origin_options(origin_options); @@ -2564,7 +2563,7 @@ bool Compiler::CollectSourcePositions(Isolate* isolate, if (base::Optional> debug_info = shared_info->TryGetDebugInfo(isolate)) { if (debug_info.value()->HasInstrumentedBytecodeArray()) { - Tagged source_position_table = + Tagged source_position_table = job->compilation_info()->bytecode_array()->SourcePositionTable(); shared_info->GetActiveBytecodeArray(isolate)->set_source_position_table( source_position_table, kReleaseStore); diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc index 7842918f0bcc1c..bbbaadb12f7ae1 100644 --- a/deps/v8/src/codegen/external-reference.cc +++ b/deps/v8/src/codegen/external-reference.cc @@ -1422,6 +1422,10 @@ ExternalReference ExternalReference::fast_c_call_caller_fp_address( isolate->isolate_data()->fast_c_call_caller_fp_address()); } +ExternalReference ExternalReference::context_address(Isolate* isolate) { + return ExternalReference(isolate->context_address()); +} + ExternalReference ExternalReference::fast_c_call_caller_pc_address( Isolate* isolate) { return ExternalReference( diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h index befb835fe4448b..77b77e11e328d5 100644 --- a/deps/v8/src/codegen/external-reference.h +++ b/deps/v8/src/codegen/external-reference.h @@ -16,7 +16,7 @@ class CFunctionInfo; namespace internal { class Isolate; -class Page; +class PageMetadata; class SCTableReference; class StatsCounter; @@ -69,6 +69,7 @@ class StatsCounter; V(execution_mode_address, "IsolateData::execution_mode") \ V(debug_suspended_generator_address, \ "Debug::step_suspended_generator_address()") \ + V(context_address, "Isolate::context_address()") \ 
V(fast_c_call_caller_fp_address, \ "IsolateData::fast_c_call_caller_fp_address") \ V(fast_c_call_caller_pc_address, \ diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc index 54724742ef3822..c75b08e2e69a38 100644 --- a/deps/v8/src/codegen/ia32/assembler-ia32.cc +++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc @@ -408,7 +408,7 @@ void Assembler::Nop(int bytes) { switch (bytes) { case 2: EMIT(0x66); - V8_FALLTHROUGH; + [[fallthrough]]; case 1: EMIT(0x90); return; @@ -425,7 +425,7 @@ void Assembler::Nop(int bytes) { return; case 6: EMIT(0x66); - V8_FALLTHROUGH; + [[fallthrough]]; case 5: EMIT(0xF); EMIT(0x1F); @@ -446,15 +446,15 @@ void Assembler::Nop(int bytes) { case 11: EMIT(0x66); bytes--; - V8_FALLTHROUGH; + [[fallthrough]]; case 10: EMIT(0x66); bytes--; - V8_FALLTHROUGH; + [[fallthrough]]; case 9: EMIT(0x66); bytes--; - V8_FALLTHROUGH; + [[fallthrough]]; case 8: EMIT(0xF); EMIT(0x1F); diff --git a/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h b/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h index 223de6898fae4f..aba6c453b142d1 100644 --- a/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h +++ b/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h @@ -79,6 +79,21 @@ constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() { return no_reg; } +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::EnumIndexRegister() { + return ecx; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::CacheTypeRegister() { + return no_reg; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::SlotRegister() { + return no_reg; +} + // static constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() { return kInterpreterAccumulatorRegister; @@ -316,7 +331,8 @@ CallApiCallbackGenericDescriptor::TopmostScriptHavingContextRegister() { return eax; } // static -constexpr Register CallApiCallbackGenericDescriptor::CallHandlerInfoRegister() { +constexpr Register +CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister() { return edx; } // static diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc index ecbec91ad42f6d..783d4760564fa2 100644 --- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc @@ -35,10 +35,10 @@ #include "src/flags/flags.h" #include "src/handles/handles-inl.h" #include "src/handles/handles.h" -#include "src/heap/basic-memory-chunk.h" #include "src/heap/factory-inl.h" #include "src/heap/factory.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/memory-chunk-metadata.h" +#include "src/heap/mutable-page.h" #include "src/logging/counters.h" #include "src/objects/code.h" #include "src/objects/contexts.h" @@ -736,8 +736,7 @@ void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code) { } Immediate MacroAssembler::ClearedValue() const { - return Immediate( - static_cast(HeapObjectReference::ClearedValue(isolate()).ptr())); + return Immediate(static_cast(i::ClearedValue(isolate()).ptr())); } namespace { @@ -1998,6 +1997,17 @@ int MacroAssembler::CallCFunction(Register function, int num_arguments, ExternalReference::fast_c_call_caller_fp_address(isolate()), scratch), ebp); + +#if DEBUG + // Reset Isolate::context field right before the fast C call such that the + // GC can visit this field unconditionally. This is necessary because + // CEntry sets it to kInvalidContext in debug build only. 
+ mov(root_array_available() + ? Operand(kRootRegister, IsolateData::context_offset()) + : ExternalReferenceAsOperand( + ExternalReference::context_address(isolate()), scratch), + Immediate(Context::kNoContext)); +#endif } call(function); @@ -2197,7 +2207,7 @@ void MacroAssembler::LoadLabelAddress(Register dst, Label* lbl) { void MacroAssembler::MemoryChunkHeaderFromObject(Register object, Register header) { constexpr intptr_t alignment_mask = - MemoryChunkHeader::GetAlignmentMaskForAssembler(); + MemoryChunk::GetAlignmentMaskForAssembler(); if (header == object) { and_(header, Immediate(~alignment_mask)); } else { diff --git a/deps/v8/src/codegen/interface-descriptors-inl.h b/deps/v8/src/codegen/interface-descriptors-inl.h index 9f7ee01571beb1..d0e645512cfcb0 100644 --- a/deps/v8/src/codegen/interface-descriptors-inl.h +++ b/deps/v8/src/codegen/interface-descriptors-inl.h @@ -626,6 +626,25 @@ constexpr auto KeyedLoadBaselineDescriptor::registers() { return RegisterArray(ReceiverRegister(), NameRegister(), SlotRegister()); } +// static +constexpr auto EnumeratedKeyedLoadBaselineDescriptor::registers() { + return RegisterArray(KeyedLoadBaselineDescriptor::ReceiverRegister(), + KeyedLoadBaselineDescriptor::NameRegister(), + EnumIndexRegister(), CacheTypeRegister(), + SlotRegister()); +} + +// static +constexpr auto EnumeratedKeyedLoadDescriptor::registers() { + return RegisterArray( + KeyedLoadBaselineDescriptor::ReceiverRegister(), + KeyedLoadBaselineDescriptor::NameRegister(), + EnumeratedKeyedLoadBaselineDescriptor::EnumIndexRegister(), + EnumeratedKeyedLoadBaselineDescriptor::CacheTypeRegister(), + EnumeratedKeyedLoadBaselineDescriptor::SlotRegister(), + KeyedLoadWithVectorDescriptor::VectorRegister()); +} + // static constexpr auto KeyedLoadDescriptor::registers() { return KeyedLoadBaselineDescriptor::registers(); @@ -680,7 +699,7 @@ constexpr auto CallApiCallbackOptimizedDescriptor::registers() { constexpr auto CallApiCallbackGenericDescriptor::registers() { return RegisterArray(ActualArgumentsCountRegister(), TopmostScriptHavingContextRegister(), - CallHandlerInfoRegister(), HolderRegister()); + FunctionTemplateInfoRegister(), HolderRegister()); } // static diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h index be8aa938c2c764..370ad131a32329 100644 --- a/deps/v8/src/codegen/interface-descriptors.h +++ b/deps/v8/src/codegen/interface-descriptors.h @@ -101,7 +101,9 @@ namespace internal { V(KeyedHasICWithVector) \ V(KeyedLoad) \ V(KeyedLoadBaseline) \ + V(EnumeratedKeyedLoadBaseline) \ V(KeyedLoadWithVector) \ + V(EnumeratedKeyedLoad) \ V(Load) \ V(LoadBaseline) \ V(LoadGlobal) \ @@ -145,6 +147,7 @@ namespace internal { V(UnaryOp_WithFeedback) \ V(Void) \ V(WasmDummy) \ + V(WasmDummyWithJSLinkage) \ V(WasmFloat32ToNumber) \ V(WasmFloat64ToTagged) \ V(WasmJSToWasmWrapper) \ @@ -482,9 +485,6 @@ class StaticCallInterfaceDescriptor : public CallInterfaceDescriptor { static constexpr bool kNoContext = false; static constexpr bool kAllowVarArgs = false; static constexpr bool kNoStackScan = false; - // TODO(saelo): we should not have a default value here to force all interface - // descriptors to define a (unique) tag. 
- static constexpr CodeEntrypointTag kEntrypointTag = kDefaultCodeEntrypointTag; static constexpr auto kStackArgumentOrder = StackArgumentOrder::kDefault; // The set of registers available to the parameters, as a @@ -752,6 +752,23 @@ constexpr EmptyDoubleRegisterArray DoubleRegisterArray() { return {}; } MachineType::Int32(), /* kActualArgumentsCount */ \ ##__VA_ARGS__) +// Code/Builtins using this descriptor are referenced from inside the sandbox +// through a code pointer and must therefore be exposed via the code pointer +// table (CPT). They should use a code entrypoint tag which will be used to tag +// the entry in the CPT and will be checked to match the tag expected at the +// callsite. Only "compatible" builtins should use the same code entrypoint tag +// as it must be assumed that an attacker can swap code pointers (the indices +// into the CPT) and therefore can invoke all builtins that use the same tag +// from a given callsite. +#define SANDBOX_EXPOSED_DESCRIPTOR(tag) \ + static constexpr CodeEntrypointTag kEntrypointTag = tag; + +// Code/Builtins using this descriptor are not referenced from inside the +// sandbox but only called directly from other code. They are therefore not +// exposed to the sandbox via the CPT and so use the kInvalidEntrypointTag. +#define INTERNAL_DESCRIPTOR() \ + static constexpr CodeEntrypointTag kEntrypointTag = kInvalidEntrypointTag; + #define DECLARE_DESCRIPTOR(name) \ DECLARE_DESCRIPTOR_WITH_BASE(name, StaticCallInterfaceDescriptor) \ protected: \ @@ -763,9 +780,9 @@ constexpr EmptyDoubleRegisterArray DoubleRegisterArray() { return {}; } class V8_EXPORT_PRIVATE VoidDescriptor : public StaticCallInterfaceDescriptor { public: - static constexpr CodeEntrypointTag kEntrypointTag = kInvalidEntrypointTag; // The void descriptor could (and indeed probably should) also be NO_CONTEXT, // but this breaks some code assembler unittests. + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS() DEFINE_PARAMETER_TYPES() DECLARE_DESCRIPTOR(VoidDescriptor) @@ -791,7 +808,7 @@ using ContinueToBuiltinDescriptor = VoidDescriptor; // TODO(jgruber): Define real descriptors for C calling conventions. class CCallDescriptor : public StaticCallInterfaceDescriptor { public: - static constexpr CodeEntrypointTag kEntrypointTag = kDefaultCodeEntrypointTag; + SANDBOX_EXPOSED_DESCRIPTOR(kDefaultCodeEntrypointTag) DEFINE_PARAMETERS() DEFINE_PARAMETER_TYPES() DECLARE_DESCRIPTOR(CCallDescriptor) @@ -802,7 +819,7 @@ class CCallDescriptor : public StaticCallInterfaceDescriptor { class CEntryDummyDescriptor : public StaticCallInterfaceDescriptor { public: - static constexpr CodeEntrypointTag kEntrypointTag = kDefaultCodeEntrypointTag; + SANDBOX_EXPOSED_DESCRIPTOR(kDefaultCodeEntrypointTag) DEFINE_PARAMETERS() DEFINE_PARAMETER_TYPES() DECLARE_DESCRIPTOR(CEntryDummyDescriptor) @@ -813,15 +830,27 @@ class CEntryDummyDescriptor class WasmDummyDescriptor : public StaticCallInterfaceDescriptor { public: - static constexpr CodeEntrypointTag kEntrypointTag = kWasmEntrypointTag; + SANDBOX_EXPOSED_DESCRIPTOR(kWasmEntrypointTag) DEFINE_PARAMETERS() DEFINE_PARAMETER_TYPES() DECLARE_DESCRIPTOR(WasmDummyDescriptor) }; +// TODO(wasm): Consider filling in details / defining real descriptors for all +// builtins still using this placeholder descriptor. 
+class WasmDummyWithJSLinkageDescriptor + : public StaticCallInterfaceDescriptor { + public: + SANDBOX_EXPOSED_DESCRIPTOR(kJSEntrypointTag) + DEFINE_PARAMETERS() + DEFINE_PARAMETER_TYPES() + DECLARE_DESCRIPTOR(WasmDummyWithJSLinkageDescriptor) +}; + class AllocateDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kRequestedSize) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), // result 1 MachineType::IntPtr()) // kRequestedSize @@ -833,6 +862,7 @@ class AllocateDescriptor class NewHeapNumberDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kValue) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), // Result MachineType::Float64()) // kValue @@ -845,6 +875,7 @@ class NewHeapNumberDescriptor class JSTrampolineDescriptor : public StaticJSCallInterfaceDescriptor { public: + SANDBOX_EXPOSED_DESCRIPTOR(kJSEntrypointTag) DEFINE_JS_PARAMETERS() DEFINE_JS_PARAMETER_TYPES() @@ -856,7 +887,7 @@ class JSTrampolineDescriptor class RegExpTrampolineDescriptor : public StaticCallInterfaceDescriptor { public: - static constexpr CodeEntrypointTag kEntrypointTag = kRegExpEntrypointTag; + SANDBOX_EXPOSED_DESCRIPTOR(kRegExpEntrypointTag) DEFINE_PARAMETERS() DEFINE_PARAMETER_TYPES() DECLARE_DESCRIPTOR(RegExpTrampolineDescriptor) @@ -865,6 +896,7 @@ class RegExpTrampolineDescriptor class ContextOnlyDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS() DEFINE_PARAMETER_TYPES() DECLARE_DESCRIPTOR(ContextOnlyDescriptor) @@ -875,6 +907,7 @@ class ContextOnlyDescriptor class NoContextDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT() DEFINE_PARAMETER_TYPES() DECLARE_DESCRIPTOR(NoContextDescriptor) @@ -885,6 +918,7 @@ class NoContextDescriptor // LoadDescriptor is used by all stubs that implement Load ICs. 
class LoadDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kReceiver, kName, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -903,6 +937,7 @@ class LoadDescriptor : public StaticCallInterfaceDescriptor { class LoadBaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -915,6 +950,7 @@ class LoadBaselineDescriptor class LoadGlobalNoFeedbackDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kName, kICKind) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName MachineType::TaggedSigned()) // kICKind @@ -928,6 +964,7 @@ class LoadGlobalNoFeedbackDescriptor class LoadNoFeedbackDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kReceiver, kName, kICKind) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -942,6 +979,7 @@ class LoadNoFeedbackDescriptor class LoadGlobalDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kName, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName MachineType::TaggedSigned()) // kSlot @@ -953,6 +991,7 @@ class LoadGlobalDescriptor class LoadGlobalBaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kName, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName MachineType::TaggedSigned()) // kSlot @@ -964,6 +1003,7 @@ class LoadGlobalBaselineDescriptor class LookupWithVectorDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kName, kDepth, kSlot, kVector) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName MachineType::AnyTagged(), // kDepth @@ -975,6 +1015,7 @@ class LookupWithVectorDescriptor class LookupTrampolineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kName, kDepth, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName MachineType::AnyTagged(), // kDepth @@ -985,6 +1026,7 @@ class LookupTrampolineDescriptor class LookupBaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kName, kDepth, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName MachineType::AnyTagged(), // kDepth @@ -996,6 +1038,7 @@ class MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor : public StaticCallInterfaceDescriptor< MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kFlags, kFeedbackVector, kTemporary) DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kFlags MachineType::TaggedPointer(), // kFeedbackVector @@ -1012,6 +1055,7 @@ class MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor class StoreDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kReceiver, kName, kValue, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -1030,6 +1074,7 @@ class StoreDescriptor : public StaticCallInterfaceDescriptor { class StoreNoFeedbackDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() 
DEFINE_PARAMETERS(kReceiver, kName, kValue) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -1042,6 +1087,7 @@ class StoreNoFeedbackDescriptor class StoreBaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kValue, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -1055,8 +1101,7 @@ class StoreBaselineDescriptor class StoreTransitionDescriptor : public StaticCallInterfaceDescriptor { public: - static constexpr CodeEntrypointTag kEntrypointTag = kICHandlerEntrypointTag; - + SANDBOX_EXPOSED_DESCRIPTOR(kStoreTransitionICHandlerEntrypointTag) DEFINE_PARAMETERS(kReceiver, kName, kMap, kValue, kSlot, kVector) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -1074,8 +1119,7 @@ class StoreTransitionDescriptor class StoreWithVectorDescriptor : public StaticCallInterfaceDescriptor { public: - static constexpr CodeEntrypointTag kEntrypointTag = kICHandlerEntrypointTag; - + SANDBOX_EXPOSED_DESCRIPTOR(kStoreWithVectorICHandlerEntrypointTag) DEFINE_PARAMETERS(kReceiver, kName, kValue, kSlot, kVector) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -1092,6 +1136,7 @@ class StoreWithVectorDescriptor class StoreGlobalDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kName, kValue, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName MachineType::AnyTagged(), // kValue @@ -1104,6 +1149,7 @@ class StoreGlobalDescriptor class StoreGlobalBaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kName, kValue, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName MachineType::AnyTagged(), // kValue @@ -1116,6 +1162,7 @@ class StoreGlobalBaselineDescriptor class StoreGlobalWithVectorDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kName, kValue, kSlot, kVector) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName MachineType::AnyTagged(), // kValue @@ -1129,6 +1176,7 @@ class StoreGlobalWithVectorDescriptor class DefineKeyedOwnDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kReceiver, kName, kValue, kFlags, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -1145,6 +1193,7 @@ class DefineKeyedOwnDescriptor class DefineKeyedOwnBaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kValue, kFlags, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -1159,6 +1208,7 @@ class DefineKeyedOwnBaselineDescriptor class DefineKeyedOwnWithVectorDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kReceiver, kName, kValue, kFlags, kSlot, // register argument kVector // stack argument @@ -1177,8 +1227,7 @@ class DefineKeyedOwnWithVectorDescriptor class LoadWithVectorDescriptor : public StaticCallInterfaceDescriptor { public: - static constexpr CodeEntrypointTag kEntrypointTag = kICHandlerEntrypointTag; - + SANDBOX_EXPOSED_DESCRIPTOR(kLoadWithVectorICHandlerEntrypointTag) // TODO(v8:9497): Revert the Machine type for kSlot to the // TaggedSigned once 
Torque can emit better call descriptors DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector) @@ -1196,6 +1245,7 @@ class LoadWithVectorDescriptor class KeyedLoadBaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -1212,6 +1262,7 @@ class KeyedLoadBaselineDescriptor class KeyedLoadDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kReceiver, kName, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -1224,6 +1275,7 @@ class KeyedLoadDescriptor class KeyedLoadWithVectorDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -1236,9 +1288,46 @@ class KeyedLoadWithVectorDescriptor static constexpr auto registers(); }; +class EnumeratedKeyedLoadBaselineDescriptor + : public StaticCallInterfaceDescriptor< + EnumeratedKeyedLoadBaselineDescriptor> { + public: + INTERNAL_DESCRIPTOR() + DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kEnumIndex, kCacheType, kSlot) + DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver + MachineType::AnyTagged(), // kName + MachineType::TaggedSigned(), // kEnumIndex + MachineType::AnyTagged(), // kCacheType + MachineType::TaggedSigned()) // kSlot + DECLARE_DESCRIPTOR(EnumeratedKeyedLoadBaselineDescriptor) + + static constexpr inline Register EnumIndexRegister(); + static constexpr inline Register CacheTypeRegister(); + static constexpr inline Register SlotRegister(); + + static constexpr auto registers(); +}; + +class EnumeratedKeyedLoadDescriptor + : public StaticCallInterfaceDescriptor { + public: + INTERNAL_DESCRIPTOR() + DEFINE_PARAMETERS(kReceiver, kName, kEnumIndex, kCacheType, kSlot, kVector) + DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver + MachineType::AnyTagged(), // kName + MachineType::TaggedSigned(), // kEnumIndex + MachineType::AnyTagged(), // kCacheType + MachineType::TaggedSigned(), // kSlot + MachineType::AnyTagged()) // kVector + DECLARE_DESCRIPTOR(EnumeratedKeyedLoadDescriptor) + + static constexpr auto registers(); +}; + class KeyedHasICBaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -1255,6 +1344,7 @@ class KeyedHasICBaselineDescriptor class KeyedHasICWithVectorDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -1274,6 +1364,7 @@ class LoadWithReceiverAndVectorDescriptor : public StaticCallInterfaceDescriptor< LoadWithReceiverAndVectorDescriptor> { public: + INTERNAL_DESCRIPTOR() // TODO(v8:9497): Revert the Machine type for kSlot to the // TaggedSigned once Torque can emit better call descriptors DEFINE_PARAMETERS(kReceiver, kLookupStartObject, kName, kSlot, kVector) @@ -1292,6 +1383,7 @@ class LoadWithReceiverAndVectorDescriptor class LoadWithReceiverBaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() // TODO(v8:9497): Revert the 
Machine type for kSlot to the // TaggedSigned once Torque can emit better call descriptors DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kLookupStartObject, kName, kSlot) @@ -1307,6 +1399,7 @@ class LoadWithReceiverBaselineDescriptor class LoadGlobalWithVectorDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kName, kSlot, kVector) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName MachineType::TaggedSigned(), // kSlot @@ -1321,6 +1414,7 @@ class LoadGlobalWithVectorDescriptor class FastNewObjectDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kTarget, kNewTarget) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget MachineType::AnyTagged()) // kNewTarget @@ -1335,6 +1429,7 @@ class FastNewObjectDescriptor class WriteBarrierDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kObject, kSlotAddress) DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), // kObject MachineType::Pointer()) // kSlotAddress @@ -1361,6 +1456,7 @@ class IndirectPointerWriteBarrierDescriptor final : public StaticCallInterfaceDescriptor< IndirectPointerWriteBarrierDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kObject, kSlotAddress, kIndirectPointerTag) DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), // kObject MachineType::Pointer(), // kSlotAddress @@ -1384,6 +1480,7 @@ class IndirectPointerWriteBarrierDescriptor final class TSANStoreDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kValue) DEFINE_PARAMETER_TYPES(MachineType::Pointer(), // kAddress MachineType::AnyTagged()) // kValue @@ -1397,6 +1494,7 @@ class TSANStoreDescriptor final class TSANLoadDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kAddress) DEFINE_PARAMETER_TYPES(MachineType::Pointer()) // kAddress @@ -1411,6 +1509,7 @@ class TSANLoadDescriptor final class TypeConversionDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kArgument) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged()) DECLARE_DESCRIPTOR(TypeConversionDescriptor) @@ -1423,6 +1522,7 @@ class TypeConversionDescriptor final class TypeConversionNoContextDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kArgument) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged()) DECLARE_DESCRIPTOR(TypeConversionNoContextDescriptor) @@ -1433,6 +1533,7 @@ class TypeConversionNoContextDescriptor final class TypeConversion_BaselineDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kArgument, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), MachineType::UintPtr()) DECLARE_DESCRIPTOR(TypeConversion_BaselineDescriptor) @@ -1441,6 +1542,7 @@ class TypeConversion_BaselineDescriptor final class SingleParameterOnStackDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kArgument) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged()) DECLARE_DESCRIPTOR(SingleParameterOnStackDescriptor) @@ -1452,6 +1554,7 @@ class AsyncFunctionStackParameterDescriptor final : public StaticCallInterfaceDescriptor< AsyncFunctionStackParameterDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kPromise, kResult) 
DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), MachineType::AnyTagged()) DECLARE_DESCRIPTOR(AsyncFunctionStackParameterDescriptor) @@ -1463,6 +1566,7 @@ class GetIteratorStackParameterDescriptor final : public StaticCallInterfaceDescriptor< GetIteratorStackParameterDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kReceiver, kCallSlot, kFeedback, kResult) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), MachineType::AnyTagged(), MachineType::AnyTagged(), MachineType::AnyTagged()) @@ -1474,6 +1578,7 @@ class GetIteratorStackParameterDescriptor final class GetPropertyDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kObject, kKey) DECLARE_DEFAULT_DESCRIPTOR(GetPropertyDescriptor) }; @@ -1481,6 +1586,7 @@ class GetPropertyDescriptor final class TypeofDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kObject) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged()) DECLARE_DESCRIPTOR(TypeofDescriptor) @@ -1491,6 +1597,7 @@ class TypeofDescriptor class CallTrampolineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction MachineType::Int32()) // kActualArgumentsCount @@ -1503,6 +1610,7 @@ class CopyDataPropertiesWithExcludedPropertiesDescriptor : public StaticCallInterfaceDescriptor< CopyDataPropertiesWithExcludedPropertiesDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_VARARGS(kSource, kExcludedPropertyCount) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kSource MachineType::AnyTagged()) // kExcludedPropertyCount @@ -1515,6 +1623,7 @@ class CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor : public StaticCallInterfaceDescriptor< CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kSource, kExcludedPropertyCount, kExcludedPropertyBase) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kSource MachineType::IntPtr(), @@ -1527,6 +1636,7 @@ class CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor class CallVarargsDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_VARARGS(kTarget, kActualArgumentsCount, kArgumentsLength, kArgumentsList) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget @@ -1541,6 +1651,7 @@ class CallVarargsDescriptor class CallForwardVarargsDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_VARARGS(kTarget, kActualArgumentsCount, kStartIndex) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget MachineType::Int32(), // kActualArgumentsCount @@ -1553,6 +1664,7 @@ class CallForwardVarargsDescriptor class CallFunctionTemplateDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_VARARGS(kFunctionTemplateInfo, kArgumentsCount) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunctionTemplateInfo MachineType::Int32()) // kArgumentsCount @@ -1565,6 +1677,7 @@ class CallFunctionTemplateGenericDescriptor : public StaticCallInterfaceDescriptor< CallFunctionTemplateGenericDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_VARARGS(kFunctionTemplateInfo, kArgumentsCount, kTopmostScriptHavingContext) DEFINE_PARAMETER_TYPES( @@ -1579,6 +1692,7 @@ class CallFunctionTemplateGenericDescriptor class CallWithSpreadDescriptor : public 
StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_VARARGS(kTarget, kArgumentsCount, kSpread) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget MachineType::Int32(), // kArgumentsCount @@ -1591,6 +1705,7 @@ class CallWithSpreadDescriptor class CallWithSpread_BaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT_VARARGS(kTarget, kArgumentsCount, kSpread, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget MachineType::Int32(), // kArgumentsCount @@ -1603,6 +1718,7 @@ class CallWithSpread_WithFeedbackDescriptor : public StaticCallInterfaceDescriptor< CallWithSpread_WithFeedbackDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_VARARGS(kTarget, kArgumentsCount, kSpread, kSlot, kFeedbackVector, kReceiver) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget @@ -1617,6 +1733,7 @@ class CallWithSpread_WithFeedbackDescriptor class CallWithArrayLikeDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kTarget, kArgumentsList) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget MachineType::AnyTagged()) // kArgumentsList @@ -1629,6 +1746,7 @@ class CallWithArrayLike_WithFeedbackDescriptor : public StaticCallInterfaceDescriptor< CallWithArrayLike_WithFeedbackDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kTarget, kArgumentsList, kSlot, kFeedbackVector, kReceiver) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget MachineType::AnyTagged(), // kArgumentsList @@ -1641,6 +1759,7 @@ class CallWithArrayLike_WithFeedbackDescriptor class ConstructVarargsDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_JS_PARAMETERS(kArgumentsLength, kArgumentsList) DEFINE_JS_PARAMETER_TYPES(MachineType::Int32(), // kArgumentsLength MachineType::AnyTagged()) // kArgumentsList @@ -1653,6 +1772,7 @@ class ConstructVarargsDescriptor class ConstructForwardVarargsDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_JS_PARAMETERS(kStartIndex) DEFINE_JS_PARAMETER_TYPES(MachineType::Int32()) DECLARE_DESCRIPTOR(ConstructForwardVarargsDescriptor) @@ -1663,6 +1783,7 @@ class ConstructForwardVarargsDescriptor class ConstructWithSpreadDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_JS_PARAMETERS(kSpread) DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged()) DECLARE_DESCRIPTOR(ConstructWithSpreadDescriptor) @@ -1674,6 +1795,7 @@ class ConstructWithSpread_BaselineDescriptor : public StaticCallInterfaceDescriptor< ConstructWithSpread_BaselineDescriptor> { public: + INTERNAL_DESCRIPTOR() // Note: kSlot comes before kSpread since as an untagged value it must be // passed in a register. DEFINE_JS_PARAMETERS_NO_CONTEXT(kSlot, kSpread) @@ -1686,6 +1808,7 @@ class ConstructWithSpread_WithFeedbackDescriptor : public StaticCallInterfaceDescriptor< ConstructWithSpread_WithFeedbackDescriptor> { public: + INTERNAL_DESCRIPTOR() // Note: kSlot comes before kSpread since as an untagged value it must be // passed in a register. 
DEFINE_JS_PARAMETERS(kSlot, kSpread, kFeedbackVector) @@ -1698,6 +1821,7 @@ class ConstructWithSpread_WithFeedbackDescriptor class ConstructWithArrayLikeDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget MachineType::AnyTagged(), // kNewTarget @@ -1711,6 +1835,7 @@ class ConstructWithArrayLike_WithFeedbackDescriptor : public StaticCallInterfaceDescriptor< ConstructWithArrayLike_WithFeedbackDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList, kSlot, kFeedbackVector) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget MachineType::AnyTagged(), // kNewTarget @@ -1723,6 +1848,7 @@ class ConstructWithArrayLike_WithFeedbackDescriptor class ConstructForwardAllArgsDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kConstructor, kNewTarget) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kConstructor MachineType::AnyTagged()) // kNewTarget @@ -1735,6 +1861,7 @@ class ConstructForwardAllArgs_BaselineDescriptor : public StaticCallInterfaceDescriptor< ConstructForwardAllArgs_BaselineDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kTarget, kNewTarget, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget MachineType::AnyTagged(), // kNewTarget @@ -1746,6 +1873,7 @@ class ConstructForwardAllArgs_WithFeedbackDescriptor : public StaticCallInterfaceDescriptor< ConstructForwardAllArgs_WithFeedbackDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kTarget, kNewTarget, kSlot, kVector) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget MachineType::AnyTagged(), // kNewTarget @@ -1758,6 +1886,7 @@ class ConstructForwardAllArgs_WithFeedbackDescriptor class ConstructStubDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_JS_PARAMETERS() DEFINE_JS_PARAMETER_TYPES() @@ -1769,6 +1898,7 @@ class ConstructStubDescriptor class AbortDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kMessageOrMessageId) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged()) DECLARE_DESCRIPTOR(AbortDescriptor) @@ -1779,6 +1909,7 @@ class AbortDescriptor : public StaticCallInterfaceDescriptor { class ArrayConstructorDescriptor : public StaticJSCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_JS_PARAMETERS(kAllocationSite) DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged()) @@ -1789,6 +1920,7 @@ class ArrayNArgumentsConstructorDescriptor : public StaticCallInterfaceDescriptor< ArrayNArgumentsConstructorDescriptor> { public: + INTERNAL_DESCRIPTOR() // This descriptor declares only register arguments while respective number // of JS arguments stay on the expression stack. // The ArrayNArgumentsConstructor builtin does not access stack arguments @@ -1806,6 +1938,7 @@ class ArrayNoArgumentConstructorDescriptor : public StaticCallInterfaceDescriptor< ArrayNoArgumentConstructorDescriptor> { public: + INTERNAL_DESCRIPTOR() // This descriptor declares same register arguments as the parent // ArrayNArgumentsConstructorDescriptor and it declares indices for // JS arguments passed on the expression stack. 
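[Editor's note, not part of the patch] The long run of INTERNAL_DESCRIPTOR() / SANDBOX_EXPOSED_DESCRIPTOR(tag) additions through these hunks all reduce to defining the per-descriptor kEntrypointTag constant. The standalone C++ sketch below shows the effect; the tag values are placeholders, and only the macro shapes mirror the definitions added to interface-descriptors.h earlier in this diff.

// Minimal sketch, not V8 code: what the two descriptor macros boil down to.
#include <cstdint>

using CodeEntrypointTag = uint64_t;

// Placeholder constants standing in for the real V8 tag values.
constexpr CodeEntrypointTag kInvalidEntrypointTag = 0;
constexpr CodeEntrypointTag kJSEntrypointTag = 1;

// Same shape as the macros added by this patch.
#define SANDBOX_EXPOSED_DESCRIPTOR(tag) \
  static constexpr CodeEntrypointTag kEntrypointTag = tag;
#define INTERNAL_DESCRIPTOR() \
  static constexpr CodeEntrypointTag kEntrypointTag = kInvalidEntrypointTag;

// A builtin reachable through the code pointer table names its tag explicitly...
struct ExampleJSLinkageDescriptor {
  SANDBOX_EXPOSED_DESCRIPTOR(kJSEntrypointTag)
};

// ...while a builtin only called directly from other code stays internal.
struct ExampleInternalDescriptor {
  INTERNAL_DESCRIPTOR()
};

static_assert(ExampleJSLinkageDescriptor::kEntrypointTag == kJSEntrypointTag);
static_assert(ExampleInternalDescriptor::kEntrypointTag == kInvalidEntrypointTag);

int main() { return 0; }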
@@ -1824,6 +1957,7 @@ class ArraySingleArgumentConstructorDescriptor : public StaticCallInterfaceDescriptor< ArraySingleArgumentConstructorDescriptor> { public: + INTERNAL_DESCRIPTOR() // This descriptor declares same register arguments as the parent // ArrayNArgumentsConstructorDescriptor and it declares indices for // JS arguments passed on the expression stack. @@ -1843,6 +1977,7 @@ class ArraySingleArgumentConstructorDescriptor class CompareDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kLeft, kRight) DECLARE_DESCRIPTOR(CompareDescriptor) @@ -1852,6 +1987,7 @@ class CompareDescriptor class CompareNoContextDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight) DECLARE_DESCRIPTOR(CompareNoContextDescriptor) @@ -1861,6 +1997,7 @@ class CompareNoContextDescriptor class StringEqualDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight, kLength) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft MachineType::AnyTagged(), // kRight @@ -1871,6 +2008,7 @@ class StringEqualDescriptor class BinaryOpDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kLeft, kRight) DECLARE_DESCRIPTOR(BinaryOpDescriptor) @@ -1880,6 +2018,7 @@ class BinaryOpDescriptor class BinaryOp_BaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft MachineType::AnyTagged(), // kRight @@ -1892,6 +2031,7 @@ class BinaryOp_BaselineDescriptor class BinarySmiOp_BaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft MachineType::TaggedSigned(), // kRight @@ -1904,6 +2044,7 @@ class BinarySmiOp_BaselineDescriptor class StringAtAsStringDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kPosition) // TODO(turbofan): Return untagged value here. 
DEFINE_RESULT_AND_PARAMETER_TYPES( @@ -1916,6 +2057,7 @@ class StringAtAsStringDescriptor final class StringSubstringDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kString, kFrom, kTo) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kString MachineType::IntPtr(), // kFrom @@ -1928,6 +2070,7 @@ class StringSubstringDescriptor final class CppBuiltinAdaptorDescriptor : public StaticJSCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_JS_PARAMETERS(kCFunction) DEFINE_JS_PARAMETER_TYPES(MachineType::Pointer()) DECLARE_JS_COMPATIBLE_DESCRIPTOR(CppBuiltinAdaptorDescriptor) @@ -1936,6 +2079,7 @@ class CppBuiltinAdaptorDescriptor class CEntry1ArgvOnStackDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kArity, // register argument kCFunction, // register argument kPadding, // stack argument 1 (just padding) @@ -1956,6 +2100,7 @@ class CEntry1ArgvOnStackDescriptor class CallApiCallbackOptimizedDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_VARARGS(kApiFunctionAddress, kActualArgumentsCount, kCallData, kHolder) // receiver is implicit stack argument 1 @@ -1977,20 +2122,21 @@ class CallApiCallbackOptimizedDescriptor class CallApiCallbackGenericDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_VARARGS(kActualArgumentsCount, kTopmostScriptHavingContext, - kCallHandlerInfo, kHolder) + kFunctionTemplateInfo, kHolder) // receiver is implicit stack argument 1 // argv are implicit stack arguments [2, 2 + kArgc[ DEFINE_PARAMETER_TYPES( MachineType::Int32(), // kActualArgumentsCount MachineType::AnyTagged(), // kTopmostScriptHavingContext - MachineType::AnyTagged(), // kCallHandlerInfo + MachineType::AnyTagged(), // kFunctionTemplateInfo MachineType::AnyTagged()) // kHolder DECLARE_DESCRIPTOR(CallApiCallbackGenericDescriptor) static constexpr inline Register ActualArgumentsCountRegister(); static constexpr inline Register TopmostScriptHavingContextRegister(); - static constexpr inline Register CallHandlerInfoRegister(); + static constexpr inline Register FunctionTemplateInfoRegister(); static constexpr inline Register HolderRegister(); static constexpr inline auto registers(); @@ -1999,6 +2145,7 @@ class CallApiCallbackGenericDescriptor class ApiGetterDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kReceiver, kHolder, kCallback) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kHolder @@ -2016,6 +2163,7 @@ class ApiGetterDescriptor class GrowArrayElementsDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kObject, kKey) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kObject MachineType::AnyTagged()) // kKey @@ -2031,6 +2179,7 @@ class BaselineOutOfLinePrologueDescriptor : public StaticCallInterfaceDescriptor< BaselineOutOfLinePrologueDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kCalleeContext, kClosure, kJavaScriptCallArgCount, kStackFrameSize, kJavaScriptCallNewTarget, @@ -2053,6 +2202,7 @@ class BaselineOutOfLinePrologueDescriptor class BaselineLeaveFrameDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kParamsSize, kWeight) DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kParamsSize MachineType::Int32()) // 
kWeight @@ -2067,6 +2217,7 @@ class BaselineLeaveFrameDescriptor class OnStackReplacementDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kMaybeTargetCode) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged()) // kMaybeTargetCode DECLARE_DESCRIPTOR(OnStackReplacementDescriptor) @@ -2079,6 +2230,7 @@ class OnStackReplacementDescriptor class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kAccumulator, kBytecodeOffset, kBytecodeArray, kDispatchTable) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kAccumulator @@ -2094,6 +2246,7 @@ class InterpreterPushArgsThenCallDescriptor : public StaticCallInterfaceDescriptor< InterpreterPushArgsThenCallDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kFunction) DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kNumberOfArguments MachineType::Pointer(), // kFirstArgument @@ -2107,6 +2260,7 @@ class InterpreterPushArgsThenConstructDescriptor : public StaticCallInterfaceDescriptor< InterpreterPushArgsThenConstructDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kConstructor, kNewTarget, kFeedbackElement) DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kNumberOfArguments @@ -2122,6 +2276,7 @@ class InterpreterPushArgsThenConstructDescriptor class InterpreterCEntry1Descriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_RESULT_AND_PARAMETERS(1, kNumberOfArguments, kFirstArgument, kFunctionEntry) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result 1 @@ -2136,6 +2291,7 @@ class InterpreterCEntry1Descriptor class InterpreterCEntry2Descriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_RESULT_AND_PARAMETERS(2, kNumberOfArguments, kFirstArgument, kFunctionEntry) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result 1 @@ -2152,6 +2308,7 @@ class FindNonDefaultConstructorOrConstructDescriptor : public StaticCallInterfaceDescriptor< FindNonDefaultConstructorOrConstructDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_RESULT_AND_PARAMETERS(2, kThisFunction, kNewTarget) DEFINE_RESULT_AND_PARAMETER_TYPES( MachineType::AnyTagged(), // result 1 (true / false) @@ -2164,6 +2321,7 @@ class FindNonDefaultConstructorOrConstructDescriptor class ForInPrepareDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_RESULT_AND_PARAMETERS(2, kEnumerator, kVectorIndex, kFeedbackVector) DEFINE_RESULT_AND_PARAMETER_TYPES( MachineType::AnyTagged(), // result 1 (cache array) @@ -2177,6 +2335,7 @@ class ForInPrepareDescriptor class ResumeGeneratorDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kValue, kGenerator) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue MachineType::AnyTagged()) // kGenerator @@ -2188,6 +2347,7 @@ class ResumeGeneratorDescriptor final class ResumeGeneratorBaselineDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kGeneratorObject, kRegisterCount) DEFINE_RESULT_AND_PARAMETER_TYPES( MachineType::TaggedSigned(), // return type @@ -2200,6 +2360,7 @@ class ResumeGeneratorBaselineDescriptor final class SuspendGeneratorBaselineDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() 
DEFINE_PARAMETERS_NO_CONTEXT(kGeneratorObject, kSuspendId, kBytecodeOffset, kRegisterCount) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kGeneratorObject @@ -2213,6 +2374,7 @@ class SuspendGeneratorBaselineDescriptor final class RestartFrameTrampolineDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS() DECLARE_DESCRIPTOR(RestartFrameTrampolineDescriptor) }; @@ -2220,6 +2382,7 @@ class RestartFrameTrampolineDescriptor final class RunMicrotasksEntryDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_ENTRY(kRootRegisterValue, kMicrotaskQueue) DEFINE_PARAMETER_TYPES(MachineType::Pointer(), // kRootRegisterValue MachineType::Pointer()) // kMicrotaskQueue @@ -2231,6 +2394,7 @@ class RunMicrotasksEntryDescriptor final class RunMicrotasksDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kMicrotaskQueue) DEFINE_PARAMETER_TYPES(MachineType::Pointer()) DECLARE_DESCRIPTOR(RunMicrotasksDescriptor) @@ -2241,6 +2405,7 @@ class RunMicrotasksDescriptor final class WasmFloat32ToNumberDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kValue) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result MachineType::Float32()) // value @@ -2250,6 +2415,7 @@ class WasmFloat32ToNumberDescriptor final class WasmFloat64ToTaggedDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kValue) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result MachineType::Float64()) // value @@ -2259,6 +2425,7 @@ class WasmFloat64ToTaggedDescriptor final class WasmJSToWasmWrapperDescriptor final : public StaticCallInterfaceDescriptor { public: + SANDBOX_EXPOSED_DESCRIPTOR(kJSEntrypointTag) DEFINE_PARAMETERS_NO_CONTEXT(kWrapperBuffer, kInstance, kResultJSArray) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result MachineType::IntPtr(), // ParamBuffer @@ -2280,6 +2447,7 @@ class WasmJSToWasmWrapperDescriptor final class WasmToJSWrapperDescriptor final : public StaticCallInterfaceDescriptor { public: + SANDBOX_EXPOSED_DESCRIPTOR(kWasmEntrypointTag) DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(4, kWasmApiFunctionRef) DEFINE_RESULT_AND_PARAMETER_TYPES( MachineType::IntPtr(), // GP return 1 @@ -2298,6 +2466,7 @@ class WasmToJSWrapperDescriptor final class WasmSuspendDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(1, kArg0) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result MachineType::AnyTagged()) // value @@ -2307,6 +2476,7 @@ class WasmSuspendDescriptor final class V8_EXPORT_PRIVATE I64ToBigIntDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kArgument) DEFINE_PARAMETER_TYPES(MachineType::Int64()) // kArgument DECLARE_DESCRIPTOR(I64ToBigIntDescriptor) @@ -2316,6 +2486,7 @@ class V8_EXPORT_PRIVATE I64ToBigIntDescriptor final class V8_EXPORT_PRIVATE I32PairToBigIntDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kLow, kHigh) DEFINE_PARAMETER_TYPES(MachineType::Uint32(), // kLow MachineType::Uint32()) // kHigh @@ -2325,6 +2496,7 @@ class V8_EXPORT_PRIVATE I32PairToBigIntDescriptor final class V8_EXPORT_PRIVATE BigIntToI64Descriptor final : public 
StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kArgument) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Int64(), // result 1 MachineType::AnyTagged()) // kArgument @@ -2334,6 +2506,7 @@ class V8_EXPORT_PRIVATE BigIntToI64Descriptor final class V8_EXPORT_PRIVATE BigIntToI32PairDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_RESULT_AND_PARAMETERS(2, kArgument) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1 MachineType::Uint32(), // result 2 @@ -2344,6 +2517,7 @@ class V8_EXPORT_PRIVATE BigIntToI32PairDescriptor final class CloneObjectWithVectorDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kSource, kFlags, kSlot, kVector) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), // result 1 MachineType::AnyTagged(), // kSource @@ -2356,6 +2530,7 @@ class CloneObjectWithVectorDescriptor final class CloneObjectBaselineDescriptor final : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kSource, kFlags, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kSource MachineType::TaggedSigned(), // kFlags @@ -2366,6 +2541,7 @@ class CloneObjectBaselineDescriptor final class BinaryOp_WithFeedbackDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kLeft, kRight, kSlot, kFeedbackVector) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft MachineType::AnyTagged(), // kRight @@ -2378,6 +2554,7 @@ class CallTrampoline_Baseline_CompactDescriptor : public StaticCallInterfaceDescriptor< CallTrampoline_Baseline_CompactDescriptor> { public: + INTERNAL_DESCRIPTOR() using ArgumentCountField = base::BitField; using SlotField = base::BitField; @@ -2399,6 +2576,7 @@ class CallTrampoline_Baseline_CompactDescriptor class CallTrampoline_BaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT_VARARGS(kFunction, kActualArgumentsCount, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction MachineType::Int32(), // kActualArgumentsCount @@ -2410,6 +2588,7 @@ class CallTrampoline_WithFeedbackDescriptor : public StaticCallInterfaceDescriptor< CallTrampoline_WithFeedbackDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount, kSlot, kFeedbackVector, kReceiver) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction @@ -2423,6 +2602,7 @@ class CallTrampoline_WithFeedbackDescriptor class Compare_WithFeedbackDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kLeft, kRight, kSlot, kFeedbackVector) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft MachineType::AnyTagged(), // kRight @@ -2434,6 +2614,7 @@ class Compare_WithFeedbackDescriptor class Compare_BaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft MachineType::AnyTagged(), // kRight @@ -2446,6 +2627,7 @@ class Compare_BaselineDescriptor class Construct_BaselineDescriptor : public StaticJSCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_JS_PARAMETERS_NO_CONTEXT(kSlot) DEFINE_JS_PARAMETER_TYPES(MachineType::UintPtr()) // kSlot DECLARE_JS_COMPATIBLE_DESCRIPTOR(Construct_BaselineDescriptor) @@ -2454,6 +2636,7 @@ class 
Construct_BaselineDescriptor class Construct_WithFeedbackDescriptor : public StaticJSCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() // kSlot is passed in a register, kFeedbackVector on the stack. DEFINE_JS_PARAMETERS(kSlot, kFeedbackVector) DEFINE_JS_PARAMETER_TYPES(MachineType::UintPtr(), // kSlot @@ -2464,6 +2647,7 @@ class Construct_WithFeedbackDescriptor class UnaryOp_WithFeedbackDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS(kValue, kSlot, kFeedbackVector) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue MachineType::UintPtr(), // kSlot @@ -2474,6 +2658,7 @@ class UnaryOp_WithFeedbackDescriptor class UnaryOp_BaselineDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_PARAMETERS_NO_CONTEXT(kValue, kSlot) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue MachineType::UintPtr()) // kSlot @@ -2484,6 +2669,7 @@ class CheckTurboshaftFloat32TypeDescriptor : public StaticCallInterfaceDescriptor< CheckTurboshaftFloat32TypeDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_RESULT_AND_PARAMETERS(1, kValue, kExpectedType, kNodeId) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), MachineTypeOf::value, @@ -2496,6 +2682,7 @@ class CheckTurboshaftFloat64TypeDescriptor : public StaticCallInterfaceDescriptor< CheckTurboshaftFloat64TypeDescriptor> { public: + INTERNAL_DESCRIPTOR() DEFINE_RESULT_AND_PARAMETERS(1, kValue, kExpectedType, kNodeId) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), MachineTypeOf::value, @@ -2507,6 +2694,7 @@ class CheckTurboshaftFloat64TypeDescriptor class DebugPrintWordPtrDescriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_RESULT_AND_PARAMETERS(1, kValue) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), MachineType::UintPtr()) @@ -2516,6 +2704,7 @@ class DebugPrintWordPtrDescriptor class DebugPrintFloat64Descriptor : public StaticCallInterfaceDescriptor { public: + INTERNAL_DESCRIPTOR() DEFINE_RESULT_AND_PARAMETERS(1, kValue) DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), MachineType::Float64()) @@ -2526,6 +2715,7 @@ class DebugPrintFloat64Descriptor class Name##Descriptor \ : public StaticCallInterfaceDescriptor { \ public: \ + INTERNAL_DESCRIPTOR() \ DEFINE_PARAMETERS(__VA_ARGS__) \ static constexpr bool kNoContext = DoesNeedContext == NeedsContext::kNo; \ DECLARE_DEFAULT_DESCRIPTOR(Name##Descriptor) \ diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.cc b/deps/v8/src/codegen/loong64/assembler-loong64.cc index ae92d8faa2796a..ac8c35cab56ac2 100644 --- a/deps/v8/src/codegen/loong64/assembler-loong64.cc +++ b/deps/v8/src/codegen/loong64/assembler-loong64.cc @@ -361,10 +361,9 @@ bool Assembler::IsMov(Instr instr, Register rd, Register rj) { return instr == instr1; } -bool Assembler::IsPcAddi(Instr instr, Register rd, int32_t si20) { - DCHECK(is_int20(si20)); - Instr instr1 = PCADDI | (si20 & 0xfffff) << kRjShift | rd.code(); - return instr == instr1; +bool Assembler::IsPcAddi(Instr instr) { + uint32_t opcode = (instr >> 25) << 25; + return opcode == PCADDI; } bool Assembler::IsNop(Instr instr, unsigned int type) { @@ -453,26 +452,23 @@ int Assembler::target_at(int pos, bool is_internal) { } } - // Check we have a branch or jump instruction. - DCHECK(IsBranch(instr) || IsPcAddi(instr, t8, 16)); + // Check we have a branch, jump or pcaddi instruction. + DCHECK(IsBranch(instr) || IsPcAddi(instr)); // Do NOT change this to <<2. 
We rely on arithmetic shifts here, assuming // the compiler uses arithmetic shifts for signed integers. if (IsBranch(instr)) { return AddBranchOffset(pos, instr); - } else { - DCHECK(IsPcAddi(instr, t8, 16)); - // see BranchLong(Label* L) and BranchAndLinkLong ?? - int32_t imm32; - Instr instr_lu12i_w = instr_at(pos + 1 * kInstrSize); - Instr instr_ori = instr_at(pos + 2 * kInstrSize); - DCHECK(IsLu12i_w(instr_lu12i_w)); - imm32 = ((instr_lu12i_w >> 5) & 0xfffff) << 12; - imm32 |= ((instr_ori >> 10) & static_cast(kImm12Mask)); - if (imm32 == kEndOfJumpChain) { + } else if (IsPcAddi(instr)) { + // see LoadLabelRelative + int32_t si20; + si20 = (instr >> kRjShift) & 0xfffff; + if (si20 == kEndOfJumpChain) { // EndOfChain sentinel is returned directly, not relative to pc or pos. return kEndOfChain; } - return pos + imm32; + return pos + (si20 << 2); + } else { + UNREACHABLE(); } } @@ -521,6 +517,18 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) { return; } + if (IsPcAddi(instr)) { + // For LoadLabelRelative function. + int32_t imm = target_pos - pos; + DCHECK_EQ(imm & 3, 0); + DCHECK(is_int22(imm)); + uint32_t siMask = 0xfffff << kRjShift; + uint32_t si20 = ((imm >> 2) << kRjShift) & siMask; + instr = (instr & ~siMask) | si20; + instr_at_put(pos, instr); + return; + } + DCHECK(IsBranch(instr)); instr = SetBranchOffset(pos, target_pos, instr); instr_at_put(pos, instr); @@ -585,7 +593,7 @@ void Assembler::bind_to(Label* L, int pos) { target_at_put(fixup_pos, pos, false); } else { DCHECK(IsJ(instr) || IsLu12i_w(instr) || IsEmittedConstant(instr) || - IsPcAddi(instr, t8, 8)); + IsPcAddi(instr)); target_at_put(fixup_pos, pos, false); } } diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.h b/deps/v8/src/codegen/loong64/assembler-loong64.h index ec65062144be82..c478731779e672 100644 --- a/deps/v8/src/codegen/loong64/assembler-loong64.h +++ b/deps/v8/src/codegen/loong64/assembler-loong64.h @@ -168,7 +168,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // but it may be bound only once. void bind(Label* L); // Binds an unbound label L to current code position. - enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 }; + enum OffsetSize : int { + kOffset26 = 26, + kOffset21 = 21, + kOffset20 = 20, + kOffset16 = 16 + }; // Determines if Label is bound and near enough so that branch instruction // can be used to reach it, instead of jump instruction. 
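[Editor's note, not part of the patch] The pcaddi-related hunks above and below (IsPcAddi, target_at, target_at_put, and the new kOffset20 / LoadLabelRelative) all manipulate the same si20 field of the instruction word. The sketch below mirrors that packing; kRjShift = 5 is an assumption about the LoongArch encoding (the patch only relies on the named constant), the PCADDI opcode bits are omitted, and only non-negative, word-aligned offsets are demonstrated since the decode path shown in the patch does not sign-extend.

// Standalone sketch, not V8 code: round-tripping a byte offset through the
// si20 field the way target_at_put() and target_at() do above.
#include <cassert>
#include <cstdint>

constexpr int kRjShift = 5;  // assumed bit position of si20 in the instruction
constexpr uint32_t kSi20Mask = 0xfffffu << kRjShift;

// Pack a word-aligned byte offset into the si20 field (cf. target_at_put).
uint32_t PutPcAddiOffset(uint32_t instr, int32_t byte_offset) {
  assert((byte_offset & 3) == 0);  // word aligned, as DCHECKed in the patch
  uint32_t si20 =
      (static_cast<uint32_t>(byte_offset >> 2) << kRjShift) & kSi20Mask;
  return (instr & ~kSi20Mask) | si20;
}

// Recover the byte offset from the si20 field (cf. target_at).
int32_t GetPcAddiOffset(uint32_t instr) {
  int32_t si20 = (instr >> kRjShift) & 0xfffff;
  return si20 << 2;  // no sign extension: only non-negative offsets round-trip
}

int main() {
  uint32_t instr = 0;  // opcode bits omitted; only the si20 payload matters here
  instr = PutPcAddiOffset(instr, 128);
  assert(GetPcAddiOffset(instr) == 128);
  return 0;
}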
@@ -804,7 +809,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { static bool IsJump(Instr instr); static bool IsMov(Instr instr, Register rd, Register rs); - static bool IsPcAddi(Instr instr, Register rd, int32_t si20); + static bool IsPcAddi(Instr instr); static bool IsJ(Instr instr); static bool IsLu12i_w(Instr instr); diff --git a/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h b/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h index 9bf79ce3c4e23b..0c6838fb4ea5d1 100644 --- a/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h +++ b/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h @@ -84,6 +84,21 @@ constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() { return a3; } +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::EnumIndexRegister() { + return a4; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::CacheTypeRegister() { + return a5; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::SlotRegister() { + return a2; +} + // static constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() { return kInterpreterAccumulatorRegister; @@ -323,7 +338,8 @@ CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister() { return a2; } // static -constexpr Register CallApiCallbackGenericDescriptor::CallHandlerInfoRegister() { +constexpr Register +CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister() { return a3; } // static diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc index 4e234b803cc8c4..6b3fee982e1501 100644 --- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc +++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc @@ -19,7 +19,7 @@ #include "src/debug/debug.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frames-inl.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #include "src/init/bootstrapper.h" #include "src/logging/counters.h" #include "src/objects/heap-number.h" @@ -332,9 +332,10 @@ void MacroAssembler::ResolveTrustedPointerHandle(Register destination, srli_d(handle, handle, kTrustedPointerHandleShift); Alsl_d(destination, handle, table, kTrustedPointerTableEntrySizeLog2); Ld_d(destination, MemOperand(destination, 0)); - // The LSB is used as marking bit by the trusted pointer table, so here we - // have to set it using a bitwise OR as it may or may not be set. - Or(destination, destination, Operand(kHeapObjectTag)); + // Untag the pointer and remove the marking bit in one operation. 
+ Register tag_reg = handle; + li(tag_reg, Operand(~(tag | kTrustedPointerTableMarkBit))); + and_(destination, destination, tag_reg); } void MacroAssembler::ResolveCodePointerHandle(Register destination, @@ -352,16 +353,22 @@ void MacroAssembler::ResolveCodePointerHandle(Register destination, Or(destination, destination, Operand(kHeapObjectTag)); } -void MacroAssembler::LoadCodeEntrypointViaCodePointer( - Register destination, MemOperand field_operand) { +void MacroAssembler::LoadCodeEntrypointViaCodePointer(Register destination, + MemOperand field_operand, + CodeEntrypointTag tag) { + DCHECK_NE(tag, kInvalidEntrypointTag); ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); - Register table = temps.Acquire(); - li(table, ExternalReference::code_pointer_table_address()); + Register scratch = temps.Acquire(); + li(scratch, ExternalReference::code_pointer_table_address()); Ld_wu(destination, field_operand); srli_d(destination, destination, kCodePointerHandleShift); slli_d(destination, destination, kCodePointerTableEntrySizeLog2); - Ld_d(destination, MemOperand(table, destination)); + Ld_d(destination, MemOperand(scratch, destination)); + if (tag != 0) { + li(scratch, Operand(tag)); + xor_(destination, destination, scratch); + } } #endif // V8_ENABLE_SANDBOX @@ -2770,6 +2777,14 @@ void MacroAssembler::CompareTaggedAndBranch(Label* label, Condition cond, } } +void MacroAssembler::LoadLabelRelative(Register dest, Label* target) { + ASM_CODE_COMMENT(this); + // pcaddi could handle 22-bit pc offset. + int32_t offset = branch_offset_helper(target, OffsetSize::kOffset20); + DCHECK(is_int22(offset)); + pcaddi(dest, offset >> 2); +} + void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { ASM_CODE_COMMENT(this); @@ -3330,8 +3345,7 @@ void MacroAssembler::TestCodeIsMarkedForDeoptimizationAndJump( } Operand MacroAssembler::ClearedValue() const { - return Operand( - static_cast(HeapObjectReference::ClearedValue(isolate()).ptr())); + return Operand(static_cast(i::ClearedValue(isolate()).ptr())); } void MacroAssembler::InvokePrologue(Register expected_parameter_count, @@ -4419,41 +4433,49 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, PrepareCallCFunction(num_reg_arguments, 0, scratch); } -void MacroAssembler::CallCFunction(ExternalReference function, - int num_reg_arguments, - int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots) { +int MacroAssembler::CallCFunction(ExternalReference function, + int num_reg_arguments, + int num_double_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); li(t7, function); - CallCFunctionHelper(t7, num_reg_arguments, num_double_arguments, - set_isolate_data_slots); + return CallCFunctionHelper(t7, num_reg_arguments, num_double_arguments, + set_isolate_data_slots, return_location); } -void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, - int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots) { +int MacroAssembler::CallCFunction(Register function, int num_reg_arguments, + int num_double_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { ASM_CODE_COMMENT(this); - CallCFunctionHelper(function, num_reg_arguments, num_double_arguments, - set_isolate_data_slots); + return CallCFunctionHelper(function, num_reg_arguments, num_double_arguments, + set_isolate_data_slots, return_location); } -void 
MacroAssembler::CallCFunction(ExternalReference function, - int num_arguments, - SetIsolateDataSlots set_isolate_data_slots) { - CallCFunction(function, num_arguments, 0, set_isolate_data_slots); +int MacroAssembler::CallCFunction(ExternalReference function, int num_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { + return CallCFunction(function, num_arguments, 0, set_isolate_data_slots, + return_location); } -void MacroAssembler::CallCFunction(Register function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots) { - CallCFunction(function, num_arguments, 0, set_isolate_data_slots); +int MacroAssembler::CallCFunction(Register function, int num_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { + return CallCFunction(function, num_arguments, 0, set_isolate_data_slots, + return_location); } -void MacroAssembler::CallCFunctionHelper( +int MacroAssembler::CallCFunctionHelper( Register function, int num_reg_arguments, int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots) { + SetIsolateDataSlots set_isolate_data_slots, Label* return_location) { DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); DCHECK(has_frame()); + + Label get_pc; + // Make sure that the stack is aligned before calling a C function unless // running in the simulator. The simulator has its own alignment check which // provides more information. @@ -4497,7 +4519,7 @@ void MacroAssembler::CallCFunctionHelper( Register scratch = t2; DCHECK(!AreAliased(pc_scratch, scratch, function)); - pcaddi(pc_scratch, 1); + LoadLabelRelative(pc_scratch, &get_pc); // See x64 code for reasoning about how to address the isolate data // fields. @@ -4507,6 +4529,13 @@ void MacroAssembler::CallCFunctionHelper( IsolateData::fast_c_call_caller_pc_offset())); St_d(fp, MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset())); +#if DEBUG + // Reset Isolate::context field right before the fast C call such that + // the GC can visit this field unconditionally. This is necessary + // because CEntry sets it to kInvalidContext in debug build only. + static_assert(Context::kNoContext == 0); + StoreRootRelative(IsolateData::context_offset(), zero_reg); +#endif } else { DCHECK_NOT_NULL(isolate()); li(scratch, @@ -4515,10 +4544,22 @@ void MacroAssembler::CallCFunctionHelper( li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); St_d(fp, MemOperand(scratch, 0)); +#if DEBUG + // Reset Isolate::context field right before the fast C call such that + // the GC can visit this field unconditionally. This is necessary + // because CEntry sets it to kInvalidContext in debug build only. + static_assert(Context::kNoContext == 0); + St_d(zero_reg, + ExternalReferenceAsOperand( + ExternalReference::context_address(isolate()), pc_scratch)); +#endif } } Call(function); + int call_pc_offset = pc_offset(); + bind(&get_pc); + if (return_location) bind(return_location); if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { // We don't unset the PC; the FP is the source of truth. 
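Note on the new CallCFunction signature: it now returns the assembler pc offset recorded immediately after the call instruction and, if a return_location label is passed, binds it at that same position. A hypothetical call-site fragment is sketched below; CallCFunction, SetIsolateDataSlots, and Label are the names from the hunk, while masm, ref, and the argument counts are invented, and what the caller does with the returned offset (e.g. safepoint bookkeeping) is left open.

// Hypothetical fragment; assumes a MacroAssembler* masm and an
// ExternalReference ref are in scope. Not taken from the patch.
Label c_call_return;
int call_pc_offset = masm->CallCFunction(
    ref, /*num_reg_arguments=*/2, /*num_double_arguments=*/0,
    SetIsolateDataSlots::kYes, &c_call_return);
// call_pc_offset is pc_offset() right after the call, and c_call_return is
// bound at the same position, so both identify the C call's return address.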
@@ -4544,6 +4585,8 @@ void MacroAssembler::CallCFunctionHelper( } set_pc_for_safepoint(); + + return call_pc_offset; } } @@ -4555,8 +4598,7 @@ void MacroAssembler::CheckPageFlag(Register object, int mask, Condition cc, UseScratchRegisterScope temps(this); temps.Include(t8); Register scratch = temps.Acquire(); - And(scratch, object, - Operand(~MemoryChunkHeader::GetAlignmentMaskForAssembler())); + And(scratch, object, Operand(~MemoryChunk::GetAlignmentMaskForAssembler())); Ld_d(scratch, MemOperand(scratch, MemoryChunkLayout::kFlagsOffset)); And(scratch, scratch, Operand(mask)); Branch(condition_met, cc, scratch, Operand(zero_reg)); @@ -4596,28 +4638,31 @@ void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, } void MacroAssembler::LoadCodeInstructionStart(Register destination, - Register code_object) { + Register code_object, + CodeEntrypointTag tag) { ASM_CODE_COMMENT(this); #ifdef V8_ENABLE_SANDBOX LoadCodeEntrypointViaCodePointer( destination, - FieldMemOperand(code_object, Code::kSelfIndirectPointerOffset)); + FieldMemOperand(code_object, Code::kSelfIndirectPointerOffset), tag); #else Ld_d(destination, FieldMemOperand(code_object, Code::kInstructionStartOffset)); #endif } -void MacroAssembler::CallCodeObject(Register code_object) { +void MacroAssembler::CallCodeObject(Register code_object, + CodeEntrypointTag tag) { ASM_CODE_COMMENT(this); - LoadCodeInstructionStart(code_object, code_object); + LoadCodeInstructionStart(code_object, code_object, tag); Call(code_object); } -void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, CodeEntrypointTag tag, + JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); - LoadCodeInstructionStart(code_object, code_object); + LoadCodeInstructionStart(code_object, code_object, tag); Jump(code_object); } @@ -4628,12 +4673,13 @@ void MacroAssembler::CallJSFunction(Register function_object) { // from the code pointer table instead of going through the Code object. In // this way, we avoid one memory load on this code path. LoadCodeEntrypointViaCodePointer( - code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); + code, FieldMemOperand(function_object, JSFunction::kCodeOffset), + kJSEntrypointTag); Call(code); #else LoadTaggedField(code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); - CallCodeObject(code); + CallCodeObject(code, kJSEntrypointTag); #endif } @@ -4645,13 +4691,14 @@ void MacroAssembler::JumpJSFunction(Register function_object, // from the code pointer table instead of going through the Code object. In // this way, we avoid one memory load on this code path. 
LoadCodeEntrypointViaCodePointer( - code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); + code, FieldMemOperand(function_object, JSFunction::kCodeOffset), + kJSEntrypointTag); DCHECK_EQ(jump_mode, JumpMode::kJump); Jump(code); #else LoadTaggedField(code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); - JumpCodeObject(code, jump_mode); + JumpCodeObject(code, kJSEntrypointTag, jump_mode); #endif } @@ -4691,7 +4738,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm, __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure); static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); - __ LoadCodeInstructionStart(a2, optimized_code_entry); + __ LoadCodeInstructionStart(a2, optimized_code_entry, kJSEntrypointTag); __ Jump(a2); // Optimized code slot contains deoptimized code or code is cleared and @@ -4748,7 +4795,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode( kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister); CallRuntime(function_id, 1); - LoadCodeInstructionStart(a2, a0); + LoadCodeInstructionStart(a2, a0, kJSEntrypointTag); // Restore target function, new target and actual argument count. Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister, kJavaScriptCallArgCountRegister); diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h index 6779ff5d754c53..35bc5a3dc5f21e 100644 --- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h +++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h @@ -172,6 +172,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { RelocInfo::Mode rmode = RelocInfo::NO_INFO, LiFlags mode = OPTIMIZE_SIZE); void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE); + void LoadLabelRelative(Register dst, Label* target); void LoadFromConstantsTable(Register destination, int constant_index) final; void LoadRootRegisterOffset(Register destination, intptr_t offset) final; @@ -222,9 +223,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { Operand range); // Load the code entry point from the Code object. - void LoadCodeInstructionStart(Register destination, Register code_object); - void CallCodeObject(Register code_object); - void JumpCodeObject(Register code_object, + void LoadCodeInstructionStart(Register destination, Register code_object, + CodeEntrypointTag tag); + void CallCodeObject(Register code_object, CodeEntrypointTag tag); + void JumpCodeObject(Register code_object, CodeEntrypointTag tag, JumpMode jump_mode = JumpMode::kJump); // Convenience functions to call/jmp to the code of a JSFunction object. @@ -518,19 +520,23 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // garbage collection, since that might move the code and invalidate the // return address (unless this is somehow accounted for by the called // function). 
- void CallCFunction( + int CallCFunction( ExternalReference function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); + int CallCFunction( Register function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); + int CallCFunction( ExternalReference function, int num_reg_arguments, int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); + int CallCFunction( Register function, int num_reg_arguments, int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); // See comments at the beginning of Builtins::Generate_CEntry. inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); } @@ -893,7 +899,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // Only available when the sandbox is enabled as it requires the code pointer // table. void LoadCodeEntrypointViaCodePointer(Register destination, - MemOperand field_operand); + MemOperand field_operand, + CodeEntrypointTag tag); #endif // Load a protected pointer field. @@ -1213,9 +1220,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd, bool f32 = true); - void CallCFunctionHelper( + int CallCFunctionHelper( Register function, int num_reg_arguments, int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode); diff --git a/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h b/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h index 21e82f312f6c4d..17dcdba1e3e2c0 100644 --- a/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h +++ b/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h @@ -84,6 +84,21 @@ constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() { return a3; } +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::EnumIndexRegister() { + return a4; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::CacheTypeRegister() { + return a5; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::SlotRegister() { + return a2; +} + // static constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() { return kInterpreterAccumulatorRegister; @@ -323,7 +338,8 @@ CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister() { return a2; } // static -constexpr Register CallApiCallbackGenericDescriptor::CallHandlerInfoRegister() { +constexpr Register +CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister() { return a3; } // static diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc index 5068ce795e977e..9a9790d75ee2a5 100644 --- 
a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc +++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc @@ -19,7 +19,7 @@ #include "src/debug/debug.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frames-inl.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #include "src/init/bootstrapper.h" #include "src/logging/counters.h" #include "src/objects/heap-number.h" @@ -4437,7 +4437,7 @@ void MacroAssembler::CallBuiltin(Builtin builtin) { case BuiltinCallJumpMode::kForMksnapshot: { Handle code = isolate()->builtins()->code_handle(builtin); IndirectLoadConstant(temp, code); - CallCodeObject(temp); + CallCodeObject(temp, kJSEntrypointTag); break; } case BuiltinCallJumpMode::kPCRelative: @@ -4477,7 +4477,7 @@ void MacroAssembler::TailCallBuiltin(Builtin builtin) { case BuiltinCallJumpMode::kForMksnapshot: { Handle code = isolate()->builtins()->code_handle(builtin); IndirectLoadConstant(temp, code); - JumpCodeObject(temp); + JumpCodeObject(temp, kJSEntrypointTag); break; } case BuiltinCallJumpMode::kPCRelative: @@ -4915,8 +4915,7 @@ void MacroAssembler::TestCodeIsMarkedForDeoptimizationAndJump( } Operand MacroAssembler::ClearedValue() const { - return Operand( - static_cast(HeapObjectReference::ClearedValue(isolate()).ptr())); + return Operand(static_cast(i::ClearedValue(isolate()).ptr())); } void MacroAssembler::InvokePrologue(Register expected_parameter_count, @@ -6172,8 +6171,7 @@ void MacroAssembler::CallCFunctionHelper( void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met) { ASM_CODE_COMMENT(this); - And(scratch, object, - Operand(~MemoryChunkHeader::GetAlignmentMaskForAssembler())); + And(scratch, object, Operand(~MemoryChunk::GetAlignmentMaskForAssembler())); Ld(scratch, MemOperand(scratch, MemoryChunkLayout::kFlagsOffset)); And(scratch, scratch, Operand(mask)); Branch(condition_met, cc, scratch, Operand(zero_reg)); @@ -6228,39 +6226,41 @@ void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, } void MacroAssembler::LoadCodeInstructionStart( - Register destination, Register code_data_container_object) { + Register destination, Register code_data_container_object, + CodeEntrypointTag tag) { ASM_CODE_COMMENT(this); Ld(destination, FieldMemOperand(code_data_container_object, Code::kInstructionStartOffset)); } -void MacroAssembler::CallCodeObject(Register code_data_container_object) { +void MacroAssembler::CallCodeObject(Register code_data_container_object, + CodeEntrypointTag tag) { ASM_CODE_COMMENT(this); LoadCodeInstructionStart(code_data_container_object, - code_data_container_object); + code_data_container_object, tag); Call(code_data_container_object); } void MacroAssembler::JumpCodeObject(Register code_data_container_object, - JumpMode jump_mode) { + CodeEntrypointTag tag, JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); LoadCodeInstructionStart(code_data_container_object, - code_data_container_object); + code_data_container_object, tag); Jump(code_data_container_object); } void MacroAssembler::CallJSFunction(Register function_object) { Register code = kJavaScriptCallCodeStartRegister; Ld(code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); - CallCodeObject(code); + CallCodeObject(code, kJSEntrypointTag); } void MacroAssembler::JumpJSFunction(Register function_object, JumpMode jump_mode) { Register code = kJavaScriptCallCodeStartRegister; Ld(code, FieldMemOperand(function_object, 
JSFunction::kCodeOffset)); - JumpCodeObject(code, jump_mode); + JumpCodeObject(code, kJSEntrypointTag, jump_mode); } namespace { @@ -6301,7 +6301,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm, scratch1, scratch2); static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); - __ LoadCodeInstructionStart(a2, optimized_code_entry); + __ LoadCodeInstructionStart(a2, optimized_code_entry, kJSEntrypointTag); __ Jump(a2); // Optimized code slot contains deoptimized code or code is cleared and @@ -6368,7 +6368,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode( } static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); - LoadCodeInstructionStart(a2, v0); + LoadCodeInstructionStart(a2, v0, kJSEntrypointTag); Jump(a2); } diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h index 68e1fae7506eeb..b6fb336b143ae9 100644 --- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h +++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h @@ -274,9 +274,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // Load the code entry point from the Code object. void LoadCodeInstructionStart(Register destination, - Register code_data_container_object); - void CallCodeObject(Register code_data_container_object); + Register code_data_container_object, + CodeEntrypointTag tag); + void CallCodeObject(Register code_data_container_object, + CodeEntrypointTag tag); void JumpCodeObject(Register code_data_container_object, + CodeEntrypointTag tag, JumpMode jump_mode = JumpMode::kJump); // Convenience functions to call/jmp to the code of a JSFunction object. diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc index 10b3c72ce2dd57..5f44372b59b1c7 100644 --- a/deps/v8/src/codegen/optimized-compilation-info.cc +++ b/deps/v8/src/codegen/optimized-compilation-info.cc @@ -86,7 +86,7 @@ void OptimizedCompilationInfo::ConfigureFlags() { #ifdef V8_ENABLE_BUILTIN_JUMP_TABLE_SWITCH set_switch_jump_table(); #endif // V8_TARGET_ARCH_X64 - V8_FALLTHROUGH; + [[fallthrough]]; case CodeKind::FOR_TESTING: if (v8_flags.turbo_splitting) set_splitting(); if (v8_flags.enable_allocation_folding) set_allocation_folding(); @@ -99,7 +99,6 @@ void OptimizedCompilationInfo::ConfigureFlags() { set_switch_jump_table(); break; case CodeKind::C_WASM_ENTRY: - case CodeKind::JS_TO_JS_FUNCTION: case CodeKind::JS_TO_WASM_FUNCTION: case CodeKind::WASM_TO_JS_FUNCTION: break; diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc index 84e7581f776898..d6a7c345d1d6a6 100644 --- a/deps/v8/src/codegen/ppc/assembler-ppc.cc +++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc @@ -1502,8 +1502,8 @@ void Assembler::bitwise_add32(Register dst, Register src, int32_t value) { } } -void Assembler::patch_wasm_cpi_return_address(Register dst, int pc_offset, - int return_address_offset) { +void Assembler::patch_pc_address(Register dst, int pc_offset, + int return_address_offset) { DCHECK(is_int16(return_address_offset)); Assembler patching_assembler( AssemblerOptions{}, diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h index 1299b2d6135577..99c62c22c758fd 100644 --- a/deps/v8/src/codegen/ppc/assembler-ppc.h +++ b/deps/v8/src/codegen/ppc/assembler-ppc.h @@ -1003,9 +1003,8 @@ class Assembler : public AssemblerBase { void bitwise_mov32(Register dst, int32_t value); void bitwise_add32(Register dst, Register src, 
int32_t value); - // Patch the offset to the return address after CallCFunction. - void patch_wasm_cpi_return_address(Register dst, int pc_offset, - int return_address_offset); + // Patch the offset to the return address after Call. + void patch_pc_address(Register dst, int pc_offset, int return_address_offset); // Load the position of the label relative to the generated code object // pointer in a register. diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h index 9b8ccf96958a78..f207c7e41cd5ff 100644 --- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h +++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h @@ -84,6 +84,21 @@ constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() { return r6; } +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::EnumIndexRegister() { + return r7; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::CacheTypeRegister() { + return r8; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::SlotRegister() { + return r5; +} + // static constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() { return kInterpreterAccumulatorRegister; @@ -313,7 +328,8 @@ CallApiCallbackGenericDescriptor::TopmostScriptHavingContextRegister() { return r4; } // static -constexpr Register CallApiCallbackGenericDescriptor::CallHandlerInfoRegister() { +constexpr Register +CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister() { return r6; } // static diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc index 53a6b609351e0d..5e957d3b11b86f 100644 --- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc +++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc @@ -20,7 +20,7 @@ #include "src/debug/debug.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frames-inl.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #include "src/init/bootstrapper.h" #include "src/logging/counters.h" #include "src/runtime/runtime.h" @@ -372,8 +372,7 @@ void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code, } Operand MacroAssembler::ClearedValue() const { - return Operand( - static_cast(HeapObjectReference::ClearedValue(isolate()).ptr())); + return Operand(static_cast(i::ClearedValue(isolate()).ptr())); } void MacroAssembler::Call(Label* target) { b(target, SetLK); } @@ -2915,24 +2914,36 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1, } } -void MacroAssembler::CallCFunction(ExternalReference function, - int num_reg_arguments, - int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots, - bool has_function_descriptor) { +int MacroAssembler::CallCFunction(ExternalReference function, + int num_reg_arguments, + int num_double_arguments, + SetIsolateDataSlots set_isolate_data_slots, + bool has_function_descriptor) { Move(ip, function); - CallCFunction(ip, num_reg_arguments, num_double_arguments, - set_isolate_data_slots, has_function_descriptor); + return CallCFunction(ip, num_reg_arguments, num_double_arguments, + set_isolate_data_slots, has_function_descriptor); } -void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, - int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots, - bool has_function_descriptor) { +int MacroAssembler::CallCFunction(Register function, int num_reg_arguments, + int num_double_arguments, + SetIsolateDataSlots set_isolate_data_slots, + 
bool has_function_descriptor) { ASM_CODE_COMMENT(this); DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); DCHECK(has_frame()); + Label start_call; + Register pc_scratch = r11; + DCHECK(!AreAliased(pc_scratch, function)); + LoadPC(pc_scratch); + bind(&start_call); + int start_pc_offset = pc_offset(); + // We are going to patch this instruction after emitting + // Call, using a zero offset here as placeholder for now. + // patch_pc_address assumes `addi` is used here to + // add the offset to pc. + addi(pc_scratch, pc_scratch, Operand::Zero()); + if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { // Save the frame pointer and PC so that the stack layout remains iterable, // even without an ExitFrame which normally exists between JS and C frames. @@ -2941,11 +2952,18 @@ void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, mflr(scratch); // See x64 code for reasoning about how to address the isolate data fields. if (root_array_available()) { - LoadPC(r0); - StoreU64(r0, MemOperand(kRootRegister, - IsolateData::fast_c_call_caller_pc_offset())); + StoreU64(pc_scratch, + MemOperand(kRootRegister, + IsolateData::fast_c_call_caller_pc_offset())); StoreU64(fp, MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset())); +#if DEBUG + // Reset Isolate::context field right before the fast C call such that the + // GC can visit this field unconditionally. This is necessary because + // CEntry sets it to kInvalidContext in debug build only. + mov(pc_scratch, Operand(Context::kNoContext)); + StoreRootRelative(IsolateData::context_offset(), pc_scratch); +#endif } else { DCHECK_NOT_NULL(isolate()); Register addr_scratch = r7; @@ -2953,11 +2971,19 @@ void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, Move(addr_scratch, ExternalReference::fast_c_call_caller_pc_address(isolate())); - LoadPC(r0); - StoreU64(r0, MemOperand(addr_scratch)); + StoreU64(pc_scratch, MemOperand(addr_scratch)); Move(addr_scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); StoreU64(fp, MemOperand(addr_scratch)); +#if DEBUG + // Reset Isolate::context field right before the fast C call such that the + // GC can visit this field unconditionally. This is necessary because + // CEntry sets it to kInvalidContext in debug build only. + mov(pc_scratch, Operand(Context::kNoContext)); + StoreU64(pc_scratch, ExternalReferenceAsOperand( + ExternalReference::context_address(isolate()), + addr_scratch)); +#endif Pop(addr_scratch); } mtlr(scratch); @@ -2982,6 +3008,15 @@ void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, } Call(dest); + int call_pc_offset = pc_offset(); + int offset_since_start_call = SizeOfCodeGeneratedSince(&start_call); + // Here we are going to patch the `addi` instruction above to use the + // correct offset. + // LoadPC emits two instructions and pc is the address of its + // second emitted instruction therefore there is one more instruction to + // count. + offset_since_start_call += kInstrSize; + patch_pc_address(pc_scratch, start_pc_offset, offset_since_start_call); if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { // We don't unset the PC; the FP is the source of truth. 
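The ppc CallCFunction above records the return address by emitting an `addi pc_scratch, pc_scratch, 0` placeholder and patching its immediate once the call has been emitted. A small worked example of the value patch_pc_address writes back, with all concrete offsets invented for illustration:

// Illustration only; the offsets are made up.
constexpr int kInstrSize = 4;
// Suppose LoadPC starts at code offset 0x40. It emits two instructions, and
// pc_scratch receives the address of the second one, i.e. offset 0x44.
// start_call (and the addi placeholder) is then bound at offset 0x48.
int generated_since_start_call = 10 * kInstrSize;  // SizeOfCodeGeneratedSince(&start_call)
// One extra instruction accounts for pc_scratch pointing one slot before
// start_call (LoadPC's second emitted instruction):
int patched_offset = generated_since_start_call + kInstrSize;  // 44 bytes
// The placeholder becomes `addi pc_scratch, pc_scratch, 44`, so
// pc_scratch = 0x44 + 44 = 0x70 -- the offset just after Call(dest), which is
// the return address stored into fast_c_call_caller_pc.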
@@ -3012,21 +3047,22 @@ void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, } else { AddS64(sp, sp, Operand(stack_space * kSystemPointerSize), r0); } + + return call_pc_offset; } -void MacroAssembler::CallCFunction(ExternalReference function, - int num_arguments, - SetIsolateDataSlots set_isolate_data_slots, - bool has_function_descriptor) { - CallCFunction(function, num_arguments, 0, set_isolate_data_slots, - has_function_descriptor); +int MacroAssembler::CallCFunction(ExternalReference function, int num_arguments, + SetIsolateDataSlots set_isolate_data_slots, + bool has_function_descriptor) { + return CallCFunction(function, num_arguments, 0, set_isolate_data_slots, + has_function_descriptor); } -void MacroAssembler::CallCFunction(Register function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots, - bool has_function_descriptor) { - CallCFunction(function, num_arguments, 0, set_isolate_data_slots, - has_function_descriptor); +int MacroAssembler::CallCFunction(Register function, int num_arguments, + SetIsolateDataSlots set_isolate_data_slots, + bool has_function_descriptor) { + return CallCFunction(function, num_arguments, 0, set_isolate_data_slots, + has_function_descriptor); } void MacroAssembler::CheckPageFlag( @@ -5486,12 +5522,12 @@ void MacroAssembler::ByteReverseU64(Register dst, Register val, Register) { } void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { - CmpS64(x, Operand(y), r0); + CmpS32(x, Operand(y), r0); beq(dest); } void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { - CmpS64(x, Operand(y), r0); + CmpS32(x, Operand(y), r0); blt(dest); } diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h index f6bc06cca5e13e..49045461124af8 100644 --- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h +++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h @@ -661,20 +661,20 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // garbage collection, since that might move the code and invalidate the // return address (unless this is somehow accounted for by the called // function). 
- void CallCFunction( + int CallCFunction( ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, bool has_function_descriptor = true); - void CallCFunction( + int CallCFunction( Register function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, bool has_function_descriptor = true); - void CallCFunction( + int CallCFunction( ExternalReference function, int num_reg_arguments, int num_double_arguments, SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, bool has_function_descriptor = true); - void CallCFunction( + int CallCFunction( Register function, int num_reg_arguments, int num_double_arguments, SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, bool has_function_descriptor = true); diff --git a/deps/v8/src/codegen/riscv/assembler-riscv-inl.h b/deps/v8/src/codegen/riscv/assembler-riscv-inl.h index c1395c205201cc..79b3a4b24e7258 100644 --- a/deps/v8/src/codegen/riscv/assembler-riscv-inl.h +++ b/deps/v8/src/codegen/riscv/assembler-riscv-inl.h @@ -191,6 +191,12 @@ void WritableRelocInfo::set_target_object(Tagged target, ICacheFlushMode icache_flush_mode) { DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_)); if (IsCompressedEmbeddedObject(rmode_)) { + DCHECK(COMPRESS_POINTERS_BOOL); + // We must not compress pointers to objects outside of the main pointer + // compression cage as we wouldn't be able to decompress them with the + // correct cage base. + DCHECK_IMPLIES(V8_ENABLE_SANDBOX_BOOL, !IsTrustedSpaceObject(target)); + DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(target)); Assembler::set_target_compressed_address_at( pc_, constant_pool_, V8HeapCompressionScheme::CompressObject(target.ptr()), diff --git a/deps/v8/src/codegen/riscv/assembler-riscv.cc b/deps/v8/src/codegen/riscv/assembler-riscv.cc index 621ae7ffb8dcd0..0a248af9cb1cd8 100644 --- a/deps/v8/src/codegen/riscv/assembler-riscv.cc +++ b/deps/v8/src/codegen/riscv/assembler-riscv.cc @@ -95,7 +95,9 @@ void CpuFeatures::ProbeImpl(bool cross_compile) { } void CpuFeatures::PrintTarget() {} -void CpuFeatures::PrintFeatures() {} +void CpuFeatures::PrintFeatures() { + printf("supports_wasm_simd_128=%d\n", CpuFeatures::SupportsWasmSimd128()); +} int ToNumber(Register reg) { DCHECK(reg.is_valid()); const int kNumbers[] = { diff --git a/deps/v8/src/codegen/riscv/assembler-riscv.h b/deps/v8/src/codegen/riscv/assembler-riscv.h index 3fa2812a1a6259..80c2d1d11c4bb2 100644 --- a/deps/v8/src/codegen/riscv/assembler-riscv.h +++ b/deps/v8/src/codegen/riscv/assembler-riscv.h @@ -429,6 +429,24 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); }; + class V8_NODISCARD BlockPoolsScope { + public: + // Block Trampoline Pool and Constant Pool. Emits pools if necessary to + // ensure that {margin} more bytes can be emitted without triggering pool + // emission. + explicit BlockPoolsScope(Assembler* assem, size_t margin = 0) + : block_const_pool_(assem, margin), block_trampoline_pool_(assem) {} + + BlockPoolsScope(Assembler* assem, PoolEmissionCheck check) + : block_const_pool_(assem, check), block_trampoline_pool_(assem) {} + ~BlockPoolsScope() {} + + private: + BlockConstPoolScope block_const_pool_; + BlockTrampolinePoolScope block_trampoline_pool_; + DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope); + }; + // Class for postponing the assembly buffer growth. 
Typically used for // sequences of instructions that must be emitted as a unit, before // buffer growth (and relocation) can occur. @@ -512,8 +530,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, inline int UnboundLabelsCount() { return unbound_labels_count_; } - using BlockPoolsScope = BlockTrampolinePoolScope; - void RecordConstPool(int size); void ForceConstantPoolEmissionWithoutJump() { diff --git a/deps/v8/src/codegen/riscv/interface-descriptors-riscv-inl.h b/deps/v8/src/codegen/riscv/interface-descriptors-riscv-inl.h index 1b3ad331ba1abb..be5ff661b1236b 100644 --- a/deps/v8/src/codegen/riscv/interface-descriptors-riscv-inl.h +++ b/deps/v8/src/codegen/riscv/interface-descriptors-riscv-inl.h @@ -84,6 +84,21 @@ constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() { return a3; } +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::EnumIndexRegister() { + return a4; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::CacheTypeRegister() { + return a5; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::SlotRegister() { + return a2; +} + // static constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() { return kInterpreterAccumulatorRegister; @@ -330,7 +345,8 @@ CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister() { return a2; } // static -constexpr Register CallApiCallbackGenericDescriptor::CallHandlerInfoRegister() { +constexpr Register +CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister() { return a3; } // static diff --git a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc index a9fc8c5b8ec37a..fd5e97339b4435 100644 --- a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc +++ b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc @@ -17,7 +17,7 @@ #include "src/debug/debug.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frames-inl.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #include "src/init/bootstrapper.h" #include "src/logging/counters.h" #include "src/objects/heap-number.h" @@ -130,7 +130,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm, __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure); static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); - __ LoadCodeInstructionStart(a2, optimized_code_entry); + __ LoadCodeInstructionStart(a2, optimized_code_entry, kJSEntrypointTag); __ Jump(a2); // Optimized code slot contains deoptimized code or code is cleared and @@ -156,18 +156,19 @@ void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) { Operand(FEEDBACK_VECTOR_TYPE)); } } +void MacroAssembler::AssertUnreachable(AbortReason reason) { + if (v8_flags.debug_code) Abort(reason); +} #endif // V8_ENABLE_DEBUG_CODE void MacroAssembler::ReplaceClosureCodeWithOptimizedCode( Register optimized_code, Register closure) { ASM_CODE_COMMENT(this); - DCHECK(!AreAliased(optimized_code, closure)); - // Store code entry in the closure. 
- StoreTaggedField(optimized_code, - FieldMemOperand(closure, JSFunction::kCodeOffset)); + StoreCodePointerField(optimized_code, + FieldMemOperand(closure, JSFunction::kCodeOffset)); RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code, - kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore, - SmiCheck::kOmit); + kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore, SmiCheck::kOmit, + SlotDescriptor::ForCodePointerSlot()); } void MacroAssembler::GenerateTailCallToReturnedCode( @@ -188,7 +189,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode( CallRuntime(function_id, 1); // Use the return value before restoring a0 - LoadCodeInstructionStart(a2, a0); + LoadCodeInstructionStart(a2, a0, kJSEntrypointTag); // Restore target function, new target and actual argument count. Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister, kJavaScriptCallArgCountRegister); @@ -316,7 +317,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { void MacroAssembler::RecordWriteField(Register object, int offset, Register value, RAStatus ra_status, SaveFPRegsMode save_fp, - SmiCheck smi_check) { + SmiCheck smi_check, SlotDescriptor slot) { DCHECK(!AreAliased(object, value)); // First, check if a write barrier is even needed. The tests below // catch stores of Smis. @@ -344,7 +345,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } RecordWrite(object, Operand(offset - kHeapObjectTag), value, ra_status, - save_fp, SmiCheck::kOmit); + save_fp, SmiCheck::kOmit, slot); bind(&done); } @@ -368,6 +369,91 @@ void MacroAssembler::StoreTrustedPointerField(Register value, #endif } +#ifdef V8_ENABLE_SANDBOX +void MacroAssembler::ResolveIndirectPointerHandle(Register destination, + Register handle, + IndirectPointerTag tag) { + ASM_CODE_COMMENT(this); + // The tag implies which pointer table to use. + if (tag == kUnknownIndirectPointerTag) { + // In this case we have to rely on the handle marking to determine which + // pointer table to use. + Label is_trusted_pointer_handle, done; + DCHECK(!AreAliased(destination, handle)); + And(destination, handle, kCodePointerHandleMarker); + Branch(&is_trusted_pointer_handle, eq, destination, Operand(zero_reg)); + ResolveCodePointerHandle(destination, handle); + Branch(&done); + bind(&is_trusted_pointer_handle); + ResolveTrustedPointerHandle(destination, handle, + kUnknownIndirectPointerTag); + bind(&done); + } else if (tag == kCodeIndirectPointerTag) { + ResolveCodePointerHandle(destination, handle); + } else { + ResolveTrustedPointerHandle(destination, handle, tag); + } +} + +void MacroAssembler::ResolveTrustedPointerHandle(Register destination, + Register handle, + IndirectPointerTag tag) { + ASM_CODE_COMMENT(this); + DCHECK_NE(tag, kCodeIndirectPointerTag); + DCHECK(!AreAliased(handle, destination)); + + Register table = destination; + DCHECK(root_array_available_); + LoadWord(table, MemOperand{kRootRegister, + IsolateData::trusted_pointer_table_offset()}); + SrlWord(handle, handle, kTrustedPointerHandleShift); + CalcScaledAddress(destination, table, handle, + kTrustedPointerTableEntrySizeLog2); + LoadWord(destination, MemOperand(destination, 0)); + // The LSB is used as marking bit by the trusted pointer table, so here we + // have to set it using a bitwise OR as it may or may not be set. + // Untag the pointer and remove the marking bit in one operation. 
+ Register tag_reg = handle; + li(tag_reg, Operand(~(tag | kTrustedPointerTableMarkBit))); + and_(destination, destination, tag_reg); +} + +void MacroAssembler::ResolveCodePointerHandle(Register destination, + Register handle) { + ASM_CODE_COMMENT(this); + DCHECK(!AreAliased(handle, destination)); + + Register table = destination; + li(table, ExternalReference::code_pointer_table_address()); + SrlWord(handle, handle, kCodePointerHandleShift); + CalcScaledAddress(destination, table, handle, kCodePointerTableEntrySizeLog2); + LoadWord(destination, + MemOperand(destination, kCodePointerTableEntryCodeObjectOffset)); + // The LSB is used as marking bit by the code pointer table, so here we have + // to set it using a bitwise OR as it may or may not be set. + Or(destination, destination, Operand(kHeapObjectTag)); +} + +void MacroAssembler::LoadCodeEntrypointViaCodePointer(Register destination, + MemOperand field_operand, + CodeEntrypointTag tag) { + DCHECK_NE(tag, kInvalidEntrypointTag); + ASM_CODE_COMMENT(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, ExternalReference::code_pointer_table_address()); + Lwu(destination, field_operand); + SrlWord(destination, destination, kCodePointerHandleShift); + SllWord(destination, destination, kCodePointerTableEntrySizeLog2); + AddWord(scratch, scratch, destination); + LoadWord(destination, MemOperand(scratch, 0)); + if (tag != 0) { + li(scratch, Operand(tag)); + xor_(destination, destination, scratch); + } +} +#endif // V8_ENABLE_SANDBOX + void MacroAssembler::LoadExternalPointerField(Register destination, MemOperand field_operand, ExternalPointerTag tag, @@ -387,7 +473,7 @@ void MacroAssembler::LoadExternalPointerField(Register destination, MemOperand(isolate_root, IsolateData::external_pointer_table_offset() + Internals::kExternalPointerTableBasePointerOffset)); - lwu(destination, field_operand); + Lwu(destination, field_operand); srli(destination, destination, kExternalPointerIndexShift); slli(destination, destination, kExternalPointerTableEntrySizeLog2); AddWord(external_table, external_table, destination); @@ -405,8 +491,9 @@ void MacroAssembler::LoadIndirectPointerField(Register destination, #ifdef V8_ENABLE_SANDBOX ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); - Register handle = temps.hasAvailable() ? 
temps.Acquire() : t8; - Ld_wu(handle, field_operand); + Register handle = t6; + DCHECK_NE(handle, destination); + Lwu(handle, field_operand); ResolveIndirectPointerHandle(destination, handle, tag); #else @@ -419,14 +506,14 @@ void MacroAssembler::StoreIndirectPointerField(Register value, #ifdef V8_ENABLE_SANDBOX UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); - Ld_w(scratch, FieldMemOperand( - value, ExposedTrustedObject::kSelfIndirectPointerOffset)); - St_w(scratch, dst_field_operand); + Lw(scratch, + FieldMemOperand(value, ExposedTrustedObject::kSelfIndirectPointerOffset)); + Sw(scratch, dst_field_operand); #else UNREACHABLE(); -#endif +#endif // V8_ENABLE_SANDBOX } -#endif +#endif // V8_TARGET_ARCH_RISCV64 void MacroAssembler::MaybeSaveRegisters(RegList registers) { if (registers.is_empty()) return; @@ -438,44 +525,54 @@ void MacroAssembler::MaybeRestoreRegisters(RegList registers) { MultiPop(registers); } -void MacroAssembler::CallEphemeronKeyBarrier(Register object, - Register slot_address, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode) { - DCHECK(!AreAliased(object, slot_address)); - RegList registers = - WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address); + ASM_CODE_COMMENT(this); + RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); MaybeSaveRegisters(registers); Register object_parameter = WriteBarrierDescriptor::ObjectRegister(); Register slot_address_parameter = WriteBarrierDescriptor::SlotAddressRegister(); - Push(object); - Push(slot_address); - Pop(slot_address_parameter); - Pop(object_parameter); + MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset); CallBuiltin(Builtins::EphemeronKeyBarrier(fp_mode)); MaybeRestoreRegisters(registers); } +void MacroAssembler::CallIndirectPointerBarrier(Register object, Operand offset, + SaveFPRegsMode fp_mode, + IndirectPointerTag tag) { + ASM_CODE_COMMENT(this); + RegList registers = + IndirectPointerWriteBarrierDescriptor::ComputeSavedRegisters(object); + MaybeSaveRegisters(registers); + + MoveObjectAndSlot( + IndirectPointerWriteBarrierDescriptor::ObjectRegister(), + IndirectPointerWriteBarrierDescriptor::SlotAddressRegister(), object, + offset); + li(IndirectPointerWriteBarrierDescriptor::IndirectPointerTagRegister(), + Operand(tag)); + + CallBuiltin(Builtins::IndirectPointerBarrier(fp_mode)); + MaybeRestoreRegisters(registers); +} + void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, - Register slot_address, + Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode) { - DCHECK(!AreAliased(object, slot_address)); - RegList registers = - WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address); + ASM_CODE_COMMENT(this); + RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); MaybeSaveRegisters(registers); Register object_parameter = WriteBarrierDescriptor::ObjectRegister(); Register slot_address_parameter = WriteBarrierDescriptor::SlotAddressRegister(); - Push(object); - Push(slot_address); - Pop(slot_address_parameter); - Pop(object_parameter); + MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset); CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, mode); @@ -498,12 +595,47 @@ void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, } } +void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, + Register object, Operand offset) { + ASM_CODE_COMMENT(this); 
+ DCHECK_NE(dst_object, dst_slot); + // If `offset` is a register, it cannot overlap with `object`. + DCHECK_IMPLIES(!offset.IsImmediate(), offset.rm() != object); + + // If the slot register does not overlap with the object register, we can + // overwrite it. + if (dst_slot != object) { + AddWord(dst_slot, object, offset); + mv(dst_object, object); + return; + } + + DCHECK_EQ(dst_slot, object); + + // If the destination object register does not overlap with the offset + // register, we can overwrite it. + if (offset.IsImmediate() || (offset.rm() != dst_object)) { + mv(dst_object, dst_slot); + AddWord(dst_slot, dst_slot, offset); + return; + } + + DCHECK_EQ(dst_object, offset.rm()); + + // We only have `dst_slot` and `dst_object` left as distinct registers so we + // have to swap them. We write this as a add+sub sequence to avoid using a + // scratch register. + AddWord(dst_slot, dst_slot, dst_object); + SubWord(dst_object, dst_slot, dst_object); +} + // Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved) // The register 'object' contains a heap object pointer. The heap object // tag is shifted away. void MacroAssembler::RecordWrite(Register object, Operand offset, Register value, RAStatus ra_status, - SaveFPRegsMode fp_mode, SmiCheck smi_check) { + SaveFPRegsMode fp_mode, SmiCheck smi_check, + SlotDescriptor slot) { DCHECK(!AreAliased(object, value)); if (v8_flags.debug_code) { @@ -511,7 +643,17 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, Register temp = temps.Acquire(); DCHECK(!AreAliased(object, value, temp)); AddWord(temp, object, offset); +#ifdef V8_TARGET_ARCH_RISCV64 + if (slot.contains_indirect_pointer()) { + LoadIndirectPointerField(temp, MemOperand(temp, 0), + slot.indirect_pointer_tag()); + } else { + DCHECK(slot.contains_direct_pointer()); + LoadTaggedField(temp, MemOperand(temp, 0)); + } +#else LoadTaggedField(temp, MemOperand(temp)); +#endif Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, temp, Operand(value)); } @@ -531,17 +673,12 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, { UseScratchRegisterScope temps(this); - Register temp = temps.Acquire(); - CheckPageFlag(value, - temp, // Used as scratch. - MemoryChunk::kPointersToHereAreInterestingMask, + CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingMask, eq, // In RISC-V, it uses cc for a comparison with 0, so if // no bits are set, and cc is eq, it will branch to done &done); - CheckPageFlag(object, - temp, // Used as scratch. - MemoryChunk::kPointersFromHereAreInterestingMask, + CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, eq, // In RISC-V, it uses cc for a comparison with 0, so if // no bits are set, and cc is eq, it will branch to done &done); @@ -553,9 +690,16 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, Register slot_address = WriteBarrierDescriptor::SlotAddressRegister(); DCHECK(!AreAliased(object, slot_address, value)); // TODO(cbruni): Turn offset into int. 
- DCHECK(offset.IsImmediate()); - AddWord(slot_address, object, offset); - CallRecordWriteStub(object, slot_address, fp_mode); + if (slot.contains_direct_pointer()) { + DCHECK(offset.IsImmediate()); + AddWord(slot_address, object, offset); + CallRecordWriteStub(object, slot_address, fp_mode, + StubCallMode::kCallBuiltinPointer); + } else { + DCHECK(slot.contains_indirect_pointer()); + CallIndirectPointerBarrier(object, offset, fp_mode, + slot.indirect_pointer_tag()); + } if (ra_status == kRAHasNotBeenSaved) { pop(ra); } @@ -6094,8 +6238,7 @@ void MacroAssembler::JumpIfCodeIsMarkedForDeoptimization( } Operand MacroAssembler::ClearedValue() const { - return Operand( - static_cast(HeapObjectReference::ClearedValue(isolate()).ptr())); + return Operand(static_cast(i::ClearedValue(isolate()).ptr())); } void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label, @@ -6416,37 +6559,42 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, PrepareCallCFunction(num_reg_arguments, 0, scratch); } -void MacroAssembler::CallCFunction(ExternalReference function, - int num_reg_arguments, - int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots) { +int MacroAssembler::CallCFunction(ExternalReference function, + int num_reg_arguments, + int num_double_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { BlockTrampolinePoolScope block_trampoline_pool(this); li(t6, function); - CallCFunctionHelper(t6, num_reg_arguments, num_double_arguments, - set_isolate_data_slots); + return CallCFunctionHelper(t6, num_reg_arguments, num_double_arguments, + set_isolate_data_slots, return_location); } -void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, - int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots) { - CallCFunctionHelper(function, num_reg_arguments, num_double_arguments, - set_isolate_data_slots); +int MacroAssembler::CallCFunction(Register function, int num_reg_arguments, + int num_double_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { + return CallCFunctionHelper(function, num_reg_arguments, num_double_arguments, + set_isolate_data_slots, return_location); } -void MacroAssembler::CallCFunction(ExternalReference function, - int num_arguments, - SetIsolateDataSlots set_isolate_data_slots) { - CallCFunction(function, num_arguments, 0, set_isolate_data_slots); +int MacroAssembler::CallCFunction(ExternalReference function, int num_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { + return CallCFunction(function, num_arguments, 0, set_isolate_data_slots, + return_location); } -void MacroAssembler::CallCFunction(Register function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots) { - CallCFunction(function, num_arguments, 0, set_isolate_data_slots); +int MacroAssembler::CallCFunction(Register function, int num_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { + return CallCFunction(function, num_arguments, 0, set_isolate_data_slots, + return_location); } -void MacroAssembler::CallCFunctionHelper( +int MacroAssembler::CallCFunctionHelper( Register function, int num_reg_arguments, int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots) { + SetIsolateDataSlots set_isolate_data_slots, Label* return_location) { DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); DCHECK(has_frame()); ASM_CODE_COMMENT(this); @@ -6480,6 +6628,7 @@ void 
MacroAssembler::CallCFunctionHelper( // Just call directly. The function called cannot cause a GC, or // allow preemption, so the return address in the link register // stays correct. + Label get_pc; { if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { if (function != t6) { @@ -6494,7 +6643,7 @@ void MacroAssembler::CallCFunctionHelper( Register pc_scratch = t1; Register scratch = t2; - auipc(pc_scratch, 0); + LoadAddress(pc_scratch, &get_pc); // See x64 code for reasoning about how to address the isolate data // fields. if (root_array_available()) { @@ -6503,6 +6652,13 @@ void MacroAssembler::CallCFunctionHelper( IsolateData::fast_c_call_caller_pc_offset())); StoreWord(fp, MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset())); +#if DEBUG + // Reset Isolate::context field right before the fast C call such that + // the GC can visit this field unconditionally. This is necessary + // because CEntry sets it to kInvalidContext in debug build only. + static_assert(Context::kNoContext == 0); + StoreRootRelative(IsolateData::context_offset(), zero_reg); +#endif } else { DCHECK_NOT_NULL(isolate()); li(scratch, @@ -6511,24 +6667,36 @@ void MacroAssembler::CallCFunctionHelper( li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); StoreWord(fp, MemOperand(scratch)); +#if DEBUG + // Reset Isolate::context field right before the fast C call such that + // the GC can visit this field unconditionally. This is necessary + // because CEntry sets it to kInvalidContext in debug build only. + static_assert(Context::kNoContext == 0); + StoreWord(zero_reg, + ExternalReferenceAsOperand( + ExternalReference::context_address(isolate()), scratch)); +#endif } } + } - Call(function); - if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { - // We don't unset the PC; the FP is the source of truth. - if (root_array_available()) { - StoreWord(zero_reg, - MemOperand(kRootRegister, - IsolateData::fast_c_call_caller_fp_offset())); - } else { - DCHECK_NOT_NULL(isolate()); - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - li(scratch, - ExternalReference::fast_c_call_caller_fp_address(isolate())); - StoreWord(zero_reg, MemOperand(scratch)); - } + Call(function); + int call_pc_offset = pc_offset(); + bind(&get_pc); + if (return_location) bind(return_location); + + if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { + // We don't unset the PC; the FP is the source of truth. 
+ if (root_array_available()) { + StoreWord(zero_reg, + MemOperand(kRootRegister, + IsolateData::fast_c_call_caller_fp_offset())); + } else { + DCHECK_NOT_NULL(isolate()); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); + StoreWord(zero_reg, MemOperand(scratch)); } } @@ -6540,14 +6708,19 @@ void MacroAssembler::CallCFunctionHelper( } else { AddWord(sp, sp, Operand(stack_passed_arguments * kSystemPointerSize)); } + + return call_pc_offset; } #undef BRANCH_ARGS_CHECK -void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask, - Condition cc, Label* condition_met) { - And(scratch, object, - Operand(~MemoryChunkHeader::GetAlignmentMaskForAssembler())); +void MacroAssembler::CheckPageFlag(Register object, int mask, Condition cc, + Label* condition_met) { + ASM_CODE_COMMENT(this); + UseScratchRegisterScope temps(this); + temps.Include(t6); + Register scratch = temps.Acquire(); + And(scratch, object, Operand(~MemoryChunk::GetAlignmentMaskForAssembler())); LoadWord(scratch, MemOperand(scratch, MemoryChunkLayout::kFlagsOffset)); And(scratch, scratch, Operand(mask)); Branch(condition_met, cc, scratch, Operand(zero_reg)); @@ -6569,6 +6742,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, } void MacroAssembler::ComputeCodeStartAddress(Register dst) { + ASM_CODE_COMMENT(this); auto pc = -pc_offset(); auipc(dst, 0); if (pc != 0) { @@ -6593,8 +6767,14 @@ void MacroAssembler::LoadCodeInstructionStart(Register destination, Register code_object, CodeEntrypointTag tag) { ASM_CODE_COMMENT(this); +#ifdef V8_ENABLE_SANDBOX + LoadCodeEntrypointViaCodePointer( + destination, + FieldMemOperand(code_object, Code::kSelfIndirectPointerOffset), tag); +#else LoadWord(destination, FieldMemOperand(code_object, Code::kInstructionStartOffset)); +#endif } void MacroAssembler::LoadProtectedPointerField(Register destination, @@ -6613,55 +6793,61 @@ void MacroAssembler::LoadProtectedPointerField(Register destination, #endif } -void MacroAssembler::CallCodeObject(Register code) { +void MacroAssembler::CallCodeObject(Register code_object, + CodeEntrypointTag tag) { ASM_CODE_COMMENT(this); - LoadCodeInstructionStart(code, code); - Call(code); + LoadCodeInstructionStart(code_object, code_object, tag); + Call(code_object); } -void MacroAssembler::JumpCodeObject(Register code, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, CodeEntrypointTag tag, + JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); - LoadCodeInstructionStart(code, code); - Jump(code); + LoadCodeInstructionStart(code_object, code_object, tag); + Jump(code_object); } void MacroAssembler::CallJSFunction(Register function_object) { + ASM_CODE_COMMENT(this); Register code = kJavaScriptCallCodeStartRegister; #ifdef V8_ENABLE_SANDBOX // When the sandbox is enabled, we can directly fetch the entrypoint pointer // from the code pointer table instead of going through the Code object. In // this way, we avoid one memory load on this code path. 
- LoadCodeEntrypointField( - code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); + LoadCodeEntrypointViaCodePointer( + code, FieldMemOperand(function_object, JSFunction::kCodeOffset), + kJSEntrypointTag); Call(code); #else LoadTaggedField(code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); - CallCodeObject(code); + CallCodeObject(code, kJSEntrypointTag); #endif } void MacroAssembler::JumpJSFunction(Register function_object, JumpMode jump_mode) { + ASM_CODE_COMMENT(this); Register code = kJavaScriptCallCodeStartRegister; #ifdef V8_ENABLE_SANDBOX // When the sandbox is enabled, we can directly fetch the entrypoint pointer // from the code pointer table instead of going through the Code object. In // this way, we avoid one memory load on this code path. - LoadCodeEntrypointField( - code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); + LoadCodeEntrypointViaCodePointer( + code, FieldMemOperand(function_object, JSFunction::kCodeOffset), + kJSEntrypointTag); DCHECK_EQ(jump_mode, JumpMode::kJump); // We jump through x17 here because for Branch Identification (BTI) we use // "Call" (`bti c`) rather than "Jump" (`bti j`) landing pads for tail-called // code. See TailCallBuiltin for more information. DCHECK_NE(code, t6); - Mov(t6, code); + mv(t6, code); Jump(t6); #else LoadTaggedField(code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); - JumpCodeObject(code, jump_mode); + JumpCodeObject(code, kJSEntrypointTag, jump_mode); #endif } diff --git a/deps/v8/src/codegen/riscv/macro-assembler-riscv.h b/deps/v8/src/codegen/riscv/macro-assembler-riscv.h index 74431fd9ac7ede..e83daa06a14942 100644 --- a/deps/v8/src/codegen/riscv/macro-assembler-riscv.h +++ b/deps/v8/src/codegen/riscv/macro-assembler-riscv.h @@ -310,8 +310,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void LoadCodeInstructionStart( Register destination, Register code_object, CodeEntrypointTag tag = kDefaultCodeEntrypointTag); - void CallCodeObject(Register code_object); - void JumpCodeObject(Register code_object, + void CallCodeObject(Register code_object, CodeEntrypointTag tag); + void JumpCodeObject(Register code_object, CodeEntrypointTag tag, JumpMode jump_mode = JumpMode::kJump); // Convenience functions to call/jmp to the code of a JSFunction object. @@ -408,16 +408,26 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void MaybeSaveRegisters(RegList registers); void MaybeRestoreRegisters(RegList registers); - void CallEphemeronKeyBarrier(Register object, Register slot_address, + void CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode); - + void CallIndirectPointerBarrier(Register object, Operand offset, + SaveFPRegsMode fp_mode, + IndirectPointerTag tag); void CallRecordWriteStubSaveRegisters( - Register object, Register slot_address, SaveFPRegsMode fp_mode, + Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode = StubCallMode::kCallBuiltinPointer); void CallRecordWriteStub( Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode = StubCallMode::kCallBuiltinPointer); + // For a given |object| and |offset|: + // - Move |object| to |dst_object|. + // - Compute the address of the slot pointed to by |offset| in |object| and + // write it to |dst_slot|. + // This method makes sure |object| and |offset| are allowed to overlap with + // the destination registers. 
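// Illustration (hypothetical registers a0/a1): after
//   MoveObjectAndSlot(a0, a1, object, Operand(offset));
// a0 holds the object and a1 holds object + offset (the slot address); the
// helper is expected to produce this even when `object` or a register-valued
// `offset` aliases a0/a1, e.g. by computing the slot address before moving
// the object.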
+ void MoveObjectAndSlot(Register dst_object, Register dst_slot, + Register object, Operand offset); // Push multiple registers on the stack. // Registers are saved in numerical order, with higher numbered registers // saved in higher memory addresses. @@ -616,19 +626,23 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // garbage collection, since that might move the code and invalidate the // return address (unless this is somehow accounted for by the called // function). - void CallCFunction( + int CallCFunction( ExternalReference function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); + int CallCFunction( Register function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); + int CallCFunction( ExternalReference function, int num_reg_arguments, int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); + int CallCFunction( Register function, int num_reg_arguments, int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); void MovFromFloatResult(DoubleRegister dst); void MovFromFloatParameter(DoubleRegister dst); @@ -644,7 +658,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { li(a1, ref); } - void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, + void CheckPageFlag(Register object, int mask, Condition cc, Label* condition_met); #undef COND_ARGS @@ -1201,6 +1215,26 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // having to place the #ifdefs into the caller. void StoreIndirectPointerField(Register value, MemOperand dst_field_operand); +#ifdef V8_ENABLE_SANDBOX + // Retrieve the heap object referenced by the given indirect pointer handle, + // which can either be a trusted pointer handle or a code pointer handle. + void ResolveIndirectPointerHandle(Register destination, Register handle, + IndirectPointerTag tag); + + // Retrieve the heap object referenced by the given trusted pointer handle. + void ResolveTrustedPointerHandle(Register destination, Register handle, + IndirectPointerTag tag); + // Retrieve the Code object referenced by the given code pointer handle. + void ResolveCodePointerHandle(Register destination, Register handle); + + // Load the pointer to a Code's entrypoint via a code pointer. + // Only available when the sandbox is enabled as it requires the code pointer + // table. 
+ void LoadCodeEntrypointViaCodePointer(Register destination, + MemOperand field_operand, + CodeEntrypointTag tag); +#endif + void AtomicDecompressTaggedSigned(Register dst, const MemOperand& src); void AtomicDecompressTagged(Register dst, const MemOperand& src); @@ -1234,6 +1268,13 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { const MemOperand& dst_field_operand) { Sw(value, dst_field_operand); } + + void AtomicStoreTaggedField(Register src, const MemOperand& dst) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + AddWord(scratch, dst.rm(), dst.offset()); + amoswap_w(true, true, zero_reg, src, scratch); + } #endif // Control-flow integrity: @@ -1334,16 +1375,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // stored. value and scratch registers are clobbered by the operation. // The offset is the offset from the start of the object, not the offset from // the tagged HeapObject pointer. For use with FieldOperand(reg, off). - void RecordWriteField(Register object, int offset, Register value, - RAStatus ra_status, SaveFPRegsMode save_fp, - SmiCheck smi_check = SmiCheck::kInline); + void RecordWriteField( + Register object, int offset, Register value, RAStatus ra_status, + SaveFPRegsMode save_fp, SmiCheck smi_check = SmiCheck::kInline, + SlotDescriptor slot = SlotDescriptor::ForDirectPointerSlot()); // For a given |object| notify the garbage collector that the slot |address| // has been written. |value| is the object being stored. The value and // address registers are clobbered by the operation. - void RecordWrite(Register object, Operand offset, Register value, - RAStatus ra_status, SaveFPRegsMode save_fp, - SmiCheck smi_check = SmiCheck::kInline); + void RecordWrite( + Register object, Operand offset, Register value, RAStatus ra_status, + SaveFPRegsMode save_fp, SmiCheck smi_check = SmiCheck::kInline, + SlotDescriptor slot = SlotDescriptor::ForDirectPointerSlot()); // void Pref(int32_t hint, const MemOperand& rs); @@ -1546,6 +1589,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // enabled via --debug-code. void AssertGeneratorObject(Register object); + // Like Assert(), but without condition. + // Use --debug_code to enable. + void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE; + // Abort execution if argument is not undefined or an AllocationSite, enabled // via --debug-code. void AssertUndefinedOrAllocationSite(Register object, Register scratch); @@ -1577,9 +1624,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, Label* done); - void CallCFunctionHelper( + int CallCFunctionHelper( Register function, int num_reg_arguments, int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); // TODO(RISCV) Reorder parameters so out parameters come last. 
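// Note on the AtomicStoreTaggedField helper above: amoswap.w with the aq and
// rl bits set ("amoswap.w.aqrl zero, src, (addr)") atomically stores the
// 32-bit compressed tagged value and discards the previous contents into x0,
// i.e. roughly (sketch):
//   std::atomic_ref<int32_t>(*reinterpret_cast<int32_t*>(addr)).exchange(src);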
bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits); diff --git a/deps/v8/src/codegen/riscv/register-riscv.h b/deps/v8/src/codegen/riscv/register-riscv.h index 68986705969366..fad18fd2cc874d 100644 --- a/deps/v8/src/codegen/riscv/register-riscv.h +++ b/deps/v8/src/codegen/riscv/register-riscv.h @@ -305,6 +305,7 @@ constexpr Register kRuntimeCallArgCountRegister = a0; constexpr Register kRuntimeCallArgvRegister = a2; constexpr Register kWasmInstanceRegister = a7; constexpr Register kWasmCompileLazyFuncIndexRegister = t0; +constexpr Register kWasmTrapHandlerFaultAddressRegister = t6; constexpr DoubleRegister kFPReturnRegister0 = fa0; constexpr VRegister kSimd128ScratchReg = v24; diff --git a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h index 69707d7094579f..54c8ad1061c4f5 100644 --- a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h +++ b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h @@ -84,6 +84,21 @@ constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() { return r5; } +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::EnumIndexRegister() { + return r6; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::CacheTypeRegister() { + return r7; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::SlotRegister() { + return r4; +} + // static constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() { return kInterpreterAccumulatorRegister; @@ -317,7 +332,8 @@ CallApiCallbackGenericDescriptor::TopmostScriptHavingContextRegister() { return r3; } // static -constexpr Register CallApiCallbackGenericDescriptor::CallHandlerInfoRegister() { +constexpr Register +CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister() { return r5; } // static diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc index 88d18f14099f4e..6c46352be61907 100644 --- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc +++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc @@ -20,7 +20,7 @@ #include "src/debug/debug.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frames-inl.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #include "src/init/bootstrapper.h" #include "src/logging/counters.h" #include "src/objects/smi.h" @@ -565,8 +565,7 @@ void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code, } Operand MacroAssembler::ClearedValue() const { - return Operand( - static_cast(HeapObjectReference::ClearedValue(isolate()).ptr())); + return Operand(static_cast(i::ClearedValue(isolate()).ptr())); } void MacroAssembler::Call(Label* target) { b(r14, target); } @@ -2512,42 +2511,61 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1, } } -void MacroAssembler::CallCFunction(ExternalReference function, - int num_reg_arguments, - int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots) { +int MacroAssembler::CallCFunction(ExternalReference function, + int num_reg_arguments, + int num_double_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_label) { Move(ip, function); - CallCFunction(ip, num_reg_arguments, num_double_arguments, - set_isolate_data_slots); + return CallCFunction(ip, num_reg_arguments, num_double_arguments, + set_isolate_data_slots, return_label); } -void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, - int num_double_arguments, - 
SetIsolateDataSlots set_isolate_data_slots) { +int MacroAssembler::CallCFunction(Register function, int num_reg_arguments, + int num_double_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_label) { ASM_CODE_COMMENT(this); DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); DCHECK(has_frame()); + Label get_pc; + if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { // Save the frame pointer and PC so that the stack layout remains iterable, // even without an ExitFrame which normally exists between JS and C frames. // See x64 code for reasoning about how to address the isolate data fields. + larl(r0, &get_pc); if (root_array_available()) { - LoadPC(r0); StoreU64(r0, MemOperand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset())); StoreU64(fp, MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset())); +#if DEBUG + // Reset Isolate::context field right before the fast C call such that the + // GC can visit this field unconditionally. This is necessary because + // CEntry sets it to kInvalidContext in debug build only. + mov(r0, Operand(Context::kNoContext)); + StoreRootRelative(IsolateData::context_offset(), r0); +#endif } else { DCHECK_NOT_NULL(isolate()); Register addr_scratch = r1; Move(addr_scratch, ExternalReference::fast_c_call_caller_pc_address(isolate())); - LoadPC(r0); StoreU64(r0, MemOperand(addr_scratch)); Move(addr_scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); StoreU64(fp, MemOperand(addr_scratch)); +#if DEBUG + // Reset Isolate::context field right before the fast C call such that the + // GC can visit this field unconditionally. This is necessary because + // CEntry sets it to kInvalidContext in debug build only. + mov(r0, Operand(Context::kNoContext)); + StoreU64( + r0, ExternalReferenceAsOperand( + ExternalReference::context_address(isolate()), addr_scratch)); +#endif } } @@ -2561,6 +2579,9 @@ void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, } Call(dest); + int call_pc_offset = pc_offset(); + bind(&get_pc); + if (return_label) bind(return_label); if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { // We don't unset the PC; the FP is the source of truth. 
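// Illustrative call site for the new int return value and optional label
// (hypothetical; ExternalReference::foo_function is a placeholder):
//   Label return_location;
//   int pc_after_call = masm->CallCFunction(ExternalReference::foo_function(),
//                                           /*num_arguments=*/2,
//                                           SetIsolateDataSlots::kYes,
//                                           &return_location);
//   // pc_after_call == masm->pc_offset() immediately after the call, and
//   // return_location is bound at that same position, e.g. for recording
//   // safepoint or deopt metadata for the call.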
@@ -2589,17 +2610,22 @@ void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, } else { la(sp, MemOperand(sp, stack_space * kSystemPointerSize)); } + + return call_pc_offset; } -void MacroAssembler::CallCFunction(ExternalReference function, - int num_arguments, - SetIsolateDataSlots set_isolate_data_slots) { - CallCFunction(function, num_arguments, 0, set_isolate_data_slots); +int MacroAssembler::CallCFunction(ExternalReference function, int num_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_label) { + return CallCFunction(function, num_arguments, 0, set_isolate_data_slots, + return_label); } -void MacroAssembler::CallCFunction(Register function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots) { - CallCFunction(function, num_arguments, 0, set_isolate_data_slots); +int MacroAssembler::CallCFunction(Register function, int num_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_label) { + return CallCFunction(function, num_arguments, 0, set_isolate_data_slots, + return_label); } void MacroAssembler::CheckPageFlag( diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h index 3c369e6b65b6ef..e175037a0f587f 100644 --- a/deps/v8/src/codegen/s390/macro-assembler-s390.h +++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h @@ -928,19 +928,23 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // garbage collection, since that might move the code and invalidate the // return address (unless this is somehow accounted for by the called // function). - void CallCFunction( + int CallCFunction( ExternalReference function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_label = nullptr); + int CallCFunction( Register function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_label = nullptr); + int CallCFunction( ExternalReference function, int num_reg_arguments, int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_label = nullptr); + int CallCFunction( Register function, int num_reg_arguments, int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_label = nullptr); void MovFromFloatParameter(DoubleRegister dst); void MovFromFloatResult(DoubleRegister dst); diff --git a/deps/v8/src/codegen/signature.h b/deps/v8/src/codegen/signature.h index 41bf09355e3dc6..df616a0d550575 100644 --- a/deps/v8/src/codegen/signature.h +++ b/deps/v8/src/codegen/signature.h @@ -32,12 +32,14 @@ class Signature : public ZoneObject { size_t parameter_count() const { return parameter_count_; } T GetParam(size_t index) const { - DCHECK_LT(index, parameter_count_); + // If heap memory is corrupted, we may get confused about the number of + // parameters during compilation. These SBXCHECKs defend against that. 
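// Background (sketch): SBXCHECK_* is, in effect, CHECK_* in sandbox-enabled
// builds and DCHECK_* otherwise, so unlike the DCHECKs it replaces the bound
// still holds in release builds where parameter_count_ may have been
// corrupted:
//   SBXCHECK_LT(index, parameter_count_);  // checked even in release (sandbox)
//   DCHECK_LT(index, parameter_count_);    // debug-only, the previous form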
+ SBXCHECK_LT(index, parameter_count_); return reps_[return_count_ + index]; } T GetReturn(size_t index = 0) const { - DCHECK_LT(index, return_count_); + SBXCHECK_LT(index, return_count_); return reps_[index]; } diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc index 44e1b3e1827736..ddba35b281b22e 100644 --- a/deps/v8/src/codegen/source-position-table.cc +++ b/deps/v8/src/codegen/source-position-table.cc @@ -118,7 +118,8 @@ void DecodeEntry(base::Vector bytes, int* index, entry->source_position = DecodeInt(bytes, index); } -base::Vector VectorFromByteArray(Tagged byte_array) { +base::Vector VectorFromByteArray( + Tagged byte_array) { return base::Vector(byte_array->begin(), byte_array->length()); } @@ -171,13 +172,13 @@ V8_INLINE void SourcePositionTableBuilder::AddEntry( } template -Handle SourcePositionTableBuilder::ToSourcePositionTable( +Handle SourcePositionTableBuilder::ToSourcePositionTable( IsolateT* isolate) { - if (bytes_.empty()) return isolate->factory()->empty_byte_array(); + if (bytes_.empty()) return isolate->factory()->empty_trusted_byte_array(); DCHECK(!Omit()); - Handle table = isolate->factory()->NewByteArray( - static_cast(bytes_.size()), AllocationType::kOld); + Handle table = + isolate->factory()->NewTrustedByteArray(static_cast(bytes_.size())); MemCopy(table->begin(), bytes_.data(), bytes_.size()); #ifdef ENABLE_SLOW_DCHECKS @@ -194,10 +195,10 @@ Handle SourcePositionTableBuilder::ToSourcePositionTable( } template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) - Handle SourcePositionTableBuilder::ToSourcePositionTable( + Handle SourcePositionTableBuilder::ToSourcePositionTable( Isolate* isolate); template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) - Handle SourcePositionTableBuilder::ToSourcePositionTable( + Handle SourcePositionTableBuilder::ToSourcePositionTable( LocalIsolate* isolate); base::OwnedVector @@ -229,7 +230,7 @@ void SourcePositionTableIterator::Initialize() { } SourcePositionTableIterator::SourcePositionTableIterator( - Tagged byte_array, IterationFilter iteration_filter, + Tagged byte_array, IterationFilter iteration_filter, FunctionEntryFilter function_entry_filter) : raw_table_(VectorFromByteArray(byte_array)), iteration_filter_(iteration_filter), @@ -238,7 +239,7 @@ SourcePositionTableIterator::SourcePositionTableIterator( } SourcePositionTableIterator::SourcePositionTableIterator( - Handle byte_array, IterationFilter iteration_filter, + Handle byte_array, IterationFilter iteration_filter, FunctionEntryFilter function_entry_filter) : table_(byte_array), iteration_filter_(iteration_filter), diff --git a/deps/v8/src/codegen/source-position-table.h b/deps/v8/src/codegen/source-position-table.h index 4d8e6de0a05de5..a3bfbc9d5e1553 100644 --- a/deps/v8/src/codegen/source-position-table.h +++ b/deps/v8/src/codegen/source-position-table.h @@ -16,7 +16,7 @@ namespace v8 { namespace internal { -class ByteArray; +class TrustedByteArray; class Zone; struct PositionTableEntry { @@ -53,7 +53,7 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder { template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) - Handle ToSourcePositionTable(IsolateT* isolate); + Handle ToSourcePositionTable(IsolateT* isolate); base::OwnedVector ToSourcePositionTableVector(); inline bool Omit() const { return mode_ != RECORD_SOURCE_POSITIONS; } @@ -97,7 +97,7 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator { // Handlified iterator allows allocation, but it needs a handle (and thus // a handle scope). This is the preferred version. 
explicit SourcePositionTableIterator( - Handle byte_array, + Handle byte_array, IterationFilter iteration_filter = kJavaScriptOnly, FunctionEntryFilter function_entry_filter = kSkipFunctionEntry); @@ -105,7 +105,7 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator { // allocation during its lifetime. This is useful if there is no handle // scope around. explicit SourcePositionTableIterator( - Tagged byte_array, + Tagged byte_array, IterationFilter iteration_filter = kJavaScriptOnly, FunctionEntryFilter function_entry_filter = kSkipFunctionEntry); @@ -151,7 +151,7 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator { static const int kDone = -1; base::Vector raw_table_; - Handle table_; + Handle table_; int index_ = 0; PositionTableEntry current_; IterationFilter iteration_filter_; diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h index e75010153dc0a1..268a83f13e9be2 100644 --- a/deps/v8/src/codegen/tnode.h +++ b/deps/v8/src/codegen/tnode.h @@ -102,6 +102,10 @@ struct ExternalPointerT : UntaggedT { }; #endif +struct Float16T : Word32T { + static constexpr MachineType kMachineType = MachineType::Uint16(); +}; + struct Float32T : UntaggedT { static const MachineRepresentation kMachineRepresentation = MachineRepresentation::kFloat32; @@ -129,6 +133,13 @@ struct BoolT : Word32T { template struct PairT {}; +template +struct UnionT; +template +struct is_union_t : public std::false_type {}; +template +struct is_union_t> : public std::true_type {}; + struct Simd128T : UntaggedT { static const MachineRepresentation kMachineRepresentation = MachineRepresentation::kSimd128; @@ -213,12 +224,6 @@ struct MachineRepresentationOf>> { static constexpr MachineRepresentation value = MachineTypeOf::value.representation(); }; -template -struct MachineRepresentationOf< - T, std::enable_if_t>> { - static constexpr MachineRepresentation value = - MachineTypeOf::value.representation(); -}; template <> struct MachineRepresentationOf { static constexpr MachineRepresentation value = @@ -239,10 +244,8 @@ template struct is_valid_type_tag { static const bool value = is_taggable_v || std::is_base_of::value || - std::is_base_of::value || std::is_same::value; - static const bool is_tagged = - is_taggable_v || std::is_base_of::value; + static const bool is_tagged = is_taggable_v; }; template @@ -279,25 +282,19 @@ using ContextOrEmptyContext = UnionT; using BuiltinPtr = Smi; template -struct is_subtype, U> +struct is_subtype, U, 1> : public std::conjunction, is_subtype> {}; template -struct is_subtype> +struct is_subtype, 1> : public std::disjunction, is_subtype> {}; template -struct is_subtype, UnionT> +struct is_subtype, UnionT, 1> : public std::conjunction>, is_subtype>> {}; template <> struct is_subtype { static const bool value = true; }; -// All subtypes of HeapObject are also subtypes of HeapObjectReference. -template -struct is_subtype::value>> - : public std::disjunction, - is_subtype> {}; template struct types_have_common_values { @@ -352,7 +349,7 @@ struct types_have_common_values, UnionT> { // TNode is an SSA value with the static type tag T, which is one of the // following: -// - MaybeObject represents the type of all tagged values, including weak +// - MaybeObject> represents the type of all tagged values, including weak // pointers. // - a subclass of internal::Object represents a non-weak tagged type. 
// - a subclass of internal::UntaggedT represents an untagged type @@ -396,6 +393,12 @@ class TNode { compiler::Node* node_; }; +template +class TNode> { + static_assert(!std::is_same_v, + "Don't write TNode>, just write TNode directly."); +}; + // SloppyTNode is a variant of TNode and allows implicit casts from // Node*. It is intended for function arguments as long as some call sites // still use untyped Node* arguments. diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h index 253d3edad4b927..548c44ac3d7d0b 100644 --- a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h +++ b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h @@ -97,6 +97,21 @@ constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() { return rbx; } +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::EnumIndexRegister() { + return rdi; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::CacheTypeRegister() { + return r8; +} + +// static +constexpr Register EnumeratedKeyedLoadBaselineDescriptor::SlotRegister() { + return rcx; +} + // static constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() { return kInterpreterAccumulatorRegister; @@ -342,7 +357,8 @@ CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister() { return rcx; } // static -constexpr Register CallApiCallbackGenericDescriptor::CallHandlerInfoRegister() { +constexpr Register +CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister() { return rbx; } // static diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc index e25855ca7399a1..4b6ab30ce3db52 100644 --- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc +++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc @@ -25,7 +25,7 @@ #include "src/debug/debug.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frames-inl.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #include "src/init/bootstrapper.h" #include "src/logging/counters.h" #include "src/objects/instance-type-inl.h" @@ -594,9 +594,10 @@ void MacroAssembler::ResolveTrustedPointerHandle(Register destination, movq(destination, Operand{kRootRegister, IsolateData::trusted_pointer_table_offset()}); movq(destination, Operand{destination, handle, times_8, 0}); - // The LSB is used as marking bit by the trusted pointer table, so here we - // have to set it using a bitwise OR as it may or may not be set. - orq(destination, Immediate(kHeapObjectTag)); + // Untag the pointer and remove the marking bit in one operation. + Register tag_reg = handle; + movq(tag_reg, Immediate64(~(tag | kTrustedPointerTableMarkBit))); + andq(destination, tag_reg); } void MacroAssembler::ResolveCodePointerHandle(Register destination, @@ -3201,8 +3202,7 @@ void MacroAssembler::TestCodeIsTurbofanned(Register code) { } Immediate MacroAssembler::ClearedValue() const { - return Immediate( - static_cast(HeapObjectReference::ClearedValue(isolate()).ptr())); + return Immediate(static_cast(i::ClearedValue(isolate()).ptr())); } #ifdef V8_ENABLE_DEBUG_CODE @@ -3960,6 +3960,13 @@ int MacroAssembler::CallCFunction(Register function, int num_arguments, kScratchRegister); movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()), rbp); +#if DEBUG + // Reset Isolate::context field right before the fast C call such that the + // GC can visit this field unconditionally. 
This is necessary because + // CEntry sets it to kInvalidContext in debug build only. + movq(Operand(kRootRegister, IsolateData::context_offset()), + Immediate(Context::kNoContext)); +#endif } else { DCHECK_NOT_NULL(isolate()); // Use alternative scratch register in order not to overwrite @@ -3974,7 +3981,14 @@ int MacroAssembler::CallCFunction(Register function, int num_arguments, movq(ExternalReferenceAsOperand( ExternalReference::fast_c_call_caller_fp_address(isolate())), rbp); - +#if DEBUG + // Reset Isolate::context field right before the fast C call such that the + // GC can visit this field unconditionally. This is necessary because + // CEntry sets it to kInvalidContext in debug build only. + movq(ExternalReferenceAsOperand( + ExternalReference::context_address(isolate()), kScratchRegister), + Immediate(Context::kNoContext)); +#endif popq(scratch); } } @@ -4009,7 +4023,7 @@ int MacroAssembler::CallCFunction(Register function, int num_arguments, void MacroAssembler::MemoryChunkHeaderFromObject(Register object, Register header) { constexpr intptr_t alignment_mask = - MemoryChunkHeader::GetAlignmentMaskForAssembler(); + MemoryChunk::GetAlignmentMaskForAssembler(); if (header == object) { andq(header, Immediate(~alignment_mask)); } else { diff --git a/deps/v8/src/common/code-memory-access.h b/deps/v8/src/common/code-memory-access.h index e0d317b07fe17f..80d96bb4ce02ec 100644 --- a/deps/v8/src/common/code-memory-access.h +++ b/deps/v8/src/common/code-memory-access.h @@ -12,7 +12,7 @@ #include "src/base/build_config.h" #include "src/base/macros.h" #include "src/base/platform/mutex.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" namespace v8 { namespace internal { diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h index 5c8e134266f48f..00e855c472187d 100644 --- a/deps/v8/src/common/globals.h +++ b/deps/v8/src/common/globals.h @@ -862,8 +862,8 @@ const Address kWeakHeapObjectMask = 1 << 1; // for cleared weak reference. // Note, that real heap objects can't have lower 32 bits equal to 3 because // this offset belongs to page header. So, in either case it's enough to -// compare only the lower 32 bits of a MaybeObject value in order to figure -// out if it's a cleared reference or not. +// compare only the lower 32 bits of a Tagged value in order to +// figure out if it's a cleared reference or not. const uint32_t kClearedWeakHeapObjectLower32 = 3; // Zap-value: The value used for zapping dead objects. @@ -950,7 +950,6 @@ using DirectHandle = Handle; #endif class Heap; class HeapObject; -class HeapObjectReference; class IC; template using IndirectHandle = Handle; @@ -976,7 +975,6 @@ using MaybeDirectHandle = MaybeHandle; #endif template using MaybeIndirectHandle = MaybeHandle; -class MaybeObject; #ifdef V8_ENABLE_DIRECT_HANDLE class MaybeObjectDirectHandle; #endif @@ -985,7 +983,9 @@ class MaybeObjectHandle; using MaybeObjectDirectHandle = MaybeObjectHandle; #endif using MaybeObjectIndirectHandle = MaybeObjectHandle; -class MemoryChunk; +template +class MaybeWeak; +class MutablePageMetadata; class MessageLocation; class ModuleScope; class Name; @@ -1046,6 +1046,9 @@ namespace compiler { class AccessBuilder; } +using MaybeObject = MaybeWeak; +using HeapObjectReference = MaybeWeak; + // Slots are either full-pointer slots or compressed slots depending on whether // pointer compression is enabled or not. 
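// Sketch (hypothetical helper) of the check that this property enables:
//   bool IsClearedWeakReference(Tagged<MaybeObject> value) {
//     return static_cast<uint32_t>(value.ptr()) ==
//            kClearedWeakHeapObjectLower32;
//   }
// Only the low 32 bits need comparing, because no real heap object can have
// its low 32 bits equal to 3; that offset falls inside the page header.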
struct SlotTraits { @@ -1081,12 +1084,13 @@ struct SlotTraits { using ObjectSlot = SlotTraits::TObjectSlot; // A MaybeObjectSlot instance describes a kTaggedSize-sized on-heap field -// ("slot") holding MaybeObject (smi or weak heap object or strong heap object). +// ("slot") holding Tagged (smi or weak heap object or strong heap +// object). using MaybeObjectSlot = SlotTraits::TMaybeObjectSlot; // A HeapObjectSlot instance describes a kTaggedSize-sized field ("slot") // holding a weak or strong pointer to a heap object (think: -// HeapObjectReference). +// Tagged). using HeapObjectSlot = SlotTraits::THeapObjectSlot; // An OffHeapObjectSlot instance describes a kTaggedSize-sized field ("slot") @@ -1988,6 +1992,8 @@ class BinaryOperationFeedback { kString = 0x10, kBigInt64 = 0x20, kBigInt = 0x60, + kStringWrapper = 0x80, + kStringOrStringWrapper = 0x90, kAny = 0x7F }; }; @@ -2139,6 +2145,7 @@ enum ExternalArrayType { kExternalUint16Array, kExternalInt32Array, kExternalUint32Array, + kExternalFloat16Array, kExternalFloat32Array, kExternalFloat64Array, kExternalUint8ClampedArray, diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc index 5f4ccc0b651acc..84068918b42c3d 100644 --- a/deps/v8/src/compiler/access-builder.cc +++ b/deps/v8/src/compiler/access-builder.cc @@ -612,6 +612,15 @@ FieldAccess AccessBuilder::ForJSIteratorResultValue() { return access; } +// static +FieldAccess AccessBuilder::ForJSPrimitiveWrapperValue() { + FieldAccess access = {kTaggedBase, JSPrimitiveWrapper::kValueOffset, + MaybeHandle(), OptionalMapRef(), + Type::NonInternal(), MachineType::AnyTagged(), + kFullWriteBarrier, "JSPrimitiveWrapperValue"}; + return access; +} + // static FieldAccess AccessBuilder::ForJSRegExpData() { FieldAccess access = {kTaggedBase, JSRegExp::kDataOffset, @@ -1214,6 +1223,10 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type, MachineType::Uint32(), kNoWriteBarrier}; return access; } + case kExternalFloat16Array: { + // TODO(v8:14012): support machine logic + UNIMPLEMENTED(); + } case kExternalFloat32Array: { ElementAccess access = {taggedness, header_size, Type::Number(), MachineType::Float32(), kNoWriteBarrier}; diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h index ced4dd466c8ad0..1ccb78868950a0 100644 --- a/deps/v8/src/compiler/access-builder.h +++ b/deps/v8/src/compiler/access-builder.h @@ -188,6 +188,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final // Provides access to JSIteratorResult::value() field. static FieldAccess ForJSIteratorResultValue(); + static FieldAccess ForJSPrimitiveWrapperValue(); + // Provides access to JSRegExp::data() field. 
static FieldAccess ForJSRegExpData(); diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc index 0ebea44f6794a4..ab99e85e7c502b 100644 --- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc +++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc @@ -17,7 +17,7 @@ #include "src/compiler/backend/instruction-codes.h" #include "src/compiler/node-matchers.h" #include "src/compiler/osr.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #include "src/utils/boxed-float.h" #if V8_ENABLE_WEBASSEMBLY diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc index 1bd87c7e9736f9..2ef4758fffc4fe 100644 --- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc +++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc @@ -4527,8 +4527,9 @@ void InstructionSelectorT::VisitSetStackPointer(Node* node) { template <> void InstructionSelectorT::VisitSetStackPointer( node_t node) { - // TODO(thibaudm): Implement. - UNREACHABLE(); + OperandGenerator g(this); + auto input = g.UseRegister(this->input_at(node, 0)); + Emit(kArchSetStackPointer, 0, nullptr, 1, &input); } template diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc index 541597ef969a8c..c0f77e938021d9 100644 --- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc @@ -14,7 +14,7 @@ #include "src/compiler/node-matchers.h" #include "src/compiler/osr.h" #include "src/execution/frame-constants.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #if V8_ENABLE_WEBASSEMBLY #include "src/wasm/wasm-objects.h" @@ -53,6 +53,15 @@ class Arm64OperandConverter final : public InstructionOperandConverter { return InputDoubleRegister(index).S(); } + DoubleRegister InputFloat32OrFPZeroRegister(size_t index) { + if (instr_->InputAt(index)->IsImmediate()) { + DCHECK_EQ(0, base::bit_cast(InputFloat32(index))); + return fp_zero.S(); + } + DCHECK(instr_->InputAt(index)->IsFPRegister()); + return InputDoubleRegister(index).S(); + } + CPURegister InputFloat64OrZeroRegister(size_t index) { if (instr_->InputAt(index)->IsImmediate()) { DCHECK_EQ(0, base::bit_cast(InputDouble(index))); @@ -62,6 +71,15 @@ class Arm64OperandConverter final : public InstructionOperandConverter { return InputDoubleRegister(index); } + DoubleRegister InputFloat64OrFPZeroRegister(size_t index) { + if (instr_->InputAt(index)->IsImmediate()) { + DCHECK_EQ(0, base::bit_cast(InputDouble(index))); + return fp_zero; + } + DCHECK(instr_->InputAt(index)->IsDoubleRegister()); + return InputDoubleRegister(index); + } + size_t OutputCount() { return instr_->OutputCount(); } DoubleRegister OutputFloat32Register(size_t index = 0) { @@ -2888,8 +2906,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Mov(temp, src1); src1 = temp; } - // Perform shuffle as a vmov per lane. int32_t shuffle = i.InputInt32(2); + + // Check whether we can reduce the number of vmovs by performing a dup + // first. 
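// Worked example (sketch): for a single-input 32x4 shuffle {2,2,0,2} the lane
// counts are {1,0,3,0}, so lane 2 is the duplicate. The emitted sequence is
//   Dup  dst.4s, src0.s[2]      // fill every lane with lane 2
//   Mov  dst.s[2], src0.s[0]    // fix up the single differing lane
// i.e. two instructions instead of four per-lane moves.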
+ if (src0 == src1) { + const std::array lanes{shuffle & 0x3, shuffle >> 8 & 0x3, + shuffle >> 16 & 0x3, + shuffle >> 24 & 0x3}; + std::array lane_counts{}; + for (int lane : lanes) { + ++lane_counts[lane]; + } + + int duplicate_lane = -1; + for (int lane = 0; lane < 4; ++lane) { + if (lane_counts[lane] > 1) { + duplicate_lane = lane; + break; + } + } + + if (duplicate_lane != -1) { + __ Dup(dst, src0, duplicate_lane); + for (int i = 0; i < 4; ++i) { + int lane = lanes[i]; + if (lane == duplicate_lane) continue; + __ Mov(dst, i, src0, lane); + } + break; + } + } + + // Perform shuffle as a vmov per lane. for (int i = 0; i < 4; i++) { VRegister src = src0; int lane = shuffle & 0x7; @@ -2955,6 +3004,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Tbl(dst, src0, src1, temp.V16B()); } break; + } + case kArm64S32x4Reverse: { + Simd128Register dst = i.OutputSimd128Register().V16B(), + src = i.InputSimd128Register(0).V16B(); + __ Rev64(dst.V4S(), src.V4S()); + __ Ext(dst.V16B(), dst.V16B(), dst.V16B(), 8); + break; } SIMD_UNOP_CASE(kArm64S32x2Reverse, Rev64, 4S); SIMD_UNOP_CASE(kArm64S16x4Reverse, Rev64, 8H); @@ -3035,6 +3091,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Cset(i.OutputRegister32(), ne); break; } + case kArm64S32x4OneLaneSwizzle: { + Simd128Register dst = i.OutputSimd128Register().V4S(), + src = i.InputSimd128Register(0).V4S(); + int from = i.InputInt32(1); + int to = i.InputInt32(2); + if (dst != src) { + __ Mov(dst, src); + } + __ Mov(dst, to, src, from); + break; + } #define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \ case Op: { \ UseScratchRegisterScope scope(masm()); \ @@ -3171,21 +3238,21 @@ void CodeGenerator::AssembleArchSelect(Instruction* instr, size_t false_value_index = instr->InputCount() - 1; if (rep == MachineRepresentation::kFloat32) { __ Fcsel(i.OutputFloat32Register(output_index), - i.InputFloat32Register(true_value_index), - i.InputFloat32Register(false_value_index), cc); + i.InputFloat32OrFPZeroRegister(true_value_index), + i.InputFloat32OrFPZeroRegister(false_value_index), cc); } else if (rep == MachineRepresentation::kFloat64) { __ Fcsel(i.OutputFloat64Register(output_index), - i.InputFloat64Register(true_value_index), - i.InputFloat64Register(false_value_index), cc); + i.InputFloat64OrFPZeroRegister(true_value_index), + i.InputFloat64OrFPZeroRegister(false_value_index), cc); } else if (rep == MachineRepresentation::kWord32) { __ Csel(i.OutputRegister32(output_index), - i.InputRegister32(true_value_index), - i.InputRegister32(false_value_index), cc); + i.InputOrZeroRegister32(true_value_index), + i.InputOrZeroRegister32(false_value_index), cc); } else { DCHECK_EQ(rep, MachineRepresentation::kWord64); __ Csel(i.OutputRegister64(output_index), - i.InputRegister64(true_value_index), - i.InputRegister64(false_value_index), cc); + i.InputOrZeroRegister64(true_value_index), + i.InputOrZeroRegister64(false_value_index), cc); } } diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h index 7f71ca4bbae17f..3c2213bd14fea6 100644 --- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h +++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h @@ -111,6 +111,8 @@ namespace compiler { V(Arm64S8x16Concat) \ V(Arm64I8x16Swizzle) \ V(Arm64I8x16Shuffle) \ + V(Arm64S32x4Reverse) \ + V(Arm64S32x4OneLaneSwizzle) \ V(Arm64S32x2Reverse) \ V(Arm64S16x4Reverse) \ V(Arm64S16x2Reverse) \ diff --git 
a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc index 86c399e63ae697..a2a57b4e484029 100644 --- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc @@ -250,6 +250,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64S32x4UnzipRight: case kArm64S32x4TransposeLeft: case kArm64S32x4TransposeRight: + case kArm64S32x4OneLaneSwizzle: case kArm64S32x4Shuffle: case kArm64S16x8ZipLeft: case kArm64S16x8ZipRight: @@ -266,6 +267,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64S8x16Concat: case kArm64I8x16Swizzle: case kArm64I8x16Shuffle: + case kArm64S32x4Reverse: case kArm64S32x2Reverse: case kArm64S16x4Reverse: case kArm64S16x2Reverse: diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc index 7dce0586e9d774..78cc0feb6f0821 100644 --- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc @@ -13,8 +13,10 @@ #include "src/compiler/machine-operator.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" +#include "src/compiler/turboshaft/operation-matcher.h" #include "src/compiler/turboshaft/operations.h" #include "src/compiler/turboshaft/opmasks.h" +#include "src/compiler/turboshaft/representations.h" #include "src/flags/flags.h" namespace v8 { @@ -50,21 +52,39 @@ class Arm64OperandGeneratorT final : public OperandGeneratorT { return UseRegister(node); } - // Use the zero register if the node has the immediate value zero, otherwise - // assign a register. - InstructionOperand UseRegisterOrImmediateZero(typename Adapter::node_t node) { + bool IsImmediateZero(typename Adapter::node_t node) { if (this->is_constant(node)) { auto constant = selector()->constant_view(node); if ((IsIntegerConstant(constant) && GetIntegerConstantValue(constant) == 0) || (constant.is_float() && - (base::bit_cast(constant.float_value()) == 0))) { - return UseImmediate(node); + base::bit_cast(constant.float_value()) == 0)) { + return true; } } + return false; + } + + // Use the zero register if the node has the immediate value zero, otherwise + // assign a register. + InstructionOperand UseRegisterOrImmediateZero(typename Adapter::node_t node) { + if (IsImmediateZero(node)) { + return UseImmediate(node); + } return UseRegister(node); } + // Use the zero register if the node has the immediate value zero, otherwise + // assign a register, keeping it alive for the whole sequence of continuation + // instructions. + InstructionOperand UseRegisterAtEndOrImmediateZero( + typename Adapter::node_t node) { + if (IsImmediateZero(node)) { + return UseImmediate(node); + } + return this->UseRegisterAtEnd(node); + } + // Use the provided node if it has the required value, or create a // TempImmediate otherwise. 
InstructionOperand UseImmediateOrTemp(node_t node, int32_t value) { @@ -541,46 +561,93 @@ bool TryMatchAnyExtend(Arm64OperandGeneratorT* g, return false; } -bool TryMatchAnyExtend(Arm64OperandGeneratorT* g, - InstructionSelectorT* selector, - turboshaft::OpIndex node, turboshaft::OpIndex left_node, - turboshaft::OpIndex right_node, - InstructionOperand* left_op, - InstructionOperand* right_op, InstructionCode* opcode) { +bool TryMatchBitwiseAndSmallMask(turboshaft::OperationMatcher& matcher, + turboshaft::OpIndex op, + turboshaft::OpIndex* left, int32_t* mask) { using namespace turboshaft; // NOLINT(build/namespaces) - if (!selector->CanCover(node, right_node)) return false; - - const Operation& right = selector->Get(right_node); + if (const ChangeOp* change_op = + matcher.TryCast(op)) { + return TryMatchBitwiseAndSmallMask(matcher, change_op->input(), left, mask); + } if (const WordBinopOp* bitwise_and = - right.TryCast()) { - int32_t mask; - if (selector->MatchIntegralWord32Constant(bitwise_and->right(), &mask) && - (mask == 0xFF || mask == 0xFFFF)) { - *left_op = g->UseRegister(left_node); - *right_op = g->UseRegister(bitwise_and->left()); - *opcode |= AddressingModeField::encode( - (mask == 0xFF) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH); + matcher.TryCast(op)) { + if (matcher.MatchIntegralWord32Constant(bitwise_and->right(), mask) && + (*mask == 0xFF || *mask == 0xFFFF)) { + *left = bitwise_and->left(); return true; } - } else if (const ShiftOp* sar = - right.TryCast()) { + if (matcher.MatchIntegralWord32Constant(bitwise_and->left(), mask) && + (*mask == 0xFF || *mask == 0xFFFF)) { + *left = bitwise_and->right(); + return true; + } + } + return false; +} + +bool TryMatchSignExtendShift(InstructionSelectorT* selector, + turboshaft::OpIndex op, turboshaft::OpIndex* left, + int32_t* shift_by) { + using namespace turboshaft; // NOLINT(build/namespaces) + if (const ChangeOp* change_op = + selector->TryCast(op)) { + return TryMatchSignExtendShift(selector, change_op->input(), left, + shift_by); + } + + if (const ShiftOp* sar = + selector->TryCast(op)) { const Operation& sar_lhs = selector->Get(sar->left()); if (sar_lhs.Is() && - selector->CanCover(right_node, sar->left())) { + selector->CanCover(op, sar->left())) { const ShiftOp& shl = sar_lhs.Cast(); int32_t sar_by, shl_by; if (selector->MatchIntegralWord32Constant(sar->right(), &sar_by) && selector->MatchIntegralWord32Constant(shl.right(), &shl_by) && sar_by == shl_by && (sar_by == 16 || sar_by == 24)) { - *left_op = g->UseRegister(left_node); - *right_op = g->UseRegister(shl.left()); - *opcode |= AddressingModeField::encode( - (sar_by == 24) ? 
kMode_Operand2_R_SXTB : kMode_Operand2_R_SXTH); + *left = shl.left(); + *shift_by = sar_by; return true; } } - } else if (const ChangeOp* change_op = - right.TryCast()) { + } + return false; +} + +bool TryMatchAnyExtend(Arm64OperandGeneratorT* g, + InstructionSelectorT* selector, + turboshaft::OpIndex node, turboshaft::OpIndex left_node, + turboshaft::OpIndex right_node, + InstructionOperand* left_op, + InstructionOperand* right_op, InstructionCode* opcode) { + using namespace turboshaft; // NOLINT(build/namespaces) + if (!selector->CanCover(node, right_node)) return false; + + const Operation& right = selector->Get(right_node); + OpIndex bitwise_and_left; + int32_t mask; + if (TryMatchBitwiseAndSmallMask(*selector, right_node, &bitwise_and_left, + &mask)) { + *left_op = g->UseRegister(left_node); + *right_op = g->UseRegister(bitwise_and_left); + *opcode |= AddressingModeField::encode( + (mask == 0xFF) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH); + return true; + } + + OpIndex shift_input_left; + int32_t shift_by; + if (TryMatchSignExtendShift(selector, right_node, &shift_input_left, + &shift_by)) { + *left_op = g->UseRegister(left_node); + *right_op = g->UseRegister(shift_input_left); + *opcode |= AddressingModeField::encode( + (shift_by == 24) ? kMode_Operand2_R_SXTB : kMode_Operand2_R_SXTH); + return true; + } + + if (const ChangeOp* change_op = + right.TryCast()) { // Use extended register form. *opcode |= AddressingModeField::encode(kMode_Operand2_R_SXTW); *left_op = g->UseRegister(left_node); @@ -765,8 +832,10 @@ void VisitBinop(InstructionSelectorT* selector, // Keep the values live until the end so that we can use operations that // write registers to generate the condition, without accidently // overwriting the inputs. - inputs[input_count++] = g.UseRegisterAtEnd(cont->true_value()); - inputs[input_count++] = g.UseRegisterAtEnd(cont->false_value()); + inputs[input_count++] = + g.UseRegisterAtEndOrImmediateZero(cont->true_value()); + inputs[input_count++] = + g.UseRegisterAtEndOrImmediateZero(cont->false_value()); } DCHECK_NE(0u, input_count); @@ -903,19 +972,35 @@ void VisitAddSub(InstructionSelectorT* selector, Node* node, } } +std::tuple +GetBinopLeftRightCstOnTheRight( + InstructionSelectorT* selector, + const turboshaft::WordBinopOp& binop) { + using namespace turboshaft; // NOLINT(build/namespaces) + OpIndex left = binop.left(); + OpIndex right = binop.right(); + if (!selector->Is(right) && + WordBinopOp::IsCommutative(binop.kind) && + selector->Is(left)) { + std::swap(left, right); + } + return {left, right}; +} + void VisitAddSub(InstructionSelectorT* selector, turboshaft::OpIndex node, ArchOpcode opcode, ArchOpcode negate_opcode) { using namespace turboshaft; // NOLINT(build/namespaces) Arm64OperandGeneratorT g(selector); const WordBinopOp& add_sub = selector->Get(node).Cast(); + auto [left, right] = GetBinopLeftRightCstOnTheRight(selector, add_sub); if (base::Optional constant_rhs = - g.GetOptionalIntegerConstant(add_sub.right())) { + g.GetOptionalIntegerConstant(right)) { if (constant_rhs < 0 && constant_rhs > std::numeric_limits::min() && g.CanBeImmediate(-*constant_rhs, kArithmeticImm)) { selector->Emit(negate_opcode, g.DefineAsRegister(node), - g.UseRegister(add_sub.left()), + g.UseRegister(left), g.TempImmediate(static_cast(-*constant_rhs))); return; } @@ -3800,11 +3885,12 @@ bool InstructionSelectorT::ZeroExtendsWord32ToWord64NoPhis( case Opcode::kOverflowCheckedBinop: return op.Cast().rep == WordRepresentation::Word32(); + case Opcode::kProjection: + return 
ZeroExtendsWord32ToWord64NoPhis(op.Cast().input()); case Opcode::kLoad: { - MemoryRepresentation rep = op.Cast().loaded_rep; - return rep == MemoryRepresentation::Int8() || - rep == MemoryRepresentation::Int16() || - rep == MemoryRepresentation::Int32(); + RegisterRepresentation rep = + op.Cast().loaded_rep.ToRegisterRepresentation(); + return rep == RegisterRepresentation::Word32(); } default: return false; @@ -4009,9 +4095,9 @@ void VisitCompare(InstructionSelectorT* selector, InstructionOperand right, FlagsContinuationT* cont) { if (cont->IsSelect()) { Arm64OperandGeneratorT g(selector); - InstructionOperand inputs[] = {left, right, - g.UseRegister(cont->true_value()), - g.UseRegister(cont->false_value())}; + InstructionOperand inputs[] = { + left, right, g.UseRegisterOrImmediateZero(cont->true_value()), + g.UseRegisterOrImmediateZero(cont->false_value())}; selector->EmitWithContinuation(opcode, 0, nullptr, 4, inputs, cont); } else { selector->EmitWithContinuation(opcode, left, right, cont); @@ -4294,7 +4380,7 @@ bool TryEmitCbzOrTbz(InstructionSelectorT* selector, } } } - V8_FALLTHROUGH; + [[fallthrough]]; } case kUnsignedLessThanOrEqual: case kUnsignedGreaterThan: { @@ -7464,11 +7550,19 @@ void InstructionSelectorT::VisitI8x16Shuffle(node_t node) { return; } int index = 0; + uint8_t from = 0; + uint8_t to = 0; if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) { if (wasm::SimdShuffle::TryMatchSplat<4>(shuffle, &index)) { DCHECK_GT(4, index); Emit(kArm64S128Dup, g.DefineAsRegister(node), g.UseRegister(input0), g.UseImmediate(4), g.UseImmediate(index % 4)); + } else if (wasm::SimdShuffle::TryMatch32x4Reverse(shuffle32x4)) { + Emit(kArm64S32x4Reverse, g.DefineAsRegister(node), g.UseRegister(input0)); + } else if (wasm::SimdShuffle::TryMatch32x4OneLaneSwizzle(shuffle32x4, &from, + &to)) { + Emit(kArm64S32x4OneLaneSwizzle, g.DefineAsRegister(node), + g.UseRegister(input0), g.TempImmediate(from), g.TempImmediate(to)); } else if (wasm::SimdShuffle::TryMatchIdentity(shuffle)) { // Bypass normal shuffle code generation in this case. // EmitIdentity diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc index 4c7129e7a88182..f7fe7ef9d57756 100644 --- a/deps/v8/src/compiler/backend/code-generator.cc +++ b/deps/v8/src/compiler/backend/code-generator.cc @@ -479,7 +479,7 @@ MaybeHandle CodeGenerator::FinalizeCode() { } // Allocate the source position table. - Handle source_positions = + Handle source_positions = source_position_table_builder_.ToSourcePositionTable(isolate()); // Allocate and install the code. @@ -506,8 +506,7 @@ MaybeHandle CodeGenerator::FinalizeCode() { .set_profiler_data(info()->profiler_data()) .set_osr_offset(info()->osr_offset()); - if (info()->code_kind() == CodeKind::TURBOFAN) { - // Deoptimization data is only used in this case. + if (CodeKindUsesDeoptimizationData(info()->code_kind())) { builder.set_deoptimization_data(GenerateDeoptimizationData()); } @@ -743,7 +742,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction( bool adjust_stack = GetSlotAboveSPBeforeTailCall(instr, &first_unused_stack_slot); if (adjust_stack) AssembleTailCallBeforeGap(instr, first_unused_stack_slot); - AssembleGaps(instr); + if (instr->opcode() == kArchNop && block->successors().empty() && + block->code_end() - block->code_start() == 1) { + // When the frame-less dummy end block in Turbofan contains a Phi node, + // don't attempt to access spill slots. 
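// Worked example (sketch) for the new kArm64S32x4Reverse lowering: the 32x4
// shuffle {3,2,1,0} applied to input lanes [a,b,c,d] is emitted as
//   Rev64 dst.4s, src.4s                  // [b,a,d,c], reversed per 64-bit half
//   Ext   dst.16b, dst.16b, dst.16b, #8   // rotate the halves -> [d,c,b,a]
// which avoids both a per-lane move sequence and the Tbl-based general path.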
+ // TODO(dmercadier): When the switch to Turboshaft is complete, this + // will no longer be required. + } else { + AssembleGaps(instr); + } if (adjust_stack) AssembleTailCallAfterGap(instr, first_unused_stack_slot); DCHECK_IMPLIES( block->must_deconstruct_frame(), diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc index 2ddfb2dfff3790..3db7f0d9427acc 100644 --- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc @@ -17,7 +17,7 @@ #include "src/compiler/osr.h" #include "src/execution/frame-constants.h" #include "src/execution/frames.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #include "src/objects/smi.h" #if V8_ENABLE_WEBASSEMBLY diff --git a/deps/v8/src/compiler/backend/instruction-selector-adapter.h b/deps/v8/src/compiler/backend/instruction-selector-adapter.h index 1ac063b41ec2bd..f4a539e0822bb0 100644 --- a/deps/v8/src/compiler/backend/instruction-selector-adapter.h +++ b/deps/v8/src/compiler/backend/instruction-selector-adapter.h @@ -47,6 +47,7 @@ struct TurbofanAdapter { using inputs_t = Node::Inputs; using opcode_t = IrOpcode::Value; using id_t = uint32_t; + static_assert(std::is_same_v); using source_position_table_t = SourcePositionTable; explicit TurbofanAdapter(Schedule*) {} @@ -60,7 +61,9 @@ struct TurbofanAdapter { node_->opcode() == IrOpcode::kRelocatableInt64Constant || node_->opcode() == IrOpcode::kHeapConstant || node_->opcode() == IrOpcode::kCompressedHeapConstant || - node_->opcode() == IrOpcode::kNumberConstant); + node_->opcode() == IrOpcode::kNumberConstant || + node_->opcode() == IrOpcode::kFloat32Constant || + node_->opcode() == IrOpcode::kFloat64Constant); } bool is_int32() const { @@ -452,6 +455,8 @@ struct TurbofanAdapter { case IrOpcode::kHeapConstant: case IrOpcode::kCompressedHeapConstant: case IrOpcode::kNumberConstant: + case IrOpcode::kFloat32Constant: + case IrOpcode::kFloat64Constant: // For those, a view must be constructible. 
DCHECK_EQ(constant_view(node), node); return true; @@ -820,50 +825,59 @@ struct TurboshaftAdapter : public turboshaft::OperationMatcher { class LoadView { public: LoadView(turboshaft::Graph* graph, node_t node) : node_(node) { - load_ = graph->Get(node_).TryCast(); + switch (graph->Get(node_).opcode) { + case opcode_t::kLoad: + load_ = &graph->Get(node_).Cast(); + break; #if V8_ENABLE_WEBASSEMBLY - if (load_ == nullptr) { - load_transform_ = - &graph->Get(node_).Cast(); + case opcode_t::kSimd128LoadTransform: + load_transform_ = + &graph->Get(node_).Cast(); + break; +#if V8_ENABLE_WASM_SIMD256_REVEC + case opcode_t::kSimd256LoadTransform: + load_transform256_ = + &graph->Get(node_).Cast(); + break; +#endif // V8_ENABLE_WASM_SIMD256_REVEC +#endif // V8_ENABLE_WEBASSEMBLY + default: + UNREACHABLE(); } -#else - DCHECK_NOT_NULL(load_); -#endif } - LoadRepresentation loaded_rep() const { DCHECK_NOT_NULL(load_); return load_->machine_type(); } bool is_protected(bool* traps_on_null) const { - if (load_) { - if (load_->kind.with_trap_handler) { + if (kind().with_trap_handler) { + if (load_) { *traps_on_null = load_->kind.trap_on_null; - return true; - } #if V8_ENABLE_WEBASSEMBLY - } else { - if (load_transform_->load_kind.with_trap_handler) { - DCHECK(!load_transform_->load_kind.trap_on_null); + } else { +#if V8_ENABLE_WASM_SIMD256_REVEC + DCHECK((load_transform_ && !load_transform_->load_kind.trap_on_null) + || (load_transform256_ && + !load_transform256_->load_kind.trap_on_null)); +#else + DCHECK((load_transform_ && !load_transform_->load_kind.trap_on_null)); +#endif // V8_ENABLE_WASM_SIMD256_REVEC *traps_on_null = false; - return true; +#endif // V8_ENABLE_WEBASSEMBLY } -#endif + return true; } return false; } - bool is_atomic() const { - if (load_) return load_->kind.is_atomic; -#if V8_ENABLE_WEBASSEMBLY - if (load_transform_) return load_transform_->load_kind.is_atomic; -#endif - UNREACHABLE(); - } + bool is_atomic() const { return kind().is_atomic; } node_t base() const { if (load_) return load_->base(); #if V8_ENABLE_WEBASSEMBLY if (load_transform_) return load_transform_->base(); +#if V8_ENABLE_WASM_SIMD256_REVEC + if (load_transform256_) return load_transform256_->base(); +#endif // V8_ENABLE_WASM_SIMD256_REVEC #endif UNREACHABLE(); } @@ -871,6 +885,9 @@ struct TurboshaftAdapter : public turboshaft::OperationMatcher { if (load_) return load_->index().value_or_invalid(); #if V8_ENABLE_WEBASSEMBLY if (load_transform_) return load_transform_->index(); +#if V8_ENABLE_WASM_SIMD256_REVEC + if (load_transform256_) return load_transform256_->index(); +#endif // V8_ENABLE_WASM_SIMD256_REVEC #endif UNREACHABLE(); } @@ -890,6 +907,12 @@ struct TurboshaftAdapter : public turboshaft::OperationMatcher { int32_t offset = load_transform_->offset; DCHECK(!load_transform_->load_kind.tagged_base); return offset; +#if V8_ENABLE_WASM_SIMD256_REVEC + } else if (load_transform256_) { + int32_t offset = load_transform256_->offset; + DCHECK(!load_transform256_->load_kind.tagged_base); + return offset; +#endif // V8_ENABLE_WASM_SIMD256_REVEC #endif } UNREACHABLE(); @@ -901,6 +924,9 @@ struct TurboshaftAdapter : public turboshaft::OperationMatcher { if (load_) return load_->element_size_log2; #if V8_ENABLE_WEBASSEMBLY if (load_transform_) return 0; +#if V8_ENABLE_WASM_SIMD256_REVEC + if (load_transform256_) return 0; +#endif // V8_ENABLE_WASM_SIMD256_REVEC #endif UNREACHABLE(); } @@ -908,10 +934,24 @@ struct TurboshaftAdapter : public turboshaft::OperationMatcher { operator node_t() const { return node_; } private: 
+ turboshaft::LoadOp::Kind kind() const { + if (load_) return load_->kind; +#if V8_ENABLE_WEBASSEMBLY + if (load_transform_) return load_transform_->load_kind; +#if V8_ENABLE_WASM_SIMD256_REVEC + if (load_transform256_) return load_transform256_->load_kind; +#endif // V8_ENABLE_WASM_SIMD256_REVEC +#endif + UNREACHABLE(); + } + node_t node_; - const turboshaft::LoadOp* load_; + const turboshaft::LoadOp* load_ = nullptr; #if V8_ENABLE_WEBASSEMBLY - const turboshaft::Simd128LoadTransformOp* load_transform_; + const turboshaft::Simd128LoadTransformOp* load_transform_ = nullptr; +#if V8_ENABLE_WASM_SIMD256_REVEC + const turboshaft::Simd256LoadTransformOp* load_transform256_ = nullptr; +#endif // V8_ENABLE_WASM_SIMD256_REVEC #endif }; @@ -1094,6 +1134,9 @@ struct TurboshaftAdapter : public turboshaft::OperationMatcher { return graph_->Get(node).Is() #if V8_ENABLE_WEBASSEMBLY || graph_->Get(node).Is() +#if V8_ENABLE_WASM_SIMD256_REVEC + || graph_->Get(node).Is() +#endif // V8_ENABLE_WASM_SIMD256_REVEC #endif ; } diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc index c84e90685ea534..87ba3ef5cee6fa 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.cc +++ b/deps/v8/src/compiler/backend/instruction-selector.cc @@ -7,7 +7,6 @@ #include #include "src/base/iterator.h" -#include "src/base/v8-fallthrough.h" #include "src/codegen/machine-type.h" #include "src/codegen/tick-counter.h" #include "src/common/globals.h" @@ -462,12 +461,12 @@ int InstructionSelectorT::GetVirtualRegister(node_t node) { } template -const std::map +const std::map InstructionSelectorT::GetVirtualRegistersForTesting() const { - std::map virtual_registers; + std::map virtual_registers; for (size_t n = 0; n < virtual_registers_.size(); ++n) { if (virtual_registers_[n] != InstructionOperand::kInvalidVirtualRegister) { - NodeId const id = static_cast(n); + typename Adapter::id_t const id = static_cast(n); virtual_registers.insert(std::make_pair(id, virtual_registers_[n])); } } @@ -720,7 +719,7 @@ InstructionOperand OperandForDeopt(Isolate* isolate, } } } - V8_FALLTHROUGH; + [[fallthrough]]; default: switch (kind) { case FrameStateInputKind::kStackSlot: @@ -1601,6 +1600,12 @@ bool InstructionSelectorT::IsSourcePositionUsed(node_t node) { operation.TryCast()) { return lt->load_kind.with_trap_handler; } +#if V8_ENABLE_WASM_SIMD256_REVEC + if (const Simd256LoadTransformOp* lt = + operation.TryCast()) { + return lt->load_kind.with_trap_handler; + } +#endif // V8_ENABLE_WASM_SIMD256_REVEC if (const Simd128LaneMemoryOp* lm = operation.TryCast()) { return lm->kind.with_trap_handler; @@ -1656,6 +1661,11 @@ bool increment_effect_level_for_node(TurbofanAdapter* adapter, Node* node) { return opcode == IrOpcode::kStore || opcode == IrOpcode::kUnalignedStore || opcode == IrOpcode::kCall || opcode == IrOpcode::kProtectedStore || opcode == IrOpcode::kStoreTrapOnNull || +#if V8_ENABLE_WEBASSEMBLY + opcode == IrOpcode::kStoreLane || +#endif + opcode == IrOpcode::kStorePair || + opcode == IrOpcode::kStoreIndirectPointer || #define ADD_EFFECT_FOR_ATOMIC_OP(Opcode) opcode == IrOpcode::k##Opcode || MACHINE_ATOMIC_OP_LIST(ADD_EFFECT_FOR_ATOMIC_OP) #undef ADD_EFFECT_FOR_ATOMIC_OP @@ -5384,6 +5394,62 @@ void InstructionSelectorT::VisitNode( } } + // SIMD256 +#if V8_ENABLE_WASM_SIMD256_REVEC + case Opcode::kSimd256Extract128Lane: { + MarkAsSimd128(node); + return VisitExtractF128(node); + } + case Opcode::kSimd256LoadTransform: { + MarkAsSimd256(node); + return 
VisitSimd256LoadTransform(node); + } + case Opcode::kSimd256Unary: { + const Simd256UnaryOp& unary = op.Cast(); + MarkAsSimd256(node); + switch (unary.kind) { +#define VISIT_SIMD_256_UNARY(kind) \ + case Simd256UnaryOp::Kind::k##kind: \ + return Visit##kind(node); + FOREACH_SIMD_256_UNARY_OPCODE(VISIT_SIMD_256_UNARY) +#undef VISIT_SIMD_256_UNARY + } + } + case Opcode::kSimd256Binop: { + const Simd256BinopOp& binop = op.Cast(); + MarkAsSimd256(node); + switch (binop.kind) { +#define VISIT_SIMD_BINOP(kind) \ + case Simd256BinopOp::Kind::k##kind: \ + return Visit##kind(node); + FOREACH_SIMD_256_BINARY_OPCODE(VISIT_SIMD_BINOP) +#undef VISIT_SIMD_BINOP + } + } + case Opcode::kSimd256Shift: { + const Simd256ShiftOp& shift = op.Cast(); + MarkAsSimd256(node); + switch (shift.kind) { +#define VISIT_SIMD_SHIFT(kind) \ + case Simd256ShiftOp::Kind::k##kind: \ + return Visit##kind(node); + FOREACH_SIMD_256_SHIFT_OPCODE(VISIT_SIMD_SHIFT) +#undef VISIT_SIMD_SHIFT + } + } + case Opcode::kSimd256Ternary: { + const Simd256TernaryOp& ternary = op.Cast(); + MarkAsSimd256(node); + switch (ternary.kind) { +#define VISIT_SIMD_256_TERNARY(kind) \ + case Simd256TernaryOp::Kind::k##kind: \ + return Visit##kind(node); + FOREACH_SIMD_256_TERNARY_OPCODE(VISIT_SIMD_256_TERNARY) +#undef VISIT_SIMD_256_UNARY + } + } +#endif // V8_ENABLE_WASM_SIMD256_REVEC + case Opcode::kLoadStackPointer: return VisitLoadStackPointer(node); diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h index aa630bbaf2bc14..fe1472b0d7f174 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.h +++ b/deps/v8/src/compiler/backend/instruction-selector.h @@ -552,7 +552,7 @@ class InstructionSelectorT final : public Adapter { int GetEffectLevel(node_t node, FlagsContinuation* cont) const; int GetVirtualRegister(node_t node); - const std::map GetVirtualRegistersForTesting() const; + const std::map GetVirtualRegistersForTesting() const; // Check if we can generate loads and stores of ExternalConstants relative // to the roots register. @@ -1023,6 +1023,11 @@ class InstructionSelectorT final : public Adapter { // Swaps the two first input operands of the node, to help match shuffles // to specific architectural instructions. 
void SwapShuffleInputs(typename Adapter::SimdShuffleView& node); + +#if V8_ENABLE_WASM_SIMD256_REVEC + void VisitSimd256LoadTransform(node_t node); +#endif // V8_ENABLE_WASM_SIMD256_REVEC + #endif // V8_ENABLE_WEBASSEMBLY // =========================================================================== diff --git a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc index f916b1661875ba..29ce36ad4a0917 100644 --- a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc +++ b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc @@ -13,7 +13,7 @@ #include "src/compiler/backend/gap-resolver.h" #include "src/compiler/node-matchers.h" #include "src/compiler/osr.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" namespace v8 { namespace internal { @@ -366,7 +366,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, *predicate = true; return CLT; case kUnsignedGreaterThanOrEqual: - case kFloatGreaterThanOrEqual: *predicate = false; return CLT; case kUnsignedLessThanOrEqual: @@ -374,9 +373,26 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, *predicate = true; return CLE; case kUnsignedGreaterThan: + *predicate = false; + return CLE; case kFloatGreaterThan: + *predicate = false; + return CULE; + case kFloatGreaterThanOrEqual: + *predicate = false; + return CULT; + case kFloatLessThanOrUnordered: + *predicate = true; + return CULT; + case kFloatGreaterThanOrUnordered: *predicate = false; return CLE; + case kFloatGreaterThanOrEqualOrUnordered: + *predicate = false; + return CLT; + case kFloatLessThanOrEqualOrUnordered: + *predicate = true; + return CULE; case kUnorderedEqual: case kUnorderedNotEqual: *predicate = true; @@ -645,10 +661,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Call(i.InputCode(0), RelocInfo::CODE_TARGET); } else { Register reg = i.InputRegister(0); + CodeEntrypointTag tag = + i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex()); DCHECK_IMPLIES( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); - __ CallCodeObject(reg); + __ CallCodeObject(reg, tag); } RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); @@ -697,10 +715,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); } else { Register reg = i.InputRegister(0); + CodeEntrypointTag tag = + i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex()); DCHECK_IMPLIES( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); - __ JumpCodeObject(reg); + __ JumpCodeObject(reg, tag); } frame_access_state()->ClearSPDelta(); frame_access_state()->SetFrameAccessToDefault(); @@ -776,50 +796,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( int const num_gp_parameters = ParamField::decode(instr->opcode()); int const num_fp_parameters = FPParamField::decode(instr->opcode()); SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes; + Label return_location; #if V8_ENABLE_WEBASSEMBLY - Label start_call; bool isWasmCapiFunction = linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); - // from start_call to return address. - int offset = 0; - // TODO(loongarch): Use a more robust way to calculate offset of pc. - // See CallCFunction. 
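The reworked FlagsConditionToConditionCmpFPU table above (and the matching mips64/riscv tables later in this patch) leans on IEEE-754 semantics: any comparison involving NaN is unordered, so a strict "greater than" can be evaluated as the negation of an unordered-or-less-or-equal test, which is why the "greater" conditions flip the predicate and switch to the CULE/CULT-style condition codes. A minimal standalone sketch of that equivalence (plain C++, not the V8 code generator):

#include <cassert>
#include <cmath>
#include <limits>

// "Unordered less-or-equal": true when a <= b or when the operands are
// unordered (at least one NaN). Its negation is an ordered, strict a > b.
static bool UnorderedLessOrEqual(double a, double b) {
  return std::isunordered(a, b) || a <= b;
}

int main() {
  const double nan = std::numeric_limits<double>::quiet_NaN();
  const double values[] = {-1.0, 0.0, 2.5, nan};
  for (double a : values) {
    for (double b : values) {
      // kFloatGreaterThan == negated unordered-less-or-equal, mirroring the
      // *predicate = false / CULE mapping above.
      assert((a > b) == !UnorderedLessOrEqual(a, b));
    }
  }
  return 0;
}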
- if (isWasmCapiFunction) { - offset = 16; // SetIsolateDataSlots::kNo - } else if (__ root_array_available()) { - offset = 36; // SetIsolateDataSlots::kYes and root_array_available - } else { - offset = 80; // SetIsolateDataSlots::kYes but not root_array_available - } -#endif // V8_ENABLE_WEBASSEMBLY -#if V8_HOST_ARCH_LOONG64 - if (v8_flags.debug_code) { - offset += 12; // see CallCFunction - } -#endif -#if V8_ENABLE_WEBASSEMBLY if (isWasmCapiFunction) { - __ bind(&start_call); - __ pcaddi(t7, offset >> kInstrSizeLog2); + __ LoadLabelRelative(t7, &return_location); __ St_d(t7, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); set_isolate_data_slots = SetIsolateDataSlots::kNo; } #endif // V8_ENABLE_WEBASSEMBLY + int pc_offset; if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); - __ CallCFunction(ref, num_gp_parameters, num_fp_parameters, - set_isolate_data_slots); + pc_offset = __ CallCFunction(ref, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots, &return_location); } else { Register func = i.InputRegister(0); - __ CallCFunction(func, num_gp_parameters, num_fp_parameters, - set_isolate_data_slots); - } -#if V8_ENABLE_WEBASSEMBLY - if (isWasmCapiFunction) { - CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call)); - RecordSafepoint(instr->reference_map()); + pc_offset = __ CallCFunction(func, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots, &return_location); } -#endif // V8_ENABLE_WEBASSEMBLY + RecordSafepoint(instr->reference_map(), pc_offset); + frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack // pointer in CallCFunction. However, for certain architectures (e.g. @@ -2532,6 +2530,12 @@ void CodeGenerator::AssembleConstructFrame() { // Create space for returns. 
__ Sub_d(sp, sp, Operand(returns * kSystemPointerSize)); } + + for (int spill_slot : frame()->tagged_slots()) { + FrameOffset offset = frame_access_state()->GetFrameOffset(spill_slot); + DCHECK(offset.from_frame_pointer()); + __ St_d(zero_reg, MemOperand(fp, offset.offset())); + } } void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { diff --git a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc index 6e836989abf732..8220211d8b1b9b 100644 --- a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc +++ b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc @@ -524,7 +524,8 @@ static void VisitBinop(InstructionSelectorT* selector, Node* node, template void InstructionSelectorT::VisitStackSlot(node_t node) { StackSlotRepresentation rep = this->stack_slot_representation_of(node); - int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment()); + int slot = + frame_->AllocateSpillSlot(rep.size(), rep.alignment(), rep.is_tagged()); OperandGenerator g(this); Emit(kArchStackSlot, g.DefineAsRegister(node), @@ -3444,8 +3445,6 @@ void InstructionSelectorT::VisitWordCompareZero( } else if (value_op.Is()) { cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); return VisitStackPointerGreaterThan(value, cont); - } else { - UNREACHABLE(); } } diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc index 47c8f5a3d05876..6e1089c145c45e 100644 --- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc @@ -13,7 +13,7 @@ #include "src/compiler/backend/gap-resolver.h" #include "src/compiler/node-matchers.h" #include "src/compiler/osr.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" namespace v8 { namespace internal { @@ -294,7 +294,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, *predicate = true; return OLT; case kUnsignedGreaterThanOrEqual: - case kFloatGreaterThanOrEqual: *predicate = false; return OLT; case kUnsignedLessThanOrEqual: @@ -302,9 +301,26 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, *predicate = true; return OLE; case kUnsignedGreaterThan: + *predicate = false; + return OLE; case kFloatGreaterThan: + *predicate = false; + return ULE; + case kFloatGreaterThanOrEqual: + *predicate = false; + return ULT; + case kFloatLessThanOrUnordered: + *predicate = true; + return ULT; + case kFloatGreaterThanOrUnordered: *predicate = false; return OLE; + case kFloatGreaterThanOrEqualOrUnordered: + *predicate = false; + return OLT; + case kFloatLessThanOrEqualOrUnordered: + *predicate = true; + return ULE; case kUnorderedEqual: case kUnorderedNotEqual: *predicate = true; @@ -572,10 +588,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Call(i.InputCode(0), RelocInfo::CODE_TARGET); } else { Register reg = i.InputRegister(0); + CodeEntrypointTag tag = + i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex()); DCHECK_IMPLIES( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); - __ CallCodeObject(reg); + __ CallCodeObject(reg, tag); } RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); @@ -626,10 +644,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); } else { 
Register reg = i.InputRegister(0); + CodeEntrypointTag tag = + i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex()); DCHECK_IMPLIES( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); - __ JumpCodeObject(reg); + __ JumpCodeObject(reg, tag); } frame_access_state()->ClearSPDelta(); frame_access_state()->SetFrameAccessToDefault(); diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc index d466adb2c41774..03c7f850ff0b1d 100644 --- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc @@ -2751,10 +2751,20 @@ template void VisitFloat32Compare(InstructionSelectorT* selector, typename Adapter::node_t node, FlagsContinuationT* cont) { + Mips64OperandGeneratorT g(selector); if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + const ComparisonOp& op = selector->Get(node).template Cast(); + OpIndex left = op.left(); + OpIndex right = op.right(); + InstructionOperand lhs, rhs; + + lhs = + selector->MatchZero(left) ? g.UseImmediate(left) : g.UseRegister(left); + rhs = selector->MatchZero(right) ? g.UseImmediate(right) + : g.UseRegister(right); + VisitCompare(selector, kMips64CmpS, lhs, rhs, cont); } else { - Mips64OperandGeneratorT g(selector); Float32BinopMatcher m(node); InstructionOperand lhs, rhs; @@ -3494,8 +3504,6 @@ void InstructionSelectorT::VisitWordCompareZero( } else if (value_op.Is()) { cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); return VisitStackPointerGreaterThan(value, cont); - } else { - UNREACHABLE(); } } // Continuation could not be combined with a compare, emit compare against @@ -3507,54 +3515,64 @@ void InstructionSelectorT::VisitWordCompareZero( template void InstructionSelectorT::VisitSwitch(node_t node, const SwitchInfo& sw) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - Mips64OperandGeneratorT g(this); - InstructionOperand value_operand = g.UseRegister(node->InputAt(0)); - - // Emit either ArchTableSwitch or ArchBinarySearchSwitch. - if (enable_switch_jump_table_ == - InstructionSelector::kEnableSwitchJumpTable) { - static const size_t kMaxTableSwitchValueRange = 2 << 16; - size_t table_space_cost = 10 + 2 * sw.value_range(); - size_t table_time_cost = 3; - size_t lookup_space_cost = 2 + 2 * sw.case_count(); - size_t lookup_time_cost = sw.case_count(); - if (sw.case_count() > 0 && - table_space_cost + 3 * table_time_cost <= - lookup_space_cost + 3 * lookup_time_cost && - sw.min_value() > std::numeric_limits::min() && - sw.value_range() <= kMaxTableSwitchValueRange) { - InstructionOperand index_operand = value_operand; - if (sw.min_value()) { - index_operand = g.TempRegister(); - Emit(kMips64Sub, index_operand, value_operand, - g.TempImmediate(sw.min_value())); - } - // Generate a table lookup. - return EmitTableSwitch(sw, index_operand); + Mips64OperandGeneratorT g(this); + InstructionOperand value_operand = g.UseRegister(this->input_at(node, 0)); + + // Emit either ArchTableSwitch or ArchBinarySearchSwitch. 
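For reference, the jump-table-versus-binary-search decision in VisitSwitch (the removed TurboFan-only copy above and the shared version that follows use the same logic) is just a space/time cost comparison. A standalone sketch of that heuristic, with the constants taken from the code above, shown only as an illustration:

#include <cstddef>
#include <cstdint>
#include <limits>

// Prefer an ArchTableSwitch when the estimated space + 3 * time cost of a
// jump table beats a binary-search tree over the cases, the value range is
// small enough, and min_value can safely be subtracted from the input.
static bool PreferTableSwitch(size_t case_count, size_t value_range,
                              int32_t min_value) {
  constexpr size_t kMaxTableSwitchValueRange = 2 << 16;
  const size_t table_space_cost = 10 + 2 * value_range;
  const size_t table_time_cost = 3;
  const size_t lookup_space_cost = 2 + 2 * case_count;
  const size_t lookup_time_cost = case_count;
  return case_count > 0 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         min_value > std::numeric_limits<int32_t>::min() &&
         value_range <= kMaxTableSwitchValueRange;
}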
+ if (enable_switch_jump_table_ == + InstructionSelector::kEnableSwitchJumpTable) { + static const size_t kMaxTableSwitchValueRange = 2 << 16; + size_t table_space_cost = 10 + 2 * sw.value_range(); + size_t table_time_cost = 3; + size_t lookup_space_cost = 2 + 2 * sw.case_count(); + size_t lookup_time_cost = sw.case_count(); + if (sw.case_count() > 0 && + table_space_cost + 3 * table_time_cost <= + lookup_space_cost + 3 * lookup_time_cost && + sw.min_value() > std::numeric_limits::min() && + sw.value_range() <= kMaxTableSwitchValueRange) { + InstructionOperand index_operand = value_operand; + if (sw.min_value()) { + index_operand = g.TempRegister(); + Emit(kMips64Sub, index_operand, value_operand, + g.TempImmediate(sw.min_value())); } + // Generate a table lookup. + return EmitTableSwitch(sw, index_operand); } + } - // Generate a tree of conditional jumps. - return EmitBinarySearchSwitch(sw, value_operand); + // Generate a tree of conditional jumps. + return EmitBinarySearchSwitch(sw, value_operand); +} + +template <> +void InstructionSelectorT::VisitWord32Equal(node_t node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); + + Int32BinopMatcher m(node); + if (m.right().Is(0)) { + return VisitWordCompareZero(m.node(), m.left().node(), &cont); } + + VisitWord32Compare(this, node, &cont); } -template -void InstructionSelectorT::VisitWord32Equal(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); - Int32BinopMatcher m(node); - if (m.right().Is(0)) { - return VisitWordCompareZero(m.node(), m.left().node(), &cont); - } +template <> +void InstructionSelectorT::VisitWord32Equal(node_t node) { + using namespace turboshaft; // NOLINT(build/namespaces) + const Operation& equal = Get(node); + DCHECK(equal.Is()); + OpIndex left = equal.input(0); + OpIndex right = equal.input(1); + OpIndex user = node; + FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); - VisitWord32Compare(this, node, &cont); + if (MatchZero(right)) { + return VisitWordCompareZero(user, left, &cont); } + + VisitWord32Compare(this, node, &cont); } template @@ -3675,44 +3693,28 @@ void InstructionSelectorT::VisitWord64Equal(node_t node) { template void InstructionSelectorT::VisitInt64LessThan(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); - VisitWord64Compare(this, node, &cont); - } + FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); + VisitWord64Compare(this, node, &cont); } template void InstructionSelectorT::VisitInt64LessThanOrEqual(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - FlagsContinuation cont = - FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); - VisitWord64Compare(this, node, &cont); - } + FlagsContinuation cont = + FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); + VisitWord64Compare(this, node, &cont); } template void InstructionSelectorT::VisitUint64LessThan(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); - VisitWord64Compare(this, node, &cont); - } + FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); + VisitWord64Compare(this, node, &cont); } template void InstructionSelectorT::VisitUint64LessThanOrEqual(node_t node) { - if constexpr 
(Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - FlagsContinuation cont = - FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); - VisitWord64Compare(this, node, &cont); - } + FlagsContinuation cont = + FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); + VisitWord64Compare(this, node, &cont); } template diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc index caf64ed255b511..93d47489a2c356 100644 --- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc @@ -12,7 +12,7 @@ #include "src/compiler/backend/gap-resolver.h" #include "src/compiler/node-matchers.h" #include "src/compiler/osr.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #if V8_ENABLE_WEBASSEMBLY #include "src/wasm/wasm-objects.h" @@ -1001,7 +1001,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( start_pc_offset = __ pc_offset(); // We are going to patch this instruction after emitting // CallCFunction, using a zero offset here as placeholder for now. - // patch_wasm_cpi_return_address assumes `addi` is used here to + // patch_pc_address assumes `addi` is used here to // add the offset to pc. __ addi(kScratchReg, kScratchReg, Operand::Zero()); __ StoreU64(kScratchReg, @@ -1010,29 +1010,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( set_isolate_data_slots = SetIsolateDataSlots::kNo; } #endif // V8_ENABLE_WEBASSEMBLY + int pc_offset; if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); - __ CallCFunction(ref, num_gp_parameters, num_fp_parameters, - set_isolate_data_slots, has_function_descriptor); + pc_offset = + __ CallCFunction(ref, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots, has_function_descriptor); } else { Register func = i.InputRegister(0); - __ CallCFunction(func, num_gp_parameters, num_fp_parameters, - set_isolate_data_slots, has_function_descriptor); + pc_offset = + __ CallCFunction(func, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots, has_function_descriptor); } #if V8_ENABLE_WEBASSEMBLY if (isWasmCapiFunction) { - int offset_since_start_call = __ SizeOfCodeGeneratedSince(&start_call); + int offset_since_start_call = pc_offset - start_pc_offset; // Here we are going to patch the `addi` instruction above to use the // correct offset. // LoadPC emits two instructions and pc is the address of its // second emitted instruction therefore there is one more instruction to // count. offset_since_start_call += kInstrSize; - __ patch_wasm_cpi_return_address(kScratchReg, start_pc_offset, - offset_since_start_call); - RecordSafepoint(instr->reference_map()); + __ patch_pc_address(kScratchReg, start_pc_offset, + offset_since_start_call); } #endif // V8_ENABLE_WEBASSEMBLY + RecordSafepoint(instr->reference_map(), pc_offset); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack // pointer in CallCFunction. However, for certain architectures (e.g. @@ -3330,6 +3333,15 @@ void CodeGenerator::AssembleConstructFrame() { const int returns = frame()->GetReturnSlotCount(); // Create space for returns. 
__ AllocateStackSpace(returns * kSystemPointerSize); + + if (!frame()->tagged_slots().IsEmpty()) { + __ mov(kScratchReg, Operand(0)); + for (int spill_slot : frame()->tagged_slots()) { + FrameOffset offset = frame_access_state()->GetFrameOffset(spill_slot); + DCHECK(offset.from_frame_pointer()); + __ StoreU64(kScratchReg, MemOperand(fp, offset.offset())); + } + } } void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc index 8f68e6004ed4ce..b9a2cedcc22174 100644 --- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc @@ -187,7 +187,8 @@ void VisitBinop(InstructionSelectorT* selector, template void InstructionSelectorT::VisitStackSlot(node_t node) { StackSlotRepresentation rep = this->stack_slot_representation_of(node); - int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment()); + int slot = + frame_->AllocateSpillSlot(rep.size(), rep.alignment(), rep.is_tagged()); OperandGenerator g(this); Emit(kArchStackSlot, g.DefineAsRegister(node), diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc index cf8c6637340cee..593f725b73e684 100644 --- a/deps/v8/src/compiler/backend/register-allocator.cc +++ b/deps/v8/src/compiler/backend/register-allocator.cc @@ -505,7 +505,7 @@ void LiveRange::ConvertUsesToOperand(const InstructionOperand& op, break; case UsePositionType::kRequiresRegister: DCHECK(op.IsRegister() || op.IsFPRegister()); - V8_FALLTHROUGH; + [[fallthrough]]; case UsePositionType::kRegisterOrSlot: case UsePositionType::kRegisterOrSlotOrConstant: InstructionOperand::ReplaceWith(pos->operand(), &op); @@ -1810,15 +1810,15 @@ int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) { case MachineRepresentation::kSimd256: result -= kNumberOfFixedRangesPerRegister * config()->num_simd128_registers(); - V8_FALLTHROUGH; + [[fallthrough]]; case MachineRepresentation::kSimd128: result -= kNumberOfFixedRangesPerRegister * config()->num_float_registers(); - V8_FALLTHROUGH; + [[fallthrough]]; case MachineRepresentation::kFloat32: result -= kNumberOfFixedRangesPerRegister * config()->num_double_registers(); - V8_FALLTHROUGH; + [[fallthrough]]; case MachineRepresentation::kFloat64: result -= kNumberOfFixedRangesPerRegister * config()->num_general_registers(); diff --git a/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc b/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc index d3cc06e524e9fa..bb52536d6b776f 100644 --- a/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc +++ b/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc @@ -12,7 +12,7 @@ #include "src/compiler/backend/gap-resolver.h" #include "src/compiler/node-matchers.h" #include "src/compiler/osr.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" namespace v8 { namespace internal { @@ -163,33 +163,35 @@ namespace { class OutOfLineRecordWrite final : public OutOfLineCode { public: - OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index, - Register value, Register scratch0, Register scratch1, - RecordWriteMode mode, StubCallMode stub_mode) + OutOfLineRecordWrite( + CodeGenerator* gen, Register object, Operand offset, Register value, + RecordWriteMode mode, StubCallMode stub_mode, + IndirectPointerTag indirect_pointer_tag = 
kIndirectPointerNullTag) : OutOfLineCode(gen), object_(object), - index_(index), + offset_(offset), value_(value), - scratch0_(scratch0), - scratch1_(scratch1), mode_(mode), +#if V8_ENABLE_WEBASSEMBLY stub_mode_(stub_mode), +#endif // V8_ENABLE_WEBASSEMBLY must_save_lr_(!gen->frame_access_state()->has_frame()), - zone_(gen->zone()) { - DCHECK(!AreAliased(object, index, scratch0, scratch1)); - DCHECK(!AreAliased(value, index, scratch0, scratch1)); + zone_(gen->zone()), + indirect_pointer_tag_(indirect_pointer_tag) { } void Generate() final { -#if V8_TARGET_ARCH_RISCV64 - if (COMPRESS_POINTERS_BOOL) { +#ifdef V8_TARGET_ARCH_RISCV64 + // When storing an indirect pointer, the value will always be a + // full/decompressed pointer. + if (COMPRESS_POINTERS_BOOL && + mode_ != RecordWriteMode::kValueIsIndirectPointer) { __ DecompressTagged(value_, value_); } #endif - __ CheckPageFlag(value_, scratch0_, - MemoryChunk::kPointersToHereAreInterestingMask, eq, + __ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, eq, exit()); - __ AddWord(scratch1_, object_, index_); + SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters() ? SaveFPRegsMode::kSave : SaveFPRegsMode::kIgnore; @@ -198,15 +200,21 @@ class OutOfLineRecordWrite final : public OutOfLineCode { __ Push(ra); } if (mode_ == RecordWriteMode::kValueIsEphemeronKey) { - __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode); + __ CallEphemeronKeyBarrier(object_, offset_, save_fp_mode); + } else if (mode_ == RecordWriteMode::kValueIsIndirectPointer) { + DCHECK(IsValidIndirectPointerTag(indirect_pointer_tag_)); + __ CallIndirectPointerBarrier(object_, offset_, save_fp_mode, + indirect_pointer_tag_); +#if V8_ENABLE_WEBASSEMBLY } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) { // A direct call to a wasm runtime stub defined in this module. // Just encode the stub index. This will be patched when the code // is added to the native module and copied into wasm code space. 
- __ CallRecordWriteStubSaveRegisters(object_, scratch1_, save_fp_mode, + __ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode, StubCallMode::kCallWasmRuntimeStub); +#endif // V8_ENABLE_WEBASSEMBLY } else { - __ CallRecordWriteStubSaveRegisters(object_, scratch1_, save_fp_mode); + __ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode); } if (must_save_lr_) { __ Pop(ra); @@ -215,14 +223,15 @@ class OutOfLineRecordWrite final : public OutOfLineCode { private: Register const object_; - Register const index_; + Operand const offset_; Register const value_; - Register const scratch0_; - Register const scratch1_; RecordWriteMode const mode_; +#if V8_ENABLE_WEBASSEMBLY StubCallMode const stub_mode_; +#endif // V8_ENABLE_WEBASSEMBLY bool must_save_lr_; Zone* zone_; + IndirectPointerTag indirect_pointer_tag_; }; Condition FlagsConditionToConditionCmp(FlagsCondition condition) { @@ -314,6 +323,18 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, case kFloatGreaterThanOrEqual: *predicate = true; return GE; + case kFloatLessThanOrUnordered: + *predicate = true; + return LT; + case kFloatGreaterThanOrUnordered: + *predicate = false; + return LE; + case kFloatGreaterThanOrEqualOrUnordered: + *predicate = false; + return LT; + case kFloatLessThanOrEqualOrUnordered: + *predicate = true; + return LE; default: *predicate = true; break; @@ -321,18 +342,70 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, UNREACHABLE(); } +#if V8_ENABLE_WEBASSEMBLY +class WasmOutOfLineTrap : public OutOfLineCode { + public: + WasmOutOfLineTrap(CodeGenerator* gen, Instruction* instr) + : OutOfLineCode(gen), gen_(gen), instr_(instr) {} + void Generate() override { + RiscvOperandConverter i(gen_, instr_); + TrapId trap_id = + static_cast(i.InputInt32(instr_->InputCount() - 1)); + GenerateCallToTrap(trap_id); + } + + protected: + CodeGenerator* gen_; + + void GenerateWithTrapId(TrapId trap_id) { GenerateCallToTrap(trap_id); } + + private: + void GenerateCallToTrap(TrapId trap_id) { + gen_->AssembleSourcePosition(instr_); + // A direct call to a wasm runtime stub defined in this module. + // Just encode the stub index. This will be patched when the code + // is added to the native module and copied into wasm code space. + __ Call(static_cast
(trap_id), RelocInfo::WASM_STUB_CALL); + ReferenceMap* reference_map = gen_->zone()->New(gen_->zone()); + gen_->RecordSafepoint(reference_map); + __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap); + } + + Instruction* instr_; +}; + +void RecordTrapInfoIfNeeded(Zone* zone, CodeGenerator* codegen, + InstructionCode opcode, Instruction* instr, + int pc) { + const MemoryAccessMode access_mode = AccessModeField::decode(opcode); + if (access_mode == kMemoryAccessProtectedMemOutOfBounds || + access_mode == kMemoryAccessProtectedNullDereference) { + codegen->RecordProtectedInstruction(pc); + } +} +#else +void RecordTrapInfoIfNeeded(Zone* zone, CodeGenerator* codegen, + InstructionCode opcode, Instruction* instr, + int pc) { + DCHECK_EQ(kMemoryAccessDirect, AccessModeField::decode(opcode)); +} +#endif // V8_ENABLE_WEBASSEMBLY } // namespace -#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ - do { \ - __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \ - __ sync(); \ +#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ + do { \ + __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \ + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, \ + (__ pc_offset() - kInstrSize)); \ + __ sync(); \ } while (0) #define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \ do { \ __ sync(); \ __ asm_instr(i.InputOrZeroRegister(0), i.MemoryOperand(1)); \ + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, \ + (__ pc_offset() - kInstrSize)); \ __ sync(); \ } while (0) @@ -342,6 +415,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, __ AddWord(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ __ sync(); \ __ bind(&binop); \ + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \ Operand(i.InputRegister(2))); \ @@ -386,6 +460,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, __ Sll32(i.TempRegister(3), i.TempRegister(3), 3); \ __ sync(); \ __ bind(&binop); \ + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \ __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \ size, sign_extend); \ @@ -404,6 +479,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, __ sync(); \ __ bind(&exchange); \ __ AddWord(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ __ Move(i.TempRegister(1), i.InputRegister(2)); \ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ @@ -427,6 +503,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, __ Sll32(i.TempRegister(1), i.TempRegister(1), 3); \ __ sync(); \ __ bind(&exchange); \ + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ size, sign_extend); \ @@ -445,6 +522,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, __ AddWord(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ __ sync(); \ __ bind(&compareExchange); \ + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ __ BranchShort(&exit, ne, 
i.InputRegister(2), \ Operand(i.OutputRegister(0))); \ @@ -473,6 +551,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, __ Sll32(i.TempRegister(1), i.TempRegister(1), 3); \ __ sync(); \ __ bind(&compareExchange); \ + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ size, sign_extend); \ @@ -662,10 +741,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Call(i.InputCode(0), RelocInfo::CODE_TARGET); } else { Register reg = i.InputRegister(0); + CodeEntrypointTag tag = + i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex()); DCHECK_IMPLIES( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); - __ CallCodeObject(reg); + __ CallCodeObject(reg, tag); } RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); @@ -701,10 +782,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); } else { Register reg = i.InputOrZeroRegister(0); + CodeEntrypointTag tag = + i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex()); DCHECK_IMPLIES( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); - __ JumpCodeObject(reg); + __ JumpCodeObject(reg, tag); } frame_access_state()->ClearSPDelta(); frame_access_state()->SetFrameAccessToDefault(); @@ -788,34 +871,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchCallCFunction: { int const num_gp_parameters = ParamField::decode(instr->opcode()); int const num_fp_parameters = FPParamField::decode(instr->opcode()); - Label after_call; + Label return_location; SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes; #if V8_ENABLE_WEBASSEMBLY bool isWasmCapiFunction = linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); if (isWasmCapiFunction) { // Put the return address in a stack slot. 
- __ LoadAddress(kScratchReg, &after_call, RelocInfo::EXTERNAL_REFERENCE); + __ LoadAddress(kScratchReg, &return_location, + RelocInfo::EXTERNAL_REFERENCE); __ StoreWord(kScratchReg, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); set_isolate_data_slots = SetIsolateDataSlots::kNo; } #endif // V8_ENABLE_WEBASSEMBLY + int pc_offset; if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); - __ CallCFunction(ref, num_gp_parameters, num_fp_parameters, - set_isolate_data_slots); + pc_offset = __ CallCFunction(ref, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots, &return_location); } else { - Register func = i.InputOrZeroRegister(0); - __ CallCFunction(func, num_gp_parameters, num_fp_parameters, - set_isolate_data_slots); - } - __ bind(&after_call); -#if V8_ENABLE_WEBASSEMBLY - if (isWasmCapiFunction) { - RecordSafepoint(instr->reference_map()); + Register func = i.InputRegister(0); + pc_offset = __ CallCFunction(func, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots, &return_location); } -#endif // V8_ENABLE_WEBASSEMBLY + RecordSafepoint(instr->reference_map(), pc_offset); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack @@ -917,37 +997,76 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(), i.InputDoubleRegister(0), DetermineStubCallMode()); break; - case kArchStoreWithWriteBarrier: // Fall through. - case kArchAtomicStoreWithWriteBarrier: { + case kArchStoreWithWriteBarrier: { RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode()); + // Indirect pointer writes must use a different opcode. + DCHECK_NE(mode, RecordWriteMode::kValueIsIndirectPointer); Register object = i.InputRegister(0); - Register index = i.InputRegister(1); Register value = i.InputRegister(2); - Register scratch0 = i.TempRegister(0); - Register scratch1 = i.TempRegister(1); - auto ool = zone()->New(this, object, index, value, - scratch0, scratch1, mode, - DetermineStubCallMode()); - __ AddWord(kScratchReg, object, index); - if (arch_opcode == kArchStoreWithWriteBarrier) { - __ StoreTaggedField(value, MemOperand(kScratchReg)); - } else { - DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode); - __ sync(); - __ StoreTaggedField(value, MemOperand(kScratchReg)); - __ sync(); + auto ool = zone()->New( + this, object, Operand(i.InputRegister(1)), value, mode, + DetermineStubCallMode()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); + __ AddWord(kScratchReg, object, i.InputRegister(1)); + __ StoreTaggedField(value, MemOperand(kScratchReg, 0)); + if (mode > RecordWriteMode::kValueIsIndirectPointer) { + __ JumpIfSmi(value, ool->exit()); } - if (mode > RecordWriteMode::kValueIsPointer) { + __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, + ne, ool->entry()); + __ bind(ool->exit()); + break; + } + case kArchAtomicStoreWithWriteBarrier: { +#ifdef V8_TARGET_ARCH_RISCV64 + RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode()); + // Indirect pointer writes must use a different opcode. 
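The store-with-write-barrier cases in this file (the plain case above and the atomic and indirect-pointer variants that follow) all share the same shape: perform the store, skip the out-of-line barrier when the stored value is a Smi (unless it is an indirect-pointer handle, which always looks like a Smi), and otherwise only take the slow path when the object's page has the "pointers from here are interesting" flag set. A simplified sketch of that filtering with made-up constants, not V8's real MemoryChunk layout:

#include <cstdint>

// Hypothetical flag bit; the real values live in V8's MemoryChunk.
constexpr uint64_t kPointersFromHereAreInterestingMask = 1 << 0;

// V8 Smis are tagged with a 0 in the least significant bit.
static bool IsSmi(uintptr_t value) { return (value & 1) == 0; }

// Returns true when the out-of-line record-write stub has to run after the
// store, mirroring the JumpIfSmi / CheckPageFlag sequence above.
static bool NeedsWriteBarrier(uint64_t object_page_flags, uintptr_t value,
                              bool value_is_indirect_pointer) {
  if (!value_is_indirect_pointer && IsSmi(value)) return false;
  return (object_page_flags & kPointersFromHereAreInterestingMask) != 0;
}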
+ DCHECK_NE(mode, RecordWriteMode::kValueIsIndirectPointer); + Register object = i.InputRegister(0); + Register offset = i.InputRegister(1); + Register value = i.InputRegister(2); + + auto ool = zone()->New( + this, object, Operand(offset), value, mode, DetermineStubCallMode()); + __ AddWord(kScratchReg, object, offset); + __ AtomicStoreTaggedField(value, MemOperand(kScratchReg, 0)); + // Skip the write barrier if the value is a Smi. However, this is only + // valid if the value isn't an indirect pointer. Otherwise the value will + // be a pointer table index, which will always look like a Smi (but + // actually reference a pointer in the pointer table). + if (mode > RecordWriteMode::kValueIsIndirectPointer) { __ JumpIfSmi(value, ool->exit()); } - __ CheckPageFlag(object, scratch0, - MemoryChunk::kPointersFromHereAreInterestingMask, ne, - ool->entry()); + __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, + ne, ool->entry()); __ bind(ool->exit()); break; +#else + UNREACHABLE(); +#endif } - case kArchStoreIndirectWithWriteBarrier: + case kArchStoreIndirectWithWriteBarrier: { +#ifdef V8_TARGET_ARCH_RISCV64 + RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode()); + DCHECK_EQ(mode, RecordWriteMode::kValueIsIndirectPointer); + IndirectPointerTag tag = static_cast(i.InputInt64(3)); + DCHECK(IsValidIndirectPointerTag(tag)); + Register object = i.InputRegister(0); + Register value = i.InputRegister(2); + auto ool = zone()->New( + this, object, Operand(i.InputRegister(1)), value, mode, + DetermineStubCallMode(), tag); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); + __ AddWord(kScratchReg, object, i.InputRegister(1)); + __ StoreIndirectPointerField(value, MemOperand(kScratchReg, 0)); + __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, + ne, ool->entry()); + __ bind(ool->exit()); + break; +#else UNREACHABLE(); +#endif + } case kArchStackSlot: { FrameOffset offset = frame_access_state()->GetFrameOffset(i.InputInt32(0)); @@ -1846,26 +1965,37 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kRiscvLbu: __ Lbu(i.OutputRegister(), i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; case kRiscvLb: __ Lb(i.OutputRegister(), i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; case kRiscvSb: __ Sb(i.InputOrZeroRegister(0), i.MemoryOperand(1)); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; case kRiscvLhu: __ Lhu(i.OutputRegister(), i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; case kRiscvUlhu: __ Ulhu(i.OutputRegister(), i.MemoryOperand()); break; case kRiscvLh: __ Lh(i.OutputRegister(), i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; case kRiscvUlh: __ Ulh(i.OutputRegister(), i.MemoryOperand()); break; case kRiscvSh: + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); __ Sh(i.InputOrZeroRegister(0), i.MemoryOperand(1)); break; case kRiscvUsh: @@ -1873,6 +2003,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kRiscvLw: __ Lw(i.OutputRegister(), i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; case kRiscvUlw: __ Ulw(i.OutputRegister(), i.MemoryOperand()); @@ -1880,18 
+2012,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( #if V8_TARGET_ARCH_RISCV64 case kRiscvLwu: __ Lwu(i.OutputRegister(), i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; case kRiscvUlwu: __ Ulwu(i.OutputRegister(), i.MemoryOperand()); break; case kRiscvLd: __ Ld(i.OutputRegister(), i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; case kRiscvUld: __ Uld(i.OutputRegister(), i.MemoryOperand()); break; case kRiscvSd: __ Sd(i.InputOrZeroRegister(0), i.MemoryOperand(1)); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; case kRiscvUsd: __ Usd(i.InputOrZeroRegister(2), i.MemoryOperand()); @@ -1899,12 +2037,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( #endif case kRiscvSw: __ Sw(i.InputOrZeroRegister(0), i.MemoryOperand(1)); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; case kRiscvUsw: __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand()); break; case kRiscvLoadFloat: { __ LoadFloat(i.OutputSingleRegister(), i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; } case kRiscvULoadFloat: { @@ -1918,6 +2060,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ LoadFPRImmediate(kSingleRegZero, 0.0f); } __ StoreFloat(ft, operand); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; } case kRiscvUStoreFloat: { @@ -1932,6 +2076,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kRiscvLoadDouble: __ LoadDouble(i.OutputDoubleRegister(), i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; case kRiscvULoadDouble: __ ULoadDouble(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg); @@ -1942,6 +2088,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ LoadFPRImmediate(kDoubleRegZero, 0.0); } __ StoreDouble(ft, i.MemoryOperand(1)); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; } case kRiscvUStoreDouble: { @@ -2344,6 +2492,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ StoreSandboxedPointerField(i.InputOrZeroRegister(0), mem); break; } + case kRiscvStoreIndirectPointer: { + MemOperand mem = i.MemoryOperand(1); + __ StoreIndirectPointerField(i.InputOrZeroRegister(0), mem); + break; + } case kRiscvAtomicLoadDecompressTaggedSigned: __ AtomicDecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand()); break; @@ -2364,6 +2517,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (memOperand.offset() != 0) { __ AddWord(dst, memOperand.rm(), memOperand.offset()); } + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); __ vs(i.InputSimd128Register(0), dst, 0, VSew::E8); break; } @@ -2374,6 +2528,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (i.MemoryOperand().offset() != 0) { __ AddWord(src, i.MemoryOperand().rm(), i.MemoryOperand().offset()); } + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); __ vl(i.OutputSimd128Register(), src, 0, VSew::E8); break; } @@ -2387,6 +2542,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Simd128Register dst = i.OutputSimd128Register(); __ VU.set(kScratchReg, E32, m1); __ 
Load32U(kScratchReg, i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); __ vmv_sx(dst, kScratchReg); break; } @@ -2395,9 +2552,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ VU.set(kScratchReg, E64, m1); #if V8_TARGET_ARCH_RISCV64 __ LoadWord(kScratchReg, i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); __ vmv_sx(dst, kScratchReg); #elif V8_TARGET_ARCH_RISCV32 __ LoadDouble(kScratchDoubleReg, i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); __ vfmv_sf(dst, kScratchDoubleReg); #endif break; @@ -2405,24 +2566,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kRiscvS128LoadLane: { Simd128Register dst = i.OutputSimd128Register(); DCHECK_EQ(dst, i.InputSimd128Register(0)); - auto sz = static_cast(MiscField::decode(instr->opcode())); + auto sz = LaneSizeField::decode(opcode); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); __ LoadLane(sz, dst, i.InputUint8(1), i.MemoryOperand(2)); break; } case kRiscvS128StoreLane: { Simd128Register src = i.InputSimd128Register(0); DCHECK_EQ(src, i.InputSimd128Register(0)); - auto sz = static_cast(MiscField::decode(instr->opcode())); + auto sz = LaneSizeField::decode(opcode); __ StoreLane(sz, src, i.InputUint8(1), i.MemoryOperand(2)); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); break; } case kRiscvS128Load64ExtendS: { __ VU.set(kScratchReg, E64, m1); #if V8_TARGET_ARCH_RISCV64 __ LoadWord(kScratchReg, i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); __ vmv_vx(kSimd128ScratchReg, kScratchReg); #elif V8_TARGET_ARCH_RISCV32 __ LoadDouble(kScratchDoubleReg, i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); __ vfmv_vf(kSimd128ScratchReg, kScratchDoubleReg); #endif __ VU.set(kScratchReg, i.InputInt8(2), m1); @@ -2433,9 +2601,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ VU.set(kScratchReg, E64, m1); #if V8_TARGET_ARCH_RISCV64 __ LoadWord(kScratchReg, i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); __ vmv_vx(kSimd128ScratchReg, kScratchReg); #elif V8_TARGET_ARCH_RISCV32 __ LoadDouble(kScratchDoubleReg, i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); __ vfmv_vf(kSimd128ScratchReg, kScratchDoubleReg); #endif __ VU.set(kScratchReg, i.InputInt8(2), m1); @@ -2447,22 +2619,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( switch (i.InputInt8(2)) { case E8: __ Lb(kScratchReg, i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); __ vmv_vx(i.OutputSimd128Register(), kScratchReg); break; case E16: __ Lh(kScratchReg, i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); __ vmv_vx(i.OutputSimd128Register(), kScratchReg); break; case E32: __ Lw(kScratchReg, i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); __ vmv_vx(i.OutputSimd128Register(), kScratchReg); break; case E64: #if V8_TARGET_ARCH_RISCV64 __ LoadWord(kScratchReg, i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); __ 
vmv_vx(i.OutputSimd128Register(), kScratchReg); #elif V8_TARGET_ARCH_RISCV32 __ LoadDouble(kScratchDoubleReg, i.MemoryOperand()); + RecordTrapInfoIfNeeded(zone(), this, opcode, instr, + (__ pc_offset() - kInstrSize)); __ vfmv_vf(i.OutputSimd128Register(), kScratchDoubleReg); #endif break; @@ -3800,6 +3982,7 @@ void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder( __ Branch(GetLabel(target)); } +#if V8_ENABLE_WEBASSEMBLY void CodeGenerator::AssembleArchTrap(Instruction* instr, FlagsCondition condition) { class OutOfLineTrap final : public OutOfLineCode { @@ -3812,7 +3995,6 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, static_cast(i.InputInt32(instr_->InputCount() - 1)); GenerateCallToTrap(trap_id); } - private: void GenerateCallToTrap(TrapId trap_id) { gen_->AssembleSourcePosition(instr_); @@ -3834,6 +4016,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, Label* tlabel = ool->entry(); AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true); } +#endif // V8_ENABLE_WEBASSEMBLY // Assembles boolean materializations after an instruction. void CodeGenerator::AssembleArchBoolean(Instruction* instr, @@ -4254,6 +4437,12 @@ void CodeGenerator::AssembleConstructFrame() { // Create space for returns. __ SubWord(sp, sp, Operand(returns * kSystemPointerSize)); } + + for (int spill_slot : frame()->tagged_slots()) { + FrameOffset offset = frame_access_state()->GetFrameOffset(spill_slot); + DCHECK(offset.from_frame_pointer()); + __ StoreWord(zero_reg, MemOperand(fp, offset.offset())); + } } void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { diff --git a/deps/v8/src/compiler/backend/riscv/instruction-codes-riscv.h b/deps/v8/src/compiler/backend/riscv/instruction-codes-riscv.h index 156c6b10280e49..b55065ee42e8aa 100644 --- a/deps/v8/src/compiler/backend/riscv/instruction-codes-riscv.h +++ b/deps/v8/src/compiler/backend/riscv/instruction-codes-riscv.h @@ -12,66 +12,92 @@ namespace compiler { // RISC-V-specific opcodes that specify which assembly sequence to emit. // Most opcodes specify a single instruction. 
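The reorganized opcode lists that follow are ordinary X-macros: the opcodes that can carry a MemoryAccessMode are split into their own list so other code can expand just that subset, and the full list folds the subset back in so every opcode is still declared exactly once. A minimal sketch of the pattern with hypothetical opcode names:

// Subset that needs extra handling (e.g. a memory access mode).
#define MEMORY_ACCESS_OPCODE_LIST(V) \
  V(Load)                            \
  V(Store)

// Full list reuses the subset so each opcode appears only once.
#define OPCODE_LIST(V)         \
  MEMORY_ACCESS_OPCODE_LIST(V) \
  V(Add)                       \
  V(Sub)

#define DECLARE_OPCODE(Name) k##Name,
enum Opcode { OPCODE_LIST(DECLARE_OPCODE) };
#undef DECLARE_OPCODE

#define OPCODE_CASE(Name) \
  case k##Name:           \
    return #Name;
constexpr const char* OpcodeName(Opcode op) {
  switch (op) { OPCODE_LIST(OPCODE_CASE) }
  return "unknown";
}
#undef OPCODE_CASE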
#if V8_TARGET_ARCH_RISCV64 -#define TARGET_ARCH_OPCODE_LIST_SPECAIL(V) \ - V(RiscvAdd64) \ - V(RiscvAddOvf64) \ - V(RiscvSub64) \ - V(RiscvSubOvf64) \ - V(RiscvMulHigh64) \ - V(RiscvMulHighU64) \ - V(RiscvMul64) \ - V(RiscvMulOvf64) \ - V(RiscvDiv64) \ - V(RiscvDivU64) \ - V(RiscvMod64) \ - V(RiscvModU64) \ - V(RiscvZeroExtendWord) \ - V(RiscvSignExtendWord) \ - V(RiscvClz64) \ - V(RiscvCtz64) \ - V(RiscvPopcnt64) \ - V(RiscvShl64) \ - V(RiscvShr64) \ - V(RiscvSar64) \ - V(RiscvRor64) \ - V(RiscvFloat64RoundDown) \ - V(RiscvFloat64RoundTruncate) \ - V(RiscvFloat64RoundUp) \ - V(RiscvFloat64RoundTiesEven) \ - V(RiscvTruncLS) \ - V(RiscvTruncLD) \ - V(RiscvTruncUlS) \ - V(RiscvTruncUlD) \ - V(RiscvCvtSL) \ - V(RiscvCvtSUl) \ - V(RiscvCvtDL) \ - V(RiscvCvtDUl) \ - V(RiscvLd) \ - V(RiscvSd) \ - V(RiscvUsd) \ - V(RiscvLwu) \ - V(RiscvUlwu) \ - V(RiscvBitcastDL) \ - V(RiscvBitcastLD) \ - V(RiscvByteSwap64) \ - V(RiscvWord64AtomicLoadUint64) \ - V(RiscvWord64AtomicStoreWord64) \ - V(RiscvWord64AtomicAddUint64) \ - V(RiscvWord64AtomicSubUint64) \ - V(RiscvWord64AtomicAndUint64) \ - V(RiscvWord64AtomicOrUint64) \ - V(RiscvWord64AtomicXorUint64) \ - V(RiscvWord64AtomicExchangeUint64) \ - V(RiscvStoreCompressTagged) \ - V(RiscvLoadDecompressTaggedSigned) \ - V(RiscvLoadDecompressTagged) \ - V(RiscvLoadDecodeSandboxedPointer) \ - V(RiscvStoreEncodeSandboxedPointer) \ - V(RiscvAtomicLoadDecompressTaggedSigned) \ - V(RiscvAtomicLoadDecompressTagged) \ - V(RiscvAtomicStoreCompressTagged) \ - V(RiscvWord64AtomicCompareExchangeUint64) \ - V(RiscvCmp32) \ +// Opcodes that support a MemoryAccessMode. +#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \ + V(RiscvLd) \ + V(RiscvSd) \ + V(RiscvLwu) \ + V(RiscvWord64AtomicLoadUint64) \ + V(RiscvWord64AtomicStoreWord64) \ + V(RiscvLb) \ + V(RiscvLbu) \ + V(RiscvSb) \ + V(RiscvLh) \ + V(RiscvLhu) \ + V(RiscvSh) \ + V(RiscvLw) \ + V(RiscvSw) \ + V(RiscvLoadDouble) \ + V(RiscvStoreDouble) \ + V(RiscvStoreFloat) \ + V(RiscvLoadFloat) \ + V(RiscvStoreCompressTagged) \ + V(RiscvLoadDecompressTaggedSigned) \ + V(RiscvLoadDecompressTagged) \ + V(RiscvS128LoadSplat) \ + V(RiscvS128Load64ExtendS) \ + V(RiscvS128Load64ExtendU) \ + V(RiscvS128Load64Zero) \ + V(RiscvS128Load32Zero) \ + V(RiscvS128LoadLane) \ + V(RiscvS128StoreLane) \ + V(RiscvRvvLd) \ + V(RiscvRvvSt) + +#define TARGET_ARCH_OPCODE_LIST_SPECAIL(V) \ + TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \ + V(RiscvAdd64) \ + V(RiscvAddOvf64) \ + V(RiscvSub64) \ + V(RiscvSubOvf64) \ + V(RiscvMulHigh64) \ + V(RiscvMulHighU64) \ + V(RiscvMul64) \ + V(RiscvMulOvf64) \ + V(RiscvDiv64) \ + V(RiscvDivU64) \ + V(RiscvMod64) \ + V(RiscvModU64) \ + V(RiscvZeroExtendWord) \ + V(RiscvSignExtendWord) \ + V(RiscvClz64) \ + V(RiscvCtz64) \ + V(RiscvPopcnt64) \ + V(RiscvShl64) \ + V(RiscvShr64) \ + V(RiscvSar64) \ + V(RiscvRor64) \ + V(RiscvFloat64RoundDown) \ + V(RiscvFloat64RoundTruncate) \ + V(RiscvFloat64RoundUp) \ + V(RiscvFloat64RoundTiesEven) \ + V(RiscvTruncLS) \ + V(RiscvTruncLD) \ + V(RiscvTruncUlS) \ + V(RiscvTruncUlD) \ + V(RiscvCvtSL) \ + V(RiscvCvtSUl) \ + V(RiscvCvtDL) \ + V(RiscvCvtDUl) \ + V(RiscvUsd) \ + V(RiscvUlwu) \ + V(RiscvBitcastDL) \ + V(RiscvBitcastLD) \ + V(RiscvByteSwap64) \ + V(RiscvWord64AtomicAddUint64) \ + V(RiscvWord64AtomicSubUint64) \ + V(RiscvWord64AtomicAndUint64) \ + V(RiscvWord64AtomicOrUint64) \ + V(RiscvWord64AtomicXorUint64) \ + V(RiscvWord64AtomicExchangeUint64) \ + V(RiscvLoadDecodeSandboxedPointer) \ + V(RiscvStoreEncodeSandboxedPointer) \ + V(RiscvStoreIndirectPointer) \ + 
V(RiscvAtomicLoadDecompressTaggedSigned) \ + V(RiscvAtomicLoadDecompressTagged) \ + V(RiscvAtomicStoreCompressTagged) \ + V(RiscvWord64AtomicCompareExchangeUint64) \ + V(RiscvCmp32) \ V(RiscvTst64) #elif V8_TARGET_ARCH_RISCV32 #define TARGET_ARCH_OPCODE_LIST_SPECAIL(V) \ @@ -94,7 +120,28 @@ namespace compiler { V(RiscvWord32AtomicPairOr) \ V(RiscvWord32AtomicPairXor) \ V(RiscvWord32AtomicPairExchange) \ - V(RiscvWord32AtomicPairCompareExchange) + V(RiscvWord32AtomicPairCompareExchange) \ + V(RiscvLb) \ + V(RiscvLbu) \ + V(RiscvSb) \ + V(RiscvLh) \ + V(RiscvLhu) \ + V(RiscvSh) \ + V(RiscvLw) \ + V(RiscvSw) \ + V(RiscvLoadDouble) \ + V(RiscvStoreDouble) \ + V(RiscvStoreFloat) \ + V(RiscvLoadFloat) \ + V(RiscvS128LoadSplat) \ + V(RiscvS128Load64ExtendS) \ + V(RiscvS128Load64ExtendU) \ + V(RiscvS128Load64Zero) \ + V(RiscvS128Load32Zero) \ + V(RiscvS128LoadLane) \ + V(RiscvS128StoreLane) \ + V(RiscvRvvLd) \ + V(RiscvRvvSt) #endif #define TARGET_ARCH_OPCODE_LIST_COMMON(V) \ @@ -169,27 +216,15 @@ namespace compiler { V(RiscvCvtSW) \ V(RiscvCvtSUw) \ V(RiscvCvtDUw) \ - V(RiscvLb) \ - V(RiscvLbu) \ - V(RiscvSb) \ - V(RiscvLh) \ V(RiscvUlh) \ - V(RiscvLhu) \ V(RiscvUlhu) \ - V(RiscvSh) \ V(RiscvUsh) \ V(RiscvUld) \ - V(RiscvLw) \ V(RiscvUlw) \ - V(RiscvSw) \ V(RiscvUsw) \ - V(RiscvLoadFloat) \ - V(RiscvULoadFloat) \ - V(RiscvStoreFloat) \ V(RiscvUStoreFloat) \ - V(RiscvLoadDouble) \ + V(RiscvULoadFloat) \ V(RiscvULoadDouble) \ - V(RiscvStoreDouble) \ V(RiscvUStoreDouble) \ V(RiscvBitcastInt32ToFloat32) \ V(RiscvBitcastFloat32ToInt32) \ @@ -280,17 +315,8 @@ namespace compiler { V(RiscvI8x16Popcnt) \ V(RiscvVnot) \ V(RiscvS128Select) \ - V(RiscvS128Load64Zero) \ - V(RiscvS128Load32Zero) \ V(RiscvV128AnyTrue) \ V(RiscvI8x16Shuffle) \ - V(RiscvS128LoadSplat) \ - V(RiscvS128Load64ExtendS) \ - V(RiscvS128Load64ExtendU) \ - V(RiscvS128LoadLane) \ - V(RiscvS128StoreLane) \ - V(RiscvRvvLd) \ - V(RiscvRvvSt) \ V(RiscvVmv) \ V(RiscvVandVv) \ V(RiscvVnotVv) \ diff --git a/deps/v8/src/compiler/backend/riscv/instruction-scheduler-riscv.cc b/deps/v8/src/compiler/backend/riscv/instruction-scheduler-riscv.cc index 713a7efb460d9e..c93fe5a74e0aed 100644 --- a/deps/v8/src/compiler/backend/riscv/instruction-scheduler-riscv.cc +++ b/deps/v8/src/compiler/backend/riscv/instruction-scheduler-riscv.cc @@ -55,6 +55,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kRiscvTruncUlS: case kRiscvLoadDecodeSandboxedPointer: case kRiscvStoreEncodeSandboxedPointer: + case kRiscvStoreIndirectPointer: case kRiscvCmp32: #elif V8_TARGET_ARCH_RISCV32 case kRiscvAdd32: diff --git a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv.h b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv.h index f785b77c210233..c319800a45403d 100644 --- a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv.h +++ b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv.h @@ -136,18 +136,6 @@ class RiscvOperandGeneratorT final : public OperandGeneratorT { } }; -template -void InstructionSelectorT::VisitProtectedStore(node_t node) { - // TODO(eholk) - UNIMPLEMENTED(); -} - -template -void InstructionSelectorT::VisitProtectedLoad(node_t node) { - // TODO(eholk) - UNIMPLEMENTED(); -} - template void VisitRR(InstructionSelectorT* selector, ArchOpcode opcode, typename Adapter::node_t node) { @@ -388,7 +376,8 @@ static void VisitBinop(InstructionSelectorT* selector, template void InstructionSelectorT::VisitStackSlot(node_t node) { StackSlotRepresentation rep = this->stack_slot_representation_of(node); - int slot = 
frame_->AllocateSpillSlot(rep.size(), rep.alignment()); + int slot = + frame_->AllocateSpillSlot(rep.size(), rep.alignment(), rep.is_tagged()); OperandGenerator g(this); Emit(kArchStackSlot, g.DefineAsRegister(node), @@ -415,43 +404,104 @@ void InstructionSelectorT::VisitLoadTransform(node_t node) { UNIMPLEMENTED(); } else { LoadTransformParameters params = LoadTransformParametersOf(node->op()); - + bool is_protected = (params.kind == MemoryAccessKind::kProtected); + InstructionCode opcode = kArchNop; switch (params.transformation) { case LoadTransformation::kS128Load8Splat: - EmitS128Load(this, node, kRiscvS128LoadSplat, E8, m1); + opcode = kRiscvS128LoadSplat; + if (is_protected) { + opcode |= + AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + EmitS128Load(this, node, opcode, E8, m1); break; case LoadTransformation::kS128Load16Splat: - EmitS128Load(this, node, kRiscvS128LoadSplat, E16, m1); + opcode = kRiscvS128LoadSplat; + if (is_protected) { + opcode |= + AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + EmitS128Load(this, node, opcode, E16, m1); break; case LoadTransformation::kS128Load32Splat: - EmitS128Load(this, node, kRiscvS128LoadSplat, E32, m1); + opcode = kRiscvS128LoadSplat; + if (is_protected) { + opcode |= + AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + EmitS128Load(this, node, opcode, E32, m1); break; case LoadTransformation::kS128Load64Splat: - EmitS128Load(this, node, kRiscvS128LoadSplat, E64, m1); + opcode = kRiscvS128LoadSplat; + if (is_protected) { + opcode |= + AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + EmitS128Load(this, node, opcode, E64, m1); break; case LoadTransformation::kS128Load8x8S: - EmitS128Load(this, node, kRiscvS128Load64ExtendS, E16, m1); + opcode = kRiscvS128Load64ExtendS; + if (is_protected) { + opcode |= + AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + EmitS128Load(this, node, opcode, E16, m1); break; case LoadTransformation::kS128Load8x8U: - EmitS128Load(this, node, kRiscvS128Load64ExtendU, E16, m1); + opcode = kRiscvS128Load64ExtendU; + if (is_protected) { + opcode |= + AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + EmitS128Load(this, node, opcode, E16, m1); break; case LoadTransformation::kS128Load16x4S: - EmitS128Load(this, node, kRiscvS128Load64ExtendS, E32, m1); + opcode = kRiscvS128Load64ExtendS; + if (is_protected) { + opcode |= + AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + EmitS128Load(this, node, opcode, E32, m1); break; case LoadTransformation::kS128Load16x4U: - EmitS128Load(this, node, kRiscvS128Load64ExtendU, E32, m1); + opcode = kRiscvS128Load64ExtendU; + if (is_protected) { + opcode |= + AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + EmitS128Load(this, node, opcode, E32, m1); break; case LoadTransformation::kS128Load32x2S: - EmitS128Load(this, node, kRiscvS128Load64ExtendS, E64, m1); + opcode = kRiscvS128Load64ExtendS; + if (is_protected) { + opcode |= + AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + EmitS128Load(this, node, opcode, E64, m1); break; case LoadTransformation::kS128Load32x2U: - EmitS128Load(this, node, kRiscvS128Load64ExtendU, E64, m1); + opcode = kRiscvS128Load64ExtendU; + if (is_protected) { + opcode |= + AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + EmitS128Load(this, node, opcode, E64, m1); break; case LoadTransformation::kS128Load32Zero: - EmitS128Load(this, node, kRiscvS128Load32Zero, E32, m1); + 
opcode = kRiscvS128Load32Zero; + if (is_protected) { + opcode |= + AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + EmitS128Load(this, node, opcode, E32, m1); break; case LoadTransformation::kS128Load64Zero: - EmitS128Load(this, node, kRiscvS128Load64Zero, E64, m1); + opcode = kRiscvS128Load64Zero; + if (is_protected) { + opcode |= + AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + EmitS128Load(this, node, opcode, E64, m1); break; default: UNIMPLEMENTED(); @@ -675,58 +725,6 @@ void EmitWordCompareZero(InstructionSelectorT* selector, g.UseRegisterOrImmediateZero(value), cont); } -template -void VisitAtomicExchange(InstructionSelectorT* selector, Node* node, - ArchOpcode opcode, AtomicWidth width) { - RiscvOperandGeneratorT g(selector); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* value = node->InputAt(2); - - AddressingMode addressing_mode = kMode_MRI; - InstructionOperand inputs[3]; - size_t input_count = 0; - inputs[input_count++] = g.UseUniqueRegister(base); - inputs[input_count++] = g.UseUniqueRegister(index); - inputs[input_count++] = g.UseUniqueRegister(value); - InstructionOperand outputs[1]; - outputs[0] = g.UseUniqueRegister(node); - InstructionOperand temp[3]; - temp[0] = g.TempRegister(); - temp[1] = g.TempRegister(); - temp[2] = g.TempRegister(); - InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) | - AtomicWidthField::encode(width); - selector->Emit(code, 1, outputs, input_count, inputs, 3, temp); -} - -template -void VisitAtomicCompareExchange(InstructionSelectorT* selector, - Node* node, ArchOpcode opcode, - AtomicWidth width) { - RiscvOperandGeneratorT g(selector); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* old_value = node->InputAt(2); - Node* new_value = node->InputAt(3); - - AddressingMode addressing_mode = kMode_MRI; - InstructionOperand inputs[4]; - size_t input_count = 0; - inputs[input_count++] = g.UseUniqueRegister(base); - inputs[input_count++] = g.UseUniqueRegister(index); - inputs[input_count++] = g.UseUniqueRegister(old_value); - inputs[input_count++] = g.UseUniqueRegister(new_value); - InstructionOperand outputs[1]; - outputs[0] = g.UseUniqueRegister(node); - InstructionOperand temp[3]; - temp[0] = g.TempRegister(); - temp[1] = g.TempRegister(); - temp[2] = g.TempRegister(); - InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) | - AtomicWidthField::encode(width); - selector->Emit(code, 1, outputs, input_count, inputs, 3, temp); -} template void InstructionSelectorT::VisitFloat32Equal(node_t node) { @@ -1037,11 +1035,7 @@ void InstructionSelectorT::VisitFloat64Min(node_t node) { template void InstructionSelectorT::VisitTruncateFloat64ToWord32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - VisitRR(this, kArchTruncateDoubleToI, node); - } + VisitRR(this, kArchTruncateDoubleToI, node); } template diff --git a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv32.cc b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv32.cc index 3aab14d20f4e7e..5cfd4d7fbca930 100644 --- a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv32.cc +++ b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv32.cc @@ -9,6 +9,8 @@ #include "src/compiler/backend/riscv/instruction-selector-riscv.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" +#include "src/compiler/turboshaft/operations.h" +#include 
"src/compiler/turboshaft/opmasks.h" namespace v8 { namespace internal { @@ -107,6 +109,69 @@ void EmitLoad(InstructionSelectorT* selector, } } +template <> +void EmitLoad(InstructionSelectorT* selector, + typename TurboshaftAdapter::node_t node, InstructionCode opcode, + typename TurboshaftAdapter::node_t output) { + RiscvOperandGeneratorT g(selector); + using namespace turboshaft; // NOLINT(build/namespaces) + const Operation& op = selector->Get(node); + const LoadOp& load = op.Cast(); + + // The LoadStoreSimplificationReducer transforms all loads into + // *(base + index). + OpIndex base = load.base(); + OptionalOpIndex index = load.index(); + DCHECK_EQ(load.offset, 0); + DCHECK_EQ(load.element_size_log2, 0); + + InstructionOperand inputs[3]; + size_t input_count = 0; + InstructionOperand output_op; + + // If output is valid, use that as the output register. This is used when we + // merge a conversion into the load. + output_op = g.DefineAsRegister(output.valid() ? output : node); + + const Operation& base_op = selector->Get(base); + if (base_op.Is()) { + UNIMPLEMENTED(); + } + + if (base_op.Is()) { + DCHECK(selector->is_integer_constant(selector->value(index))); + input_count = 1; + inputs[0] = + g.UseImmediate64(selector->integer_constant(selector->value(index))); + opcode |= AddressingModeField::encode(kMode_Root); + selector->Emit(opcode, 1, &output_op, input_count, inputs); + return; + } + + if (index.has_value() && g.CanBeImmediate(selector->value(index), opcode)) { + selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(output.valid() ? output : node), + g.UseRegister(base), + index.has_value() ? g.UseImmediate(selector->value(index)) + : g.UseImmediate(0)); + } else { + if (index.has_value()) { + InstructionOperand addr_reg = g.TempRegister(); + selector->Emit(kRiscvAdd32 | AddressingModeField::encode(kMode_None), + addr_reg, g.UseRegister(selector->value(index)), + g.UseRegister(base)); + // Emit desired load opcode, using temp addr_reg. + selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(output.valid() ? output : node), + addr_reg, g.TempImmediate(0)); + } else { + selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(output.valid() ? output : node), + g.UseRegister(base), g.TempImmediate(0)); + } + } +} + template void EmitS128Load(InstructionSelectorT* selector, Node* node, InstructionCode opcode, VSew sew, Vlmul lmul) { @@ -179,58 +244,143 @@ void InstructionSelectorT::VisitLoadLane(node_t node) { template void InstructionSelectorT::VisitLoad(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - LoadRepresentation load_rep = LoadRepresentationOf(node->op()); + auto load = this->load_view(node); + LoadRepresentation load_rep = load.loaded_rep(); + InstructionCode opcode = kArchNop; + switch (load_rep.representation()) { + case MachineRepresentation::kFloat32: + opcode = kRiscvLoadFloat; + break; + case MachineRepresentation::kFloat64: + opcode = kRiscvLoadDouble; + break; + case MachineRepresentation::kBit: // Fall through. + case MachineRepresentation::kWord8: + opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb; + break; + case MachineRepresentation::kWord16: + opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh; + break; + case MachineRepresentation::kTaggedSigned: // Fall through. + case MachineRepresentation::kTaggedPointer: // Fall through. + case MachineRepresentation::kTagged: // Fall through. 
+ case MachineRepresentation::kWord32: + opcode = kRiscvLw; + break; + case MachineRepresentation::kSimd128: + opcode = kRiscvRvvLd; + break; + case MachineRepresentation::kCompressedPointer: + case MachineRepresentation::kCompressed: + case MachineRepresentation::kSandboxedPointer: + case MachineRepresentation::kMapWord: // Fall through. + case MachineRepresentation::kWord64: + case MachineRepresentation::kNone: + case MachineRepresentation::kSimd256: // Fall through. + case MachineRepresentation::kIndirectPointer: + UNREACHABLE(); + } - InstructionCode opcode = kArchNop; - switch (load_rep.representation()) { + EmitLoad(this, node, opcode); +} + +template +void InstructionSelectorT::VisitStorePair(node_t node) { + UNREACHABLE(); +} + +template <> +void InstructionSelectorT::VisitStore(node_t node) { + RiscvOperandGeneratorT g(this); + typename TurboshaftAdapter::StoreView store_view = this->store_view(node); + node_t base = store_view.base(); + optional_node_t index = store_view.index(); + node_t value = store_view.value(); + + WriteBarrierKind write_barrier_kind = + store_view.stored_rep().write_barrier_kind(); + MachineRepresentation rep = store_view.stored_rep().representation(); + + // TODO(riscv): I guess this could be done in a better way. + if (write_barrier_kind != kNoWriteBarrier && + V8_LIKELY(!v8_flags.disable_write_barriers)) { + UNREACHABLE(); + } else { + InstructionCode code; + switch (rep) { case MachineRepresentation::kFloat32: - opcode = kRiscvLoadFloat; + code = kRiscvStoreFloat; break; case MachineRepresentation::kFloat64: - opcode = kRiscvLoadDouble; + code = kRiscvStoreDouble; break; case MachineRepresentation::kBit: // Fall through. case MachineRepresentation::kWord8: - opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb; + code = kRiscvSb; break; case MachineRepresentation::kWord16: - opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh; + code = kRiscvSh; break; case MachineRepresentation::kTaggedSigned: // Fall through. case MachineRepresentation::kTaggedPointer: // Fall through. - case MachineRepresentation::kTagged: // Fall through. + case MachineRepresentation::kTagged: case MachineRepresentation::kWord32: - opcode = kRiscvLw; + code = kRiscvSw; break; case MachineRepresentation::kSimd128: - opcode = kRiscvRvvLd; + code = kRiscvRvvSt; break; - case MachineRepresentation::kCompressedPointer: + case MachineRepresentation::kCompressedPointer: // Fall through. case MachineRepresentation::kCompressed: case MachineRepresentation::kSandboxedPointer: case MachineRepresentation::kMapWord: // Fall through. - case MachineRepresentation::kWord64: case MachineRepresentation::kNone: + case MachineRepresentation::kWord64: case MachineRepresentation::kSimd256: // Fall through. case MachineRepresentation::kIndirectPointer: UNREACHABLE(); } - EmitLoad(this, node, opcode); + if (this->is_load_root_register(base)) { + Emit(code | AddressingModeField::encode(kMode_Root), g.NoOutput(), + g.UseRegisterOrImmediateZero(value), + index.has_value() ? g.UseImmediate(this->value(index)) + : g.UseImmediate(0)); + return; + } + + if (index.has_value() && g.CanBeImmediate(this->value(index), code)) { + Emit(code | AddressingModeField::encode(kMode_MRI), g.NoOutput(), + g.UseRegisterOrImmediateZero(value), g.UseRegister(base), + index.has_value() ? 
g.UseImmediate(this->value(index)) + : g.UseImmediate(0)); + } else { + if (index.has_value()) { + InstructionOperand addr_reg = g.TempRegister(); + Emit(kRiscvAdd32 | AddressingModeField::encode(kMode_None), addr_reg, + g.UseRegister(this->value(index)), g.UseRegister(base)); + // Emit desired store opcode, using temp addr_reg. + Emit(code | AddressingModeField::encode(kMode_MRI), g.NoOutput(), + g.UseRegisterOrImmediateZero(value), addr_reg, g.TempImmediate(0)); + } else { + Emit(code | AddressingModeField::encode(kMode_MRI), g.NoOutput(), + g.UseRegisterOrImmediateZero(value), g.UseRegister(base), + g.UseImmediate(0)); + } + } } } template -void InstructionSelectorT::VisitStorePair(node_t node) { - UNREACHABLE(); +void InstructionSelectorT::VisitProtectedLoad(node_t node) { + // TODO(eholk) + UNIMPLEMENTED(); } -template <> -void InstructionSelectorT::VisitStore(turboshaft::OpIndex) { - UNREACHABLE(); +template +void InstructionSelectorT::VisitProtectedStore(node_t node) { + // TODO(eholk) + UNIMPLEMENTED(); } template <> @@ -318,16 +468,10 @@ void InstructionSelectorT::VisitStore(Node* node) { } } -template <> -void InstructionSelectorT::VisitWord32And( - turboshaft::OpIndex) { - UNIMPLEMENTED(); -} - -template <> -void InstructionSelectorT::VisitWord32And(Node* node) { - VisitBinop(this, node, kRiscvAnd, true, - kRiscvAnd); +template +void InstructionSelectorT::VisitWord32And(node_t node) { + VisitBinop(this, node, kRiscvAnd, true, + kRiscvAnd); } template @@ -921,11 +1065,7 @@ template void VisitWordCompare(InstructionSelectorT* selector, typename Adapter::node_t node, FlagsContinuationT* cont) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - VisitWordCompare(selector, node, kRiscvCmp, cont, false); - } + VisitWordCompare(selector, node, kRiscvCmp, cont, false); } template @@ -1050,122 +1190,214 @@ void InstructionSelectorT::VisitStackPointerGreaterThan( } // Shared routine for word comparisons against zero. -template -void InstructionSelectorT::VisitWordCompareZero( - node_t user, node_t value, FlagsContinuationT* cont) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - // Try to combine with comparisons against 0 by simply inverting the branch. - while (CanCover(user, value)) { - if (value->opcode() == IrOpcode::kWord32Equal) { - Int32BinopMatcher m(value); - if (!m.right().Is(0)) break; - user = value; - value = m.left().node(); - } else if (value->opcode() == IrOpcode::kWord64Equal) { - Int64BinopMatcher m(value); - if (!m.right().Is(0)) break; - user = value; - value = m.left().node(); - } else { - break; - } +template <> +void InstructionSelectorT::VisitWordCompareZero( + node_t user, node_t value, FlagsContinuationT* cont) { + // Try to combine with comparisons against 0 by simply inverting the branch. 
+ while (CanCover(user, value)) { + if (value->opcode() == IrOpcode::kWord32Equal) { + Int32BinopMatcher m(value); + if (!m.right().Is(0)) break; + user = value; + value = m.left().node(); + } else if (value->opcode() == IrOpcode::kWord64Equal) { + Int64BinopMatcher m(value); + if (!m.right().Is(0)) break; + user = value; + value = m.left().node(); + } else { + break; + } - cont->Negate(); + cont->Negate(); + } + + if (CanCover(user, value)) { + switch (value->opcode()) { + case IrOpcode::kWord32Equal: + cont->OverwriteAndNegateIfEqual(kEqual); + return VisitWordCompare(this, value, cont); + case IrOpcode::kInt32LessThan: + cont->OverwriteAndNegateIfEqual(kSignedLessThan); + return VisitWordCompare(this, value, cont); + case IrOpcode::kInt32LessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); + return VisitWordCompare(this, value, cont); + case IrOpcode::kUint32LessThan: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitWordCompare(this, value, cont); + case IrOpcode::kUint32LessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + return VisitWordCompare(this, value, cont); + case IrOpcode::kFloat32Equal: + cont->OverwriteAndNegateIfEqual(kEqual); + return VisitFloat32Compare(this, value, cont); + case IrOpcode::kFloat32LessThan: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitFloat32Compare(this, value, cont); + case IrOpcode::kFloat32LessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + return VisitFloat32Compare(this, value, cont); + case IrOpcode::kFloat64Equal: + cont->OverwriteAndNegateIfEqual(kEqual); + return VisitFloat64Compare(this, value, cont); + case IrOpcode::kFloat64LessThan: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitFloat64Compare(this, value, cont); + case IrOpcode::kFloat64LessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + return VisitFloat64Compare(this, value, cont); + case IrOpcode::kProjection: + // Check if this is the overflow output projection of an + // WithOverflow node. + if (ProjectionIndexOf(value->op()) == 1u) { + // We cannot combine the WithOverflow with this branch + // unless the 0th projection (the use of the actual value of the + // is either nullptr, which means there's no use of the + // actual value, or was already defined, which means it is scheduled + // *AFTER* this branch). 
+ Node* const node = value->InputAt(0); + Node* const result = NodeProperties::FindProjection(node, 0); + if (result == nullptr || IsDefined(result)) { + switch (node->opcode()) { + case IrOpcode::kInt32AddWithOverflow: + cont->OverwriteAndNegateIfEqual(kOverflow); + return VisitBinop( + this, node, kRiscvAddOvf, cont); + case IrOpcode::kInt32SubWithOverflow: + cont->OverwriteAndNegateIfEqual(kOverflow); + return VisitBinop( + this, node, kRiscvSubOvf, cont); + case IrOpcode::kInt32MulWithOverflow: + cont->OverwriteAndNegateIfEqual(kOverflow); + return VisitBinop( + this, node, kRiscvMulOvf32, cont); + case IrOpcode::kInt64AddWithOverflow: + case IrOpcode::kInt64SubWithOverflow: + TRACE_UNIMPL(); + break; + default: + break; + } + } + } + break; + case IrOpcode::kWord32And: + return VisitWordCompare(this, value, kRiscvTst32, cont, true); + case IrOpcode::kStackPointerGreaterThan: + cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); + return VisitStackPointerGreaterThan(value, cont); + default: + break; } + } - if (CanCover(user, value)) { - switch (value->opcode()) { - case IrOpcode::kWord32Equal: - cont->OverwriteAndNegateIfEqual(kEqual); - return VisitWordCompare(this, value, cont); - case IrOpcode::kInt32LessThan: - cont->OverwriteAndNegateIfEqual(kSignedLessThan); - return VisitWordCompare(this, value, cont); - case IrOpcode::kInt32LessThanOrEqual: - cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); - return VisitWordCompare(this, value, cont); - case IrOpcode::kUint32LessThan: - cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); - return VisitWordCompare(this, value, cont); - case IrOpcode::kUint32LessThanOrEqual: - cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + // Continuation could not be combined with a compare, emit compare against + // 0. + EmitWordCompareZero(this, value, cont); +} + +template <> +void InstructionSelectorT::VisitWordCompareZero( + node_t user, node_t value, FlagsContinuation* cont) { + using namespace turboshaft; // NOLINT(build/namespaces) + // Try to combine with comparisons against 0 by simply inverting the branch. 
+ while (const ComparisonOp* equal = + this->TryCast(value)) { + if (!CanCover(user, value)) break; + if (!MatchIntegralZero(equal->right())) break; + + user = value; + value = equal->left(); + cont->Negate(); + } + + const Operation& value_op = Get(value); + if (CanCover(user, value)) { + if (const ComparisonOp* comparison = value_op.TryCast()) { + switch (comparison->rep.value()) { + case RegisterRepresentation::Word32(): + cont->OverwriteAndNegateIfEqual( + GetComparisonFlagCondition(*comparison)); return VisitWordCompare(this, value, cont); - case IrOpcode::kFloat32Equal: - cont->OverwriteAndNegateIfEqual(kEqual); - return VisitFloat32Compare(this, value, cont); - case IrOpcode::kFloat32LessThan: - cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); - return VisitFloat32Compare(this, value, cont); - case IrOpcode::kFloat32LessThanOrEqual: - cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); - return VisitFloat32Compare(this, value, cont); - case IrOpcode::kFloat64Equal: - cont->OverwriteAndNegateIfEqual(kEqual); - return VisitFloat64Compare(this, value, cont); - case IrOpcode::kFloat64LessThan: - cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); - return VisitFloat64Compare(this, value, cont); - case IrOpcode::kFloat64LessThanOrEqual: - cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); - return VisitFloat64Compare(this, value, cont); - case IrOpcode::kProjection: - // Check if this is the overflow output projection of an - // WithOverflow node. - if (ProjectionIndexOf(value->op()) == 1u) { - // We cannot combine the WithOverflow with this branch - // unless the 0th projection (the use of the actual value of the - // is either nullptr, which means there's no use of the - // actual value, or was already defined, which means it is scheduled - // *AFTER* this branch). - Node* const node = value->InputAt(0); - Node* const result = NodeProperties::FindProjection(node, 0); - if (result == nullptr || IsDefined(result)) { - switch (node->opcode()) { - case IrOpcode::kInt32AddWithOverflow: + case RegisterRepresentation::Float32(): + switch (comparison->kind) { + case ComparisonOp::Kind::kEqual: + cont->OverwriteAndNegateIfEqual(kEqual); + return VisitFloat32Compare(this, value, cont); + case ComparisonOp::Kind::kSignedLessThan: + cont->OverwriteAndNegateIfEqual(kFloatLessThan); + return VisitFloat32Compare(this, value, cont); + case ComparisonOp::Kind::kSignedLessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual); + return VisitFloat32Compare(this, value, cont); + default: + UNREACHABLE(); + } + case RegisterRepresentation::Float64(): + switch (comparison->kind) { + case ComparisonOp::Kind::kEqual: + cont->OverwriteAndNegateIfEqual(kEqual); + return VisitFloat64Compare(this, value, cont); + case ComparisonOp::Kind::kSignedLessThan: + cont->OverwriteAndNegateIfEqual(kFloatLessThan); + return VisitFloat64Compare(this, value, cont); + case ComparisonOp::Kind::kSignedLessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual); + return VisitFloat64Compare(this, value, cont); + default: + UNREACHABLE(); + } + default: + break; + } + } else if (const ProjectionOp* projection = + value_op.TryCast()) { + // Check if this is the overflow output projection of an + // WithOverflow node. 
+ if (projection->index == 1u) { + // We cannot combine the WithOverflow with this branch + // unless the 0th projection (the use of the actual value of the + // is either nullptr, which means there's no use of the + // actual value, or was already defined, which means it is scheduled + // *AFTER* this branch). + OpIndex node = projection->input(); + OpIndex result = FindProjection(node, 0); + if (!result.valid() || IsDefined(result)) { + if (const OverflowCheckedBinopOp* binop = + TryCast(node)) { + const bool is64 = binop->rep == WordRepresentation::Word64(); + if (is64) { + TRACE_UNIMPL(); + } else { + switch (binop->kind) { + case OverflowCheckedBinopOp::Kind::kSignedAdd: cont->OverwriteAndNegateIfEqual(kOverflow); - return VisitBinop( + return VisitBinop( this, node, kRiscvAddOvf, cont); - case IrOpcode::kInt32SubWithOverflow: + case OverflowCheckedBinopOp::Kind::kSignedSub: cont->OverwriteAndNegateIfEqual(kOverflow); - return VisitBinop( + return VisitBinop( this, node, kRiscvSubOvf, cont); - case IrOpcode::kInt32MulWithOverflow: + case OverflowCheckedBinopOp::Kind::kSignedMul: cont->OverwriteAndNegateIfEqual(kOverflow); - return VisitBinop( + return VisitBinop( this, node, kRiscvMulOvf32, cont); - case IrOpcode::kInt64AddWithOverflow: - case IrOpcode::kInt64SubWithOverflow: - TRACE_UNIMPL(); - break; - default: - break; } } } - break; - case IrOpcode::kWord32And: - return VisitWordCompare(this, value, kRiscvTst32, cont, true); - case IrOpcode::kStackPointerGreaterThan: - cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); - return VisitStackPointerGreaterThan(value, cont); - default: - break; + } } } - - // Continuation could not be combined with a compare, emit compare against - // 0. - EmitWordCompareZero(this, value, cont); } + + // Continuation could not be combined with a compare, emit compare against + // 0. 
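For context on the combining rule in the hunks above: a branch that tests projection 1 (the overflow bit) of an Int32Add/Sub/MulWithOverflow is folded into a single overflow-setting instruction with a kOverflow flags continuation, provided the value projection is unused or already scheduled. A toy sketch using hypothetical node types, not V8's API:

#include <cstdio>

enum class Op { kInt32AddWithOverflow, kProjection, kOther };

struct Node {
  Op op;
  int projection_index = -1;             // meaningful for kProjection
  const Node* input = nullptr;           // the projected-from node
  bool value_projection_defined = true;  // projection 0 unused or scheduled?
};

const char* SelectBranchCondition(const Node& cond) {
  if (cond.op == Op::kProjection && cond.projection_index == 1 &&
      cond.input != nullptr &&
      cond.input->op == Op::kInt32AddWithOverflow &&
      cond.input->value_projection_defined) {
    // One overflow-setting add is emitted; the branch consumes the flag.
    return "kRiscvAddOvf with kOverflow continuation";
  }
  // Otherwise the selector materializes the value and compares it to zero.
  return "compare against zero";
}

int main() {
  Node add{Op::kInt32AddWithOverflow};
  Node ovf{Op::kProjection, 1, &add};
  std::printf("%s\n", SelectBranchCondition(ovf));  // fused form
  return 0;
}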
+ EmitWordCompareZero(this, value, cont); } template void InstructionSelectorT::VisitWord32Equal(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); Int32BinopMatcher m(node); if (m.right().Is(0)) { @@ -1173,7 +1405,22 @@ void InstructionSelectorT::VisitWord32Equal(node_t node) { } VisitWordCompare(this, node, &cont); +} + +template <> +void InstructionSelectorT::VisitWord32Equal(node_t node) { + using namespace turboshaft; // NOLINT(build/namespaces) + const Operation& equal = Get(node); + DCHECK(equal.Is()); + OpIndex left = equal.input(0); + OpIndex right = equal.input(1); + OpIndex user = node; + FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); + + if (MatchZero(right)) { + return VisitWordCompareZero(user, left, &cont); } + VisitWordCompare(this, node, &cont); } template @@ -1305,6 +1552,59 @@ void InstructionSelectorT::VisitWord32AtomicStore(node_t node) { } } +template +void VisitAtomicExchange(InstructionSelectorT* selector, Node* node, + ArchOpcode opcode, AtomicWidth width) { + RiscvOperandGeneratorT g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + + AddressingMode addressing_mode = kMode_MRI; + InstructionOperand inputs[3]; + size_t input_count = 0; + inputs[input_count++] = g.UseUniqueRegister(base); + inputs[input_count++] = g.UseUniqueRegister(index); + inputs[input_count++] = g.UseUniqueRegister(value); + InstructionOperand outputs[1]; + outputs[0] = g.UseUniqueRegister(node); + InstructionOperand temp[3]; + temp[0] = g.TempRegister(); + temp[1] = g.TempRegister(); + temp[2] = g.TempRegister(); + InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) | + AtomicWidthField::encode(width); + selector->Emit(code, 1, outputs, input_count, inputs, 3, temp); +} + +template +void VisitAtomicCompareExchange(InstructionSelectorT* selector, + Node* node, ArchOpcode opcode, + AtomicWidth width) { + RiscvOperandGeneratorT g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* old_value = node->InputAt(2); + Node* new_value = node->InputAt(3); + + AddressingMode addressing_mode = kMode_MRI; + InstructionOperand inputs[4]; + size_t input_count = 0; + inputs[input_count++] = g.UseUniqueRegister(base); + inputs[input_count++] = g.UseUniqueRegister(index); + inputs[input_count++] = g.UseUniqueRegister(old_value); + inputs[input_count++] = g.UseUniqueRegister(new_value); + InstructionOperand outputs[1]; + outputs[0] = g.UseUniqueRegister(node); + InstructionOperand temp[3]; + temp[0] = g.TempRegister(); + temp[1] = g.TempRegister(); + temp[2] = g.TempRegister(); + InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) | + AtomicWidthField::encode(width); + selector->Emit(code, 1, outputs, input_count, inputs, 3, temp); +} + template void InstructionSelectorT::VisitWord32AtomicExchange(node_t node) { if constexpr (Adapter::IsTurboshaft) { diff --git a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv64.cc index 657d6177bad2b2..4807682aaab5c1 100644 --- a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv64.cc +++ b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv64.cc @@ -310,7 +310,6 @@ void EmitS128Load(InstructionSelectorT* selector, Node* node, RiscvOperandGeneratorT g(selector); Node* base = node->InputAt(0); Node* index = 
node->InputAt(1); - if (g.CanBeImmediate(index, opcode)) { selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), g.DefineAsRegister(node), g.UseRegister(base), @@ -335,8 +334,11 @@ void InstructionSelectorT::VisitStoreLane(node_t node) { StoreLaneParameters params = StoreLaneParametersOf(node->op()); LoadStoreLaneParams f(params.rep, params.laneidx); InstructionCode opcode = kRiscvS128StoreLane; - opcode |= MiscField::encode(f.sz); - + opcode |= + LaneSizeField::encode(ElementSizeInBytes(params.rep) * kBitsPerByte); + if (params.kind == MemoryAccessKind::kProtected) { + opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } RiscvOperandGeneratorT g(this); Node* base = node->InputAt(0); Node* index = node->InputAt(1); @@ -359,10 +361,16 @@ void InstructionSelectorT::VisitLoadLane(node_t node) { UNIMPLEMENTED(); } else { LoadLaneParameters params = LoadLaneParametersOf(node->op()); + DCHECK(params.rep == MachineType::Int8() || + params.rep == MachineType::Int16() || + params.rep == MachineType::Int32() || + params.rep == MachineType::Int64()); LoadStoreLaneParams f(params.rep.representation(), params.laneidx); InstructionCode opcode = kRiscvS128LoadLane; - opcode |= MiscField::encode(f.sz); - + opcode |= LaneSizeField::encode(params.rep.MemSize() * kBitsPerByte); + if (params.kind == MemoryAccessKind::kProtected) { + opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } RiscvOperandGeneratorT g(this); Node* base = node->InputAt(0); Node* index = node->InputAt(1); @@ -434,6 +442,15 @@ void InstructionSelectorT::VisitLoad(node_t node) { case MachineRepresentation::kNone: UNREACHABLE(); } + bool traps_on_null; + if (load.is_protected(&traps_on_null)) { + if (traps_on_null) { + opcode |= + AccessModeField::encode(kMemoryAccessProtectedNullDereference); + } else { + opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + } EmitLoad(this, node, opcode); } @@ -442,6 +459,11 @@ void InstructionSelectorT::VisitStorePair(node_t node) { UNREACHABLE(); } +template +void InstructionSelectorT::VisitProtectedLoad(node_t node) { + VisitLoad(node); +} + template void InstructionSelectorT::VisitStore(typename Adapter::node_t node) { RiscvOperandGeneratorT g(this); @@ -457,8 +479,8 @@ void InstructionSelectorT::VisitStore(typename Adapter::node_t node) { // TODO(riscv): I guess this could be done in a better way. if (write_barrier_kind != kNoWriteBarrier && V8_LIKELY(!v8_flags.disable_write_barriers)) { - DCHECK(CanBeTaggedPointer(rep)); - InstructionOperand inputs[3]; + DCHECK(CanBeTaggedOrCompressedOrIndirectPointer(rep)); + InstructionOperand inputs[4]; size_t input_count = 0; inputs[input_count++] = g.UseUniqueRegister(base); inputs[input_count++] = g.UseUniqueRegister(index); @@ -467,67 +489,87 @@ void InstructionSelectorT::VisitStore(typename Adapter::node_t node) { WriteBarrierKindToRecordWriteMode(write_barrier_kind); InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; size_t const temp_count = arraysize(temps); - InstructionCode code = kArchStoreWithWriteBarrier; + InstructionCode code; + if (rep == MachineRepresentation::kIndirectPointer) { + DCHECK_EQ(write_barrier_kind, kIndirectPointerWriteBarrier); + // In this case we need to add the IndirectPointerTag as additional input. 
+ code = kArchStoreIndirectWithWriteBarrier; + node_t tag = store_view.indirect_pointer_tag(); + inputs[input_count++] = g.UseImmediate(tag); + } else { + code = kArchStoreWithWriteBarrier; + } code |= RecordWriteModeField::encode(record_write_mode); + if (store_view.is_store_trap_on_null()) { + code |= AccessModeField::encode(kMemoryAccessProtectedNullDereference); + } Emit(code, 0, nullptr, input_count, inputs, temp_count, temps); } else { - ArchOpcode opcode; + InstructionCode code; switch (rep) { case MachineRepresentation::kFloat32: - opcode = kRiscvStoreFloat; + code = kRiscvStoreFloat; break; case MachineRepresentation::kFloat64: - opcode = kRiscvStoreDouble; + code = kRiscvStoreDouble; break; case MachineRepresentation::kBit: // Fall through. case MachineRepresentation::kWord8: - opcode = kRiscvSb; + code = kRiscvSb; break; case MachineRepresentation::kWord16: - opcode = kRiscvSh; + code = kRiscvSh; break; case MachineRepresentation::kWord32: - opcode = kRiscvSw; + code = kRiscvSw; break; case MachineRepresentation::kTaggedSigned: // Fall through. case MachineRepresentation::kTaggedPointer: // Fall through. case MachineRepresentation::kTagged: #ifdef V8_COMPRESS_POINTERS - opcode = kRiscvStoreCompressTagged; + code = kRiscvStoreCompressTagged; break; #endif case MachineRepresentation::kWord64: - opcode = kRiscvSd; + code = kRiscvSd; break; case MachineRepresentation::kSimd128: - opcode = kRiscvRvvSt; + code = kRiscvRvvSt; break; case MachineRepresentation::kCompressedPointer: // Fall through. case MachineRepresentation::kCompressed: #ifdef V8_COMPRESS_POINTERS - opcode = kRiscvStoreCompressTagged; + code = kRiscvStoreCompressTagged; break; #else UNREACHABLE(); #endif case MachineRepresentation::kSandboxedPointer: - opcode = kRiscvStoreEncodeSandboxedPointer; + code = kRiscvStoreEncodeSandboxedPointer; + break; + case MachineRepresentation::kIndirectPointer: + code = kRiscvStoreIndirectPointer; break; case MachineRepresentation::kSimd256: // Fall through. case MachineRepresentation::kMapWord: // Fall through. - case MachineRepresentation::kIndirectPointer: // Fall through. case MachineRepresentation::kNone: UNREACHABLE(); } if (this->is_load_root_register(base)) { - Emit(opcode | AddressingModeField::encode(kMode_Root), g.NoOutput(), + Emit(code | AddressingModeField::encode(kMode_Root), g.NoOutput(), g.UseRegisterOrImmediateZero(value), g.UseImmediate(index)); return; } - if (g.CanBeImmediate(index, opcode)) { - Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), + if (store_view.is_store_trap_on_null()) { + code |= AccessModeField::encode(kMemoryAccessProtectedNullDereference); + } else if (store_view.access_kind() == MemoryAccessKind::kProtected) { + code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + + if (g.CanBeImmediate(index, code)) { + Emit(code | AddressingModeField::encode(kMode_MRI), g.NoOutput(), g.UseRegisterOrImmediateZero(value), g.UseRegister(base), g.UseImmediate(index)); } else { @@ -535,12 +577,17 @@ void InstructionSelectorT::VisitStore(typename Adapter::node_t node) { Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg, g.UseRegister(index), g.UseRegister(base)); // Emit desired store opcode, using temp addr_reg. 
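The store-emission fallback just above follows a common pattern: if the index fits the instruction's immediate field, emit a single reg+imm access; otherwise add base and index into a scratch register and access it with a zero displacement. A minimal sketch, assuming RISC-V's 12-bit signed store immediate:

#include <cstdint>
#include <cstdio>

bool FitsInImm12(int64_t v) { return v >= -2048 && v <= 2047; }  // RV S-type

void EmitStore(int64_t index_imm) {
  if (FitsInImm12(index_imm)) {
    // Immediate form: one instruction.
    std::printf("sd  value, %lld(base)\n", static_cast<long long>(index_imm));
  } else {
    // Materialize base+index (kRiscvAdd64 above), then store with offset 0.
    std::printf("add scratch, base, index\n");
    std::printf("sd  value, 0(scratch)\n");
  }
}

int main() {
  EmitStore(16);       // immediate form
  EmitStore(1 << 20);  // scratch-register form
  return 0;
}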
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), + Emit(code | AddressingModeField::encode(kMode_MRI), g.NoOutput(), g.UseRegisterOrImmediateZero(value), addr_reg, g.TempImmediate(0)); } } } +template +void InstructionSelectorT::VisitProtectedStore(node_t node) { + VisitStore(node); +} + template void InstructionSelectorT::VisitWord32And(node_t node) { VisitBinop(this, node, kRiscvAnd32, true, @@ -1830,12 +1877,13 @@ void InstructionSelectorT::VisitUnalignedLoad(node_t node) { if constexpr (Adapter::IsTurboshaft) { UNIMPLEMENTED(); } else { - LoadRepresentation load_rep = LoadRepresentationOf(node->op()); + auto load = this->load_view(node); + LoadRepresentation load_rep = load.loaded_rep(); RiscvOperandGeneratorT g(this); Node* base = node->InputAt(0); Node* index = node->InputAt(1); - ArchOpcode opcode; + InstructionCode opcode = kArchNop; switch (load_rep.representation()) { case MachineRepresentation::kFloat32: opcode = kRiscvULoadFloat; @@ -1871,7 +1919,15 @@ void InstructionSelectorT::VisitUnalignedLoad(node_t node) { case MachineRepresentation::kNone: UNREACHABLE(); } - + bool traps_on_null; + if (load.is_protected(&traps_on_null)) { + if (traps_on_null) { + opcode |= + AccessModeField::encode(kMemoryAccessProtectedNullDereference); + } else { + opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + } if (g.CanBeImmediate(index, opcode)) { Emit(opcode | AddressingModeField::encode(kMode_MRI), g.DefineAsRegister(node), g.UseRegister(base), @@ -2125,6 +2181,10 @@ void VisitAtomicLoad(InstructionSelectorT* selector, UNREACHABLE(); } + if (atomic_load_params.kind() == MemoryAccessKind::kProtected) { + code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + if (g.CanBeImmediate(index, code)) { selector->Emit(code | AddressingModeField::encode(kMode_MRI) | AtomicWidthField::encode(width), @@ -2204,6 +2264,9 @@ void VisitAtomicStore(InstructionSelectorT* selector, Node* node, } code |= AtomicWidthField::encode(width); + if (store_params.kind() == MemoryAccessKind::kProtected) { + code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } if (g.CanBeImmediate(index, code)) { selector->Emit(code | AddressingModeField::encode(kMode_MRI) | AtomicWidthField::encode(width), @@ -2224,7 +2287,8 @@ void VisitAtomicStore(InstructionSelectorT* selector, Node* node, template void VisitAtomicBinop(InstructionSelectorT* selector, Node* node, - ArchOpcode opcode, AtomicWidth width) { + ArchOpcode opcode, AtomicWidth width, + MemoryAccessKind access_kind) { RiscvOperandGeneratorT g(selector); Node* base = node->InputAt(0); Node* index = node->InputAt(1); @@ -2245,6 +2309,9 @@ void VisitAtomicBinop(InstructionSelectorT* selector, Node* node, temps[3] = g.TempRegister(); InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) | AtomicWidthField::encode(width); + if (access_kind == MemoryAccessKind::kProtected) { + code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } selector->Emit(code, 1, outputs, input_count, inputs, 4, temps); } @@ -2841,100 +2908,176 @@ void InstructionSelectorT::VisitWord64AtomicStore(node_t node) { } template -void InstructionSelectorT::VisitWord32AtomicExchange(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - ArchOpcode opcode; - MachineType type = AtomicOpType(node->op()); - if (type == MachineType::Int8()) { - opcode = kAtomicExchangeInt8; - } else if (type == MachineType::Uint8()) { - opcode = kAtomicExchangeUint8; - } 
else if (type == MachineType::Int16()) { - opcode = kAtomicExchangeInt16; - } else if (type == MachineType::Uint16()) { - opcode = kAtomicExchangeUint16; - } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { - opcode = kAtomicExchangeWord32; - } else { - UNREACHABLE(); - } +void VisitAtomicExchange(InstructionSelectorT* selector, Node* node, + ArchOpcode opcode, AtomicWidth width, + MemoryAccessKind access_kind) { + RiscvOperandGeneratorT g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); - VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32); + AddressingMode addressing_mode = kMode_MRI; + InstructionOperand inputs[3]; + size_t input_count = 0; + inputs[input_count++] = g.UseUniqueRegister(base); + inputs[input_count++] = g.UseUniqueRegister(index); + inputs[input_count++] = g.UseUniqueRegister(value); + InstructionOperand outputs[1]; + outputs[0] = g.UseUniqueRegister(node); + InstructionOperand temp[3]; + temp[0] = g.TempRegister(); + temp[1] = g.TempRegister(); + temp[2] = g.TempRegister(); + InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) | + AtomicWidthField::encode(width); + if (access_kind == MemoryAccessKind::kProtected) { + code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); } + selector->Emit(code, 1, outputs, input_count, inputs, 3, temp); } template -void InstructionSelectorT::VisitWord64AtomicExchange(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); +void VisitAtomicCompareExchange(InstructionSelectorT* selector, + Node* node, ArchOpcode opcode, + AtomicWidth width, + MemoryAccessKind access_kind) { + RiscvOperandGeneratorT g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* old_value = node->InputAt(2); + Node* new_value = node->InputAt(3); + + AddressingMode addressing_mode = kMode_MRI; + InstructionOperand inputs[4]; + size_t input_count = 0; + inputs[input_count++] = g.UseUniqueRegister(base); + inputs[input_count++] = g.UseUniqueRegister(index); + inputs[input_count++] = g.UseUniqueRegister(old_value); + inputs[input_count++] = g.UseUniqueRegister(new_value); + InstructionOperand outputs[1]; + outputs[0] = g.UseUniqueRegister(node); + InstructionOperand temp[3]; + temp[0] = g.TempRegister(); + temp[1] = g.TempRegister(); + temp[2] = g.TempRegister(); + InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) | + AtomicWidthField::encode(width); + if (access_kind == MemoryAccessKind::kProtected) { + code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + selector->Emit(code, 1, outputs, input_count, inputs, 3, temp); +} + +template <> +void InstructionSelectorT::VisitWord32AtomicExchange( + node_t node) { + UNIMPLEMENTED(); +} + +template <> +void InstructionSelectorT::VisitWord32AtomicExchange( + Node* node) { + ArchOpcode opcode; + AtomicOpParameters params = AtomicOpParametersOf(node->op()); + if (params.type() == MachineType::Int8()) { + opcode = kAtomicExchangeInt8; + } else if (params.type() == MachineType::Uint8()) { + opcode = kAtomicExchangeUint8; + } else if (params.type() == MachineType::Int16()) { + opcode = kAtomicExchangeInt16; + } else if (params.type() == MachineType::Uint16()) { + opcode = kAtomicExchangeUint16; + } else if (params.type() == MachineType::Int32() || + params.type() == MachineType::Uint32()) { + opcode = kAtomicExchangeWord32; } else { - ArchOpcode opcode; - MachineType type = 
AtomicOpType(node->op()); - if (type == MachineType::Uint8()) { - opcode = kAtomicExchangeUint8; - } else if (type == MachineType::Uint16()) { - opcode = kAtomicExchangeUint16; - } else if (type == MachineType::Uint32()) { - opcode = kAtomicExchangeWord32; - } else if (type == MachineType::Uint64()) { - opcode = kRiscvWord64AtomicExchangeUint64; - } else { - UNREACHABLE(); - } - VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64); + UNREACHABLE(); } + + VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32, params.kind()); } -template -void InstructionSelectorT::VisitWord32AtomicCompareExchange( +template <> +void InstructionSelectorT::VisitWord64AtomicExchange( node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + UNIMPLEMENTED(); +} + +template <> +void InstructionSelectorT::VisitWord64AtomicExchange( + Node* node) { + ArchOpcode opcode; + AtomicOpParameters params = AtomicOpParametersOf(node->op()); + if (params.type() == MachineType::Uint8()) { + opcode = kAtomicExchangeUint8; + } else if (params.type() == MachineType::Uint16()) { + opcode = kAtomicExchangeUint16; + } else if (params.type() == MachineType::Uint32()) { + opcode = kAtomicExchangeWord32; + } else if (params.type() == MachineType::Uint64()) { + opcode = kRiscvWord64AtomicExchangeUint64; } else { - ArchOpcode opcode; - MachineType type = AtomicOpType(node->op()); - if (type == MachineType::Int8()) { - opcode = kAtomicCompareExchangeInt8; - } else if (type == MachineType::Uint8()) { - opcode = kAtomicCompareExchangeUint8; - } else if (type == MachineType::Int16()) { - opcode = kAtomicCompareExchangeInt16; - } else if (type == MachineType::Uint16()) { - opcode = kAtomicCompareExchangeUint16; - } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { - opcode = kAtomicCompareExchangeWord32; - } else { - UNREACHABLE(); - } + UNREACHABLE(); + } + VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64, params.kind()); +} + +template <> +void InstructionSelectorT::VisitWord32AtomicCompareExchange( + node_t node) { + UNIMPLEMENTED(); +} - VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32); +template <> +void InstructionSelectorT::VisitWord32AtomicCompareExchange( + Node* node) { + ArchOpcode opcode; + AtomicOpParameters params = AtomicOpParametersOf(node->op()); + if (params.type() == MachineType::Int8()) { + opcode = kAtomicCompareExchangeInt8; + } else if (params.type() == MachineType::Uint8()) { + opcode = kAtomicCompareExchangeUint8; + } else if (params.type() == MachineType::Int16()) { + opcode = kAtomicCompareExchangeInt16; + } else if (params.type() == MachineType::Uint16()) { + opcode = kAtomicCompareExchangeUint16; + } else if (params.type() == MachineType::Int32() || + params.type() == MachineType::Uint32()) { + opcode = kAtomicCompareExchangeWord32; + } else { + UNREACHABLE(); } + + VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32, + params.kind()); } -template -void InstructionSelectorT::VisitWord64AtomicCompareExchange( +template <> +void InstructionSelectorT::VisitWord64AtomicCompareExchange( node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + UNIMPLEMENTED(); +} + +template <> +void InstructionSelectorT::VisitWord64AtomicCompareExchange( + Node* node) { + ArchOpcode opcode; + AtomicOpParameters params = AtomicOpParametersOf(node->op()); + if (params.type() == MachineType::Uint8()) { + opcode = kAtomicCompareExchangeUint8; + } else if (params.type() == MachineType::Uint16()) { + opcode = 
kAtomicCompareExchangeUint16; + } else if (params.type() == MachineType::Uint32()) { + opcode = kAtomicCompareExchangeWord32; + } else if (params.type() == MachineType::Uint64()) { + opcode = kRiscvWord64AtomicCompareExchangeUint64; } else { - ArchOpcode opcode; - MachineType type = AtomicOpType(node->op()); - if (type == MachineType::Uint8()) { - opcode = kAtomicCompareExchangeUint8; - } else if (type == MachineType::Uint16()) { - opcode = kAtomicCompareExchangeUint16; - } else if (type == MachineType::Uint32()) { - opcode = kAtomicCompareExchangeWord32; - } else if (type == MachineType::Uint64()) { - opcode = kRiscvWord64AtomicCompareExchangeUint64; - } else { - UNREACHABLE(); - } - VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64); + UNREACHABLE(); } + VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64, + params.kind()); } + template void InstructionSelectorT::VisitWord32AtomicBinaryOperation( node_t node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op, @@ -2943,22 +3086,23 @@ void InstructionSelectorT::VisitWord32AtomicBinaryOperation( UNIMPLEMENTED(); } else { ArchOpcode opcode; - MachineType type = AtomicOpType(node->op()); - if (type == MachineType::Int8()) { + AtomicOpParameters params = AtomicOpParametersOf(node->op()); + if (params.type() == MachineType::Int8()) { opcode = int8_op; - } else if (type == MachineType::Uint8()) { + } else if (params.type() == MachineType::Uint8()) { opcode = uint8_op; - } else if (type == MachineType::Int16()) { + } else if (params.type() == MachineType::Int16()) { opcode = int16_op; - } else if (type == MachineType::Uint16()) { + } else if (params.type() == MachineType::Uint16()) { opcode = uint16_op; - } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { + } else if (params.type() == MachineType::Int32() || + params.type() == MachineType::Uint32()) { opcode = word32_op; } else { UNREACHABLE(); } - VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32); + VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32, params.kind()); } } @@ -2989,19 +3133,19 @@ void InstructionSelectorT::VisitWord64AtomicBinaryOperation( UNIMPLEMENTED(); } else { ArchOpcode opcode; - MachineType type = AtomicOpType(node->op()); - if (type == MachineType::Uint8()) { + AtomicOpParameters params = AtomicOpParametersOf(node->op()); + if (params.type() == MachineType::Uint8()) { opcode = uint8_op; - } else if (type == MachineType::Uint16()) { + } else if (params.type() == MachineType::Uint16()) { opcode = uint16_op; - } else if (type == MachineType::Uint32()) { + } else if (params.type() == MachineType::Uint32()) { opcode = uint32_op; - } else if (type == MachineType::Uint64()) { + } else if (params.type() == MachineType::Uint64()) { opcode = uint64_op; } else { UNREACHABLE(); } - VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64); + VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64, params.kind()); } } diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc index f22568ad7d3fab..cc85eaa10dc7c9 100644 --- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc +++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc @@ -11,7 +11,7 @@ #include "src/compiler/backend/gap-resolver.h" #include "src/compiler/node-matchers.h" #include "src/compiler/osr.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #if V8_ENABLE_WEBASSEMBLY #include "src/wasm/wasm-objects.h" @@ -167,36 +167,13 @@ 
namespace { class OutOfLineRecordWrite final : public OutOfLineCode { public: - OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset, + OutOfLineRecordWrite(CodeGenerator* gen, Register object, MemOperand operand, Register value, Register scratch0, Register scratch1, RecordWriteMode mode, StubCallMode stub_mode, UnwindingInfoWriter* unwinding_info_writer) : OutOfLineCode(gen), object_(object), - offset_(offset), - offset_immediate_(0), - value_(value), - scratch0_(scratch0), - scratch1_(scratch1), - mode_(mode), -#if V8_ENABLE_WEBASSEMBLY - stub_mode_(stub_mode), -#endif // V8_ENABLE_WEBASSEMBLY - must_save_lr_(!gen->frame_access_state()->has_frame()), - unwinding_info_writer_(unwinding_info_writer), - zone_(gen->zone()) { - DCHECK(!AreAliased(object, offset, scratch0, scratch1)); - DCHECK(!AreAliased(value, offset, scratch0, scratch1)); - } - - OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset, - Register value, Register scratch0, Register scratch1, - RecordWriteMode mode, StubCallMode stub_mode, - UnwindingInfoWriter* unwinding_info_writer) - : OutOfLineCode(gen), - object_(object), - offset_(no_reg), - offset_immediate_(offset), + operand_(operand), value_(value), scratch0_(scratch0), scratch1_(scratch1), @@ -207,6 +184,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode { must_save_lr_(!gen->frame_access_state()->has_frame()), unwinding_info_writer_(unwinding_info_writer), zone_(gen->zone()) { + DCHECK(!AreAliased(object, scratch0, scratch1)); + DCHECK(!AreAliased(value, scratch0, scratch1)); } void Generate() final { @@ -216,12 +195,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { __ CheckPageFlag(value_, scratch0_, MemoryChunk::kPointersToHereAreInterestingMask, eq, exit()); - if (offset_ == no_reg) { - __ AddS64(scratch1_, object_, Operand(offset_immediate_)); - } else { - DCHECK_EQ(0, offset_immediate_); - __ AddS64(scratch1_, object_, offset_); - } + __ lay(scratch1_, operand_); SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters() ? SaveFPRegsMode::kSave : SaveFPRegsMode::kIgnore; @@ -249,8 +223,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { private: Register const object_; - Register const offset_; - int32_t const offset_immediate_; // Valid if offset_ == no_reg. 
+ MemOperand const operand_; Register const value_; Register const scratch0_; Register const scratch1_; @@ -273,28 +246,28 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) { // unsigned number never less than 0 if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64) return CC_NOP; - V8_FALLTHROUGH; + [[fallthrough]]; case kSignedLessThan: return lt; case kUnsignedGreaterThanOrEqual: // unsigned number always greater than or equal 0 if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64) return CC_ALWAYS; - V8_FALLTHROUGH; + [[fallthrough]]; case kSignedGreaterThanOrEqual: return ge; case kUnsignedLessThanOrEqual: // unsigned number never less than 0 if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64) return CC_EQ; - V8_FALLTHROUGH; + [[fallthrough]]; case kSignedLessThanOrEqual: return le; case kUnsignedGreaterThan: // unsigned number always greater than or equal 0 if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64) return ne; - V8_FALLTHROUGH; + [[fallthrough]]; case kSignedGreaterThan: return gt; case kOverflow: @@ -1310,21 +1283,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( set_isolate_data_slots = SetIsolateDataSlots::kNo; } #endif // V8_ENABLE_WEBASSEMBLY + int pc_offset; if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); - __ CallCFunction(ref, num_gp_parameters, num_fp_parameters, - set_isolate_data_slots); + pc_offset = __ CallCFunction(ref, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots, &return_location); } else { Register func = i.InputRegister(0); - __ CallCFunction(func, num_gp_parameters, num_fp_parameters, - set_isolate_data_slots); + pc_offset = __ CallCFunction(func, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots, &return_location); } - __ bind(&return_location); -#if V8_ENABLE_WEBASSEMBLY - if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { - RecordSafepoint(instr->reference_map()); - } -#endif // V8_ENABLE_WEBASSEMBLY + RecordSafepoint(instr->reference_map(), pc_offset); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack // pointer in CallCFunction. However, for certain architectures (e.g. 
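The OutOfLineRecordWrite refactor above replaces the separate register-offset and immediate-offset constructors with a single MemOperand, so the out-of-line path needs only one effective-address computation (the `lay`). A simplified stand-in, not V8's MemOperand, showing how one representation covers both former cases:

#include <cstdint>
#include <cstdio>

struct MemOperandSketch {
  const char* base;
  const char* index;  // nullptr when the offset is an immediate
  int32_t displacement;
};

void ComputeSlotAddress(const char* dst, const MemOperandSketch& op) {
  if (op.index == nullptr) {
    std::printf("lay %s, %d(%s)\n", dst, op.displacement, op.base);
  } else {
    std::printf("lay %s, %d(%s,%s)\n", dst, op.displacement, op.index, op.base);
  }
}

int main() {
  ComputeSlotAddress("r1", {"r2", nullptr, 16});  // former immediate-offset ctor
  ComputeSlotAddress("r1", {"r2", "r3", 0});      // former register-offset ctor
  return 0;
}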
@@ -1427,11 +1396,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArchStoreWithWriteBarrier: { RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode()); + AddressingMode addressing_mode = + AddressingModeField::decode(instr->opcode()); Register object = i.InputRegister(0); - Register value = i.InputRegister(2); + size_t index = 0; + MemOperand operand = i.MemoryOperand(&addressing_mode, &index); + Register value = i.InputRegister(index); Register scratch0 = i.TempRegister(0); Register scratch1 = i.TempRegister(1); - OutOfLineRecordWrite* ool; if (v8_flags.debug_code) { // Checking that |value| is not a cleared weakref: our write barrier @@ -1440,22 +1412,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Check(ne, AbortReason::kOperandIsCleared); } - AddressingMode addressing_mode = - AddressingModeField::decode(instr->opcode()); - if (addressing_mode == kMode_MRI) { - int32_t offset = i.InputInt32(1); - ool = zone()->New( - this, object, offset, value, scratch0, scratch1, mode, - DetermineStubCallMode(), &unwinding_info_writer_); - __ StoreTaggedField(value, MemOperand(object, offset), r0); - } else { - DCHECK_EQ(kMode_MRR, addressing_mode); - Register offset(i.InputRegister(1)); - ool = zone()->New( - this, object, offset, value, scratch0, scratch1, mode, - DetermineStubCallMode(), &unwinding_info_writer_); - __ StoreTaggedField(value, MemOperand(object, offset)); - } + OutOfLineRecordWrite* ool = zone()->New( + this, object, operand, value, scratch0, scratch1, mode, + DetermineStubCallMode(), &unwinding_info_writer_); + __ StoreTaggedField(value, operand); + if (mode > RecordWriteMode::kValueIsPointer) { __ JumpIfSmi(value, ool->exit()); } @@ -3529,6 +3490,15 @@ void CodeGenerator::AssembleConstructFrame() { const int returns = frame()->GetReturnSlotCount(); // Create space for returns. 
__ AllocateStackSpace(returns * kSystemPointerSize); + + if (!frame()->tagged_slots().IsEmpty()) { + __ mov(kScratchReg, Operand(0)); + for (int spill_slot : frame()->tagged_slots()) { + FrameOffset offset = frame_access_state()->GetFrameOffset(spill_slot); + DCHECK(offset.from_frame_pointer()); + __ StoreU64(kScratchReg, MemOperand(fp, offset.offset())); + } + } } void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc index feeea3bcb5b0d8..43f6f531f4bb57 100644 --- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc +++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc @@ -8,6 +8,7 @@ #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" #include "src/compiler/turboshaft/operations.h" +#include "src/compiler/turboshaft/opmasks.h" #include "src/execution/frame-constants.h" namespace v8 { @@ -81,6 +82,83 @@ OperandModes immediateModeMask = #define MulOperandMode \ (OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm) +template +struct BaseWithScaledIndexAndDisplacementMatch { + using node_t = typename Adapter::node_t; + + node_t base = {}; + node_t index = {}; + int scale = 0; + int64_t displacement = 0; + DisplacementMode displacement_mode = kPositiveDisplacement; +}; + +base::Optional> +TryMatchBaseWithScaledIndexAndDisplacement64( + InstructionSelectorT* selector, + turboshaft::OpIndex node) { + using namespace turboshaft; // NOLINT(build/namespaces) + + // The BaseWithIndexAndDisplacementMatcher canonicalizes the order of + // displacements and scale factors that are used as inputs, so instead of + // enumerating all possible patterns by brute force, checking for node + // clusters using the following templates in the following order suffices + // to find all of the interesting cases (S = index * scale, B = base + // input, D = displacement input): + // + // (S + (B + D)) + // (S + (B + B)) + // (S + D) + // (S + B) + // ((S + D) + B) + // ((S + B) + D) + // ((B + D) + B) + // ((B + B) + D) + // (B + D) + // (B + B) + BaseWithScaledIndexAndDisplacementMatch result; + result.displacement_mode = kPositiveDisplacement; + + const Operation& op = selector->Get(node); + if (const LoadOp* load = op.TryCast()) { + result.base = load->base(); + result.index = load->index().value_or_invalid(); + result.scale = load->element_size_log2; + result.displacement = load->offset; + if (load->kind.tagged_base) result.displacement -= kHeapObjectTag; + return result; + } else if (const StoreOp* store = op.TryCast()) { + result.base = store->base(); + result.index = store->index().value_or_invalid(); + result.scale = store->element_size_log2; + result.displacement = store->offset; + if (store->kind.tagged_base) result.displacement -= kHeapObjectTag; + return result; + } else if (op.Is()) { + UNIMPLEMENTED(); +#ifdef V8_ENABLE_WEBASSEMBLY + } else if (const Simd128LaneMemoryOp* lane_op = + op.TryCast()) { + result.base = lane_op->base(); + result.index = lane_op->index(); + result.scale = 0; + result.displacement = 0; + if (lane_op->kind.tagged_base) result.displacement -= kHeapObjectTag; + return result; + } else if (const Simd128LoadTransformOp* load_transform = + op.TryCast()) { + result.base = load_transform->base(); + result.index = load_transform->index(); + DCHECK_EQ(load_transform->offset, 0); + result.scale = 0; + result.displacement = 0; + 
DCHECK(!load_transform->load_kind.tagged_base); + return result; +#endif // V8_ENABLE_WEBASSEMBLY + } + return base::nullopt; +} + // Adds S390-specific methods for generating operands. template class S390OperandGeneratorT final : public OperandGeneratorT { @@ -90,37 +168,47 @@ class S390OperandGeneratorT final : public OperandGeneratorT { explicit S390OperandGeneratorT(InstructionSelectorT* selector) : super(selector) {} - InstructionOperand UseOperand(Node* node, OperandModes mode) { + InstructionOperand UseOperand(node_t node, OperandModes mode) { if (CanBeImmediate(node, mode)) { return UseImmediate(node); } return UseRegister(node); } - InstructionOperand UseAnyExceptImmediate(Node* node) { - if (NodeProperties::IsConstant(node)) + InstructionOperand UseAnyExceptImmediate(node_t node) { + if (this->is_integer_constant(node)) return UseRegister(node); else return this->Use(node); } - int64_t GetImmediate(Node* node) { - if (node->opcode() == IrOpcode::kInt32Constant) - return OpParameter(node->op()); - else if (node->opcode() == IrOpcode::kInt64Constant) - return OpParameter(node->op()); - else - UNIMPLEMENTED(); + int64_t GetImmediate(node_t node) { + if constexpr (Adapter::IsTurboshaft) { + turboshaft::ConstantOp* op = + this->turboshaft_graph() + ->Get(node) + .template TryCast(); + switch (op->kind) { + case turboshaft::ConstantOp::Kind::kWord32: + return op->word32(); + case turboshaft::ConstantOp::Kind::kWord64: + return op->word64(); + default: + UNIMPLEMENTED(); + } + } else { + if (node->opcode() == IrOpcode::kInt32Constant) + return OpParameter(node->op()); + else if (node->opcode() == IrOpcode::kInt64Constant) + return OpParameter(node->op()); + else + UNIMPLEMENTED(); + } } - bool CanBeImmediate(Node* node, OperandModes mode) { - int64_t value; - if (node->opcode() == IrOpcode::kInt32Constant) - value = OpParameter(node->op()); - else if (node->opcode() == IrOpcode::kInt64Constant) - value = OpParameter(node->op()); - else - return false; + bool CanBeImmediate(node_t node, OperandModes mode) { + if (!selector()->is_integer_constant(node)) return false; + int64_t value = selector()->integer_constant(node); return CanBeImmediate(value, mode); } @@ -143,20 +231,16 @@ class S390OperandGeneratorT final : public OperandGeneratorT { return false; } - bool CanBeMemoryOperand(InstructionCode opcode, Node* user, Node* input, + bool CanBeMemoryOperand(InstructionCode opcode, node_t user, node_t input, int effect_level) { - if ((input->opcode() != IrOpcode::kLoad && - input->opcode() != IrOpcode::kLoadImmutable) || - !selector()->CanCover(user, input)) { - return false; - } - + if (!this->IsLoadOrLoadImmutable(input)) return false; + if (!selector()->CanCover(user, input)) return false; if (effect_level != selector()->GetEffectLevel(input)) { return false; } MachineRepresentation rep = - LoadRepresentationOf(input->op()).representation(); + this->load_view(input).loaded_rep().representation(); switch (opcode) { case kS390_Cmp64: case kS390_LoadAndTestWord64: @@ -172,41 +256,41 @@ class S390OperandGeneratorT final : public OperandGeneratorT { return false; } - AddressingMode GenerateMemoryOperandInputs(Node* index, Node* base, - Node* displacement, - DisplacementMode displacement_mode, - InstructionOperand inputs[], - size_t* input_count) { + AddressingMode GenerateMemoryOperandInputs( + optional_node_t index, node_t base, int64_t displacement, + DisplacementMode displacement_mode, InstructionOperand inputs[], + size_t* input_count, + RegisterUseKind reg_kind = 
RegisterUseKind::kUseRegister) { AddressingMode mode = kMode_MRI; - if (base != nullptr) { - inputs[(*input_count)++] = UseRegister(base); - if (index != nullptr) { - inputs[(*input_count)++] = UseRegister(index); - if (displacement != nullptr) { - inputs[(*input_count)++] = displacement_mode - ? UseNegatedImmediate(displacement) - : UseImmediate(displacement); + if (this->valid(base)) { + inputs[(*input_count)++] = UseRegister(base, reg_kind); + if (this->valid(index)) { + inputs[(*input_count)++] = UseRegister(this->value(index), reg_kind); + if (displacement != 0) { + inputs[(*input_count)++] = UseImmediate( + displacement_mode == kNegativeDisplacement ? -displacement + : displacement); mode = kMode_MRRI; } else { mode = kMode_MRR; } } else { - if (displacement == nullptr) { + if (displacement == 0) { mode = kMode_MR; } else { - inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement - ? UseNegatedImmediate(displacement) - : UseImmediate(displacement); + inputs[(*input_count)++] = UseImmediate( + displacement_mode == kNegativeDisplacement ? -displacement + : displacement); mode = kMode_MRI; } } } else { - DCHECK_NOT_NULL(index); - inputs[(*input_count)++] = UseRegister(index); - if (displacement != nullptr) { - inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement - ? UseNegatedImmediate(displacement) - : UseImmediate(displacement); + DCHECK(this->valid(index)); + inputs[(*input_count)++] = UseRegister(this->value(index), reg_kind); + if (displacement != 0) { + inputs[(*input_count)++] = UseImmediate( + displacement_mode == kNegativeDisplacement ? -displacement + : displacement); mode = kMode_MRI; } else { mode = kMode_MR; @@ -215,9 +299,59 @@ class S390OperandGeneratorT final : public OperandGeneratorT { return mode; } + AddressingMode GenerateMemoryOperandInputs( + Node* index, Node* base, Node* displacement, + DisplacementMode displacement_mode, InstructionOperand inputs[], + size_t* input_count, + RegisterUseKind reg_kind = RegisterUseKind::kUseRegister) { + if constexpr (Adapter::IsTurboshaft) { + // Turboshaft is not using this overload. 
+ UNREACHABLE(); + } else { + int64_t displacement_value; + if (displacement == nullptr) { + displacement_value = 0; + } else if (displacement->opcode() == IrOpcode::kInt32Constant) { + displacement_value = OpParameter(displacement->op()); + } else if (displacement->opcode() == IrOpcode::kInt64Constant) { + displacement_value = OpParameter(displacement->op()); + } else { + UNREACHABLE(); + } + return GenerateMemoryOperandInputs(index, base, displacement_value, + displacement_mode, inputs, + input_count); + } + } + AddressingMode GetEffectiveAddressMemoryOperand( - Node* operand, InstructionOperand inputs[], size_t* input_count, + typename Adapter::node_t operand, InstructionOperand inputs[], + size_t* input_count, OperandModes immediate_mode = OperandMode::kInt20Imm) { + if constexpr (Adapter::IsTurboshaft) { + auto m = + TryMatchBaseWithScaledIndexAndDisplacement64(selector(), operand); + DCHECK(m.has_value()); + if (TurboshaftAdapter::valid(m->base) && + this->Get(m->base).template Is()) { + DCHECK(!this->valid(m->index)); + DCHECK_EQ(m->scale, 0); + inputs[(*input_count)++] = + UseImmediate(static_cast(m->displacement)); + return kMode_Root; + } else if (CanBeImmediate(m->displacement, immediate_mode)) { + DCHECK_EQ(m->scale, 0); + return GenerateMemoryOperandInputs(m->index, m->base, m->displacement, + m->displacement_mode, inputs, + input_count); + } else { + DCHECK_EQ(m->displacement, 0); + inputs[(*input_count)++] = UseRegister(m->base); + inputs[(*input_count)++] = UseRegister(m->index); + return kMode_MRR; + } + + } else { #if V8_TARGET_ARCH_S390X BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowInputSwap); @@ -243,9 +377,10 @@ class S390OperandGeneratorT final : public OperandGeneratorT { inputs[(*input_count)++] = UseRegister(operand->InputAt(1)); return kMode_MRR; } + } } - bool CanBeBetterLeftOperand(Node* node) const { + bool CanBeBetterLeftOperand(node_t node) const { return !selector()->IsLive(node); } @@ -380,7 +515,42 @@ ArchOpcode SelectLoadOpcode(LoadRepresentation load_rep) { V(Word32Shr) \ V(Word32Sar) -bool ProduceWord32Result(Node* node) { +template +bool ProduceWord32Result(InstructionSelectorT* selector, + typename Adapter::node_t node) { + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const Operation& op = selector->Get(node); + switch (op.opcode) { + case Opcode::kWordBinop: + return op.Cast().rep == WordRepresentation::Word32(); + case Opcode::kWordUnary: + return op.Cast().rep == WordRepresentation::Word32(); + case Opcode::kShift: + return op.Cast().rep == WordRepresentation::Word32(); + case Opcode::kOverflowCheckedBinop: + return op.Cast().rep == + WordRepresentation::Word32(); + case Opcode::kLoad: { + LoadRepresentation load_rep = selector->load_view(node).loaded_rep(); + MachineRepresentation rep = load_rep.representation(); + switch (rep) { + case MachineRepresentation::kWord32: + return true; + case MachineRepresentation::kWord8: + if (load_rep.IsSigned()) + return false; + else + return true; + default: + return false; + } + } + default: + return false; + } + + } else { #if !V8_TARGET_ARCH_S390X return true; #else @@ -430,11 +600,14 @@ bool ProduceWord32Result(Node* node) { return false; } #endif + } } -static inline bool DoZeroExtForResult(Node* node) { +template +static inline bool DoZeroExtForResult(InstructionSelectorT* selector, + typename Adapter::node_t node) { #if V8_TARGET_ARCH_S390X - return ProduceWord32Result(node); + return ProduceWord32Result(selector, node); #else 
return false; #endif @@ -464,10 +637,58 @@ void VisitTryTruncateDouble(InstructionSelectorT* selector, } #endif +template +void GenerateRightOperands(InstructionSelectorT* selector, + typename TurboshaftAdapter::node_t node, + typename TurboshaftAdapter::node_t right, + InstructionCode* opcode, OperandModes* operand_mode, + InstructionOperand* inputs, size_t* input_count, + CanCombineWithLoad canCombineWithLoad) { + using namespace turboshaft; // NOLINT(build/namespaces) + S390OperandGeneratorT g(selector); + + if ((*operand_mode & OperandMode::kAllowImmediate) && + g.CanBeImmediate(right, *operand_mode)) { + inputs[(*input_count)++] = g.UseImmediate(right); + // Can only be RI or RRI + *operand_mode &= OperandMode::kAllowImmediate; + } else if (*operand_mode & OperandMode::kAllowMemoryOperand) { + const Operation& right_op = selector->Get(right); + if (right_op.Is() && selector->CanCover(node, right) && + canCombineWithLoad( + SelectLoadOpcode(selector->load_view(right).loaded_rep()))) { + AddressingMode mode = g.GetEffectiveAddressMemoryOperand( + right, inputs, input_count, OpcodeImmMode(*opcode)); + *opcode |= AddressingModeField::encode(mode); + *operand_mode &= ~OperandMode::kAllowImmediate; + if (*operand_mode & OperandMode::kAllowRM) + *operand_mode &= ~OperandMode::kAllowDistinctOps; + } else if (*operand_mode & OperandMode::kAllowRM) { + DCHECK(!(*operand_mode & OperandMode::kAllowRRM)); + inputs[(*input_count)++] = g.UseAnyExceptImmediate(right); + // Can not be Immediate + *operand_mode &= + ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps; + } else if (*operand_mode & OperandMode::kAllowRRM) { + DCHECK(!(*operand_mode & OperandMode::kAllowRM)); + inputs[(*input_count)++] = g.UseAnyExceptImmediate(right); + // Can not be Immediate + *operand_mode &= ~OperandMode::kAllowImmediate; + } else { + UNREACHABLE(); + } + } else { + inputs[(*input_count)++] = g.UseRegister(right); + // Can only be RR or RRR + *operand_mode &= OperandMode::kAllowRRR; + } +} + template -void GenerateRightOperands(InstructionSelectorT* selector, Node* node, - Node* right, InstructionCode* opcode, - OperandModes* operand_mode, +void GenerateRightOperands(InstructionSelectorT* selector, + typename Adapter::node_t node, + typename Adapter::node_t right, + InstructionCode* opcode, OperandModes* operand_mode, InstructionOperand* inputs, size_t* input_count, CanCombineWithLoad canCombineWithLoad) { S390OperandGeneratorT g(selector); @@ -510,9 +731,11 @@ void GenerateRightOperands(InstructionSelectorT* selector, Node* node, } template -void GenerateBinOpOperands(InstructionSelectorT* selector, Node* node, - Node* left, Node* right, InstructionCode* opcode, - OperandModes* operand_mode, +void GenerateBinOpOperands(InstructionSelectorT* selector, + typename Adapter::node_t node, + typename Adapter::node_t left, + typename Adapter::node_t right, + InstructionCode* opcode, OperandModes* operand_mode, InstructionOperand* inputs, size_t* input_count, CanCombineWithLoad canCombineWithLoad) { S390OperandGeneratorT g(selector); @@ -531,15 +754,15 @@ void GenerateBinOpOperands(InstructionSelectorT* selector, Node* node, } template -void VisitUnaryOp(InstructionSelectorT* selector, Node* node, - InstructionCode opcode, OperandModes operand_mode, - FlagsContinuationT* cont, +void VisitUnaryOp(InstructionSelectorT* selector, + typename Adapter::node_t node, InstructionCode opcode, + OperandModes operand_mode, FlagsContinuationT* cont, CanCombineWithLoad canCombineWithLoad); template -void 
VisitBinOp(InstructionSelectorT* selector, Node* node, - InstructionCode opcode, OperandModes operand_mode, - FlagsContinuationT* cont, +void VisitBinOp(InstructionSelectorT* selector, + typename Adapter::node_t node, InstructionCode opcode, + OperandModes operand_mode, FlagsContinuationT* cont, CanCombineWithLoad canCombineWithLoad); // Generate The following variations: @@ -572,21 +795,21 @@ void VisitBinOp(InstructionSelectorT* selector, Node* node, #define VISIT_OP_LIST VISIT_OP_LIST_32 #endif -#define DECLARE_VISIT_HELPER_FUNCTIONS(type1, type2, canCombineWithLoad) \ - template \ - static inline void Visit##type1##type2##Op( \ - InstructionSelectorT* selector, Node* node, \ - InstructionCode opcode, OperandModes operand_mode, \ - FlagsContinuationT* cont) { \ - Visit##type2##Op(selector, node, opcode, operand_mode, cont, \ - canCombineWithLoad); \ - } \ - template \ - static inline void Visit##type1##type2##Op( \ - InstructionSelectorT* selector, Node* node, \ - InstructionCode opcode, OperandModes operand_mode) { \ - FlagsContinuationT cont; \ - Visit##type1##type2##Op(selector, node, opcode, operand_mode, &cont); \ +#define DECLARE_VISIT_HELPER_FUNCTIONS(type1, type2, canCombineWithLoad) \ + template \ + static inline void Visit##type1##type2##Op( \ + InstructionSelectorT* selector, typename Adapter::node_t node, \ + InstructionCode opcode, OperandModes operand_mode, \ + FlagsContinuationT* cont) { \ + Visit##type2##Op(selector, node, opcode, operand_mode, cont, \ + canCombineWithLoad); \ + } \ + template \ + static inline void Visit##type1##type2##Op( \ + InstructionSelectorT* selector, typename Adapter::node_t node, \ + InstructionCode opcode, OperandModes operand_mode) { \ + FlagsContinuationT cont; \ + Visit##type1##type2##Op(selector, node, opcode, operand_mode, &cont); \ } VISIT_OP_LIST(DECLARE_VISIT_HELPER_FUNCTIONS) #undef DECLARE_VISIT_HELPER_FUNCTIONS @@ -594,23 +817,25 @@ VISIT_OP_LIST(DECLARE_VISIT_HELPER_FUNCTIONS) #undef VISIT_OP_LIST template -void VisitUnaryOp(InstructionSelectorT* selector, Node* node, - InstructionCode opcode, OperandModes operand_mode, - FlagsContinuationT* cont, +void VisitUnaryOp(InstructionSelectorT* selector, + typename Adapter::node_t node, InstructionCode opcode, + OperandModes operand_mode, FlagsContinuationT* cont, CanCombineWithLoad canCombineWithLoad) { + using namespace turboshaft; // NOLINT(build/namespaces) + using node_t = typename Adapter::node_t; S390OperandGeneratorT g(selector); InstructionOperand inputs[8]; size_t input_count = 0; InstructionOperand outputs[2]; size_t output_count = 0; - Node* input = node->InputAt(0); + node_t input = selector->input_at(node, 0); GenerateRightOperands(selector, node, input, &opcode, &operand_mode, inputs, &input_count, canCombineWithLoad); - bool input_is_word32 = ProduceWord32Result(input); + bool input_is_word32 = ProduceWord32Result(selector, input); - bool doZeroExt = DoZeroExtForResult(node); + bool doZeroExt = DoZeroExtForResult(selector, node); bool canEliminateZeroExt = input_is_word32; if (doZeroExt) { @@ -642,31 +867,43 @@ void VisitUnaryOp(InstructionSelectorT* selector, Node* node, } template -void VisitBinOp(InstructionSelectorT* selector, Node* node, - InstructionCode opcode, OperandModes operand_mode, - FlagsContinuationT* cont, +void VisitBinOp(InstructionSelectorT* selector, + typename Adapter::node_t node, InstructionCode opcode, + OperandModes operand_mode, FlagsContinuationT* cont, CanCombineWithLoad canCombineWithLoad) { + using namespace turboshaft; // 
NOLINT(build/namespaces) + using node_t = typename Adapter::node_t; S390OperandGeneratorT g(selector); - Int32BinopMatcher m(node); - Node* left = m.left().node(); - Node* right = m.right().node(); + node_t left = selector->input_at(node, 0); + node_t right = selector->input_at(node, 1); InstructionOperand inputs[8]; size_t input_count = 0; InstructionOperand outputs[2]; size_t output_count = 0; - if (node->op()->HasProperty(Operator::kCommutative) && - !g.CanBeImmediate(right, operand_mode) && - (g.CanBeBetterLeftOperand(right))) { - std::swap(left, right); + if constexpr (Adapter::IsTurboshaft) { + const Operation& op = selector->Get(node); + if (op.TryCast() && + WordBinopOp::IsCommutative( + selector->Get(node).template Cast().kind) && + !g.CanBeImmediate(right, operand_mode) && + (g.CanBeBetterLeftOperand(right))) { + std::swap(left, right); + } + } else { + if (node->op()->HasProperty(Operator::kCommutative) && + !g.CanBeImmediate(right, operand_mode) && + (g.CanBeBetterLeftOperand(right))) { + std::swap(left, right); + } } GenerateBinOpOperands(selector, node, left, right, &opcode, &operand_mode, inputs, &input_count, canCombineWithLoad); - bool left_is_word32 = ProduceWord32Result(left); + bool left_is_word32 = ProduceWord32Result(selector, left); - bool doZeroExt = DoZeroExtForResult(node); + bool doZeroExt = DoZeroExtForResult(selector, node); bool canEliminateZeroExt = left_is_word32; if (doZeroExt) { @@ -702,16 +939,13 @@ void VisitBinOp(InstructionSelectorT* selector, Node* node, template void InstructionSelectorT::VisitStackSlot(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - StackSlotRepresentation rep = StackSlotRepresentationOf(node->op()); - int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment()); - OperandGenerator g(this); + StackSlotRepresentation rep = this->stack_slot_representation_of(node); + int slot = + frame_->AllocateSpillSlot(rep.size(), rep.alignment(), rep.is_tagged()); + OperandGenerator g(this); - Emit(kArchStackSlot, g.DefineAsRegister(node), - sequence()->AddImmediate(Constant(slot)), 0, nullptr); - } + Emit(kArchStackSlot, g.DefineAsRegister(node), + sequence()->AddImmediate(Constant(slot)), 0, nullptr); } template @@ -727,9 +961,6 @@ void InstructionSelectorT::VisitAbortCSADcheck(node_t node) { template void InstructionSelectorT::VisitLoad(node_t node, node_t value, InstructionCode opcode) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { S390OperandGeneratorT g(this); InstructionOperand outputs[] = {g.DefineAsRegister(node)}; InstructionOperand inputs[3]; @@ -738,7 +969,6 @@ void InstructionSelectorT::VisitLoad(node_t node, node_t value, g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count); opcode |= AddressingModeField::encode(mode); Emit(opcode, 1, outputs, input_count, inputs); - } } template @@ -757,29 +987,32 @@ void InstructionSelectorT::VisitProtectedLoad(node_t node) { template static void VisitGeneralStore( - InstructionSelectorT* selector, Node* node, + InstructionSelectorT* selector, typename Adapter::node_t node, MachineRepresentation rep, WriteBarrierKind write_barrier_kind = kNoWriteBarrier) { + using node_t = typename Adapter::node_t; + using optional_node_t = typename Adapter::optional_node_t; S390OperandGeneratorT g(selector); - Node* base = node->InputAt(0); - Node* offset = node->InputAt(1); - Node* value = node->InputAt(2); + + auto store_view = selector->store_view(node); + DCHECK_EQ(store_view.element_size_log2(), 0); + + node_t base = 
store_view.base(); + optional_node_t index = store_view.index(); + node_t value = store_view.value(); + int32_t displacement = store_view.displacement(); + if (write_barrier_kind != kNoWriteBarrier && !v8_flags.disable_write_barriers) { DCHECK(CanBeTaggedOrCompressedPointer(rep)); AddressingMode addressing_mode; - InstructionOperand inputs[3]; + InstructionOperand inputs[4]; size_t input_count = 0; - inputs[input_count++] = g.UseUniqueRegister(base); - // OutOfLineRecordWrite uses the offset in an 'AddS64' instruction as well - // as for the store itself, so we must check compatibility with both. - if (g.CanBeImmediate(offset, OperandMode::kInt20Imm)) { - inputs[input_count++] = g.UseImmediate(offset); - addressing_mode = kMode_MRI; - } else { - inputs[input_count++] = g.UseUniqueRegister(offset); - addressing_mode = kMode_MRR; - } + addressing_mode = g.GenerateMemoryOperandInputs( + index, base, displacement, DisplacementMode::kPositiveDisplacement, + inputs, &input_count, + S390OperandGeneratorT::RegisterUseKind::kUseUniqueRegister); + DCHECK_LT(input_count, 4); inputs[input_count++] = g.UseUniqueRegister(value); RecordWriteMode record_write_mode = WriteBarrierKindToRecordWriteMode(write_barrier_kind); @@ -791,7 +1024,6 @@ static void VisitGeneralStore( selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps); } else { ArchOpcode opcode; - NodeMatcher m(value); switch (rep) { case MachineRepresentation::kFloat32: opcode = kS390_StoreFloat32; @@ -806,13 +1038,23 @@ static void VisitGeneralStore( case MachineRepresentation::kWord16: opcode = kS390_StoreWord16; break; - case MachineRepresentation::kWord32: + case MachineRepresentation::kWord32: { opcode = kS390_StoreWord32; - if (m.IsWord32ReverseBytes()) { + bool is_w32_reverse_bytes = false; + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const Operation& reverse_op = selector->Get(value); + is_w32_reverse_bytes = reverse_op.Is(); + } else { + NodeMatcher m(value); + is_w32_reverse_bytes = m.IsWord32ReverseBytes(); + } + if (is_w32_reverse_bytes) { opcode = kS390_StoreReverse32; - value = value->InputAt(0); + value = selector->input_at(value, 0); } break; + } case MachineRepresentation::kCompressedPointer: // Fall through. case MachineRepresentation::kCompressed: case MachineRepresentation::kIndirectPointer: // Fall through. 
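
Note on the hunk above: VisitGeneralStore now builds the write-barrier operands through GenerateMemoryOperandInputs, which derives the S390 addressing mode from whichever of base, index, and displacement are actually present. A minimal standalone sketch of that mode selection, using toy types rather than the real operand generator, might look like:

#include <cstdint>
#include <iostream>

// Toy stand-in for the selector's decomposed memory address.
struct MemParts {
  bool has_base = false;
  bool has_index = false;
  int64_t displacement = 0;
};

// Mirrors the mode choice in GenerateMemoryOperandInputs: MR = register,
// MRI = register + immediate, MRR = register + register, MRRI = both
// registers + immediate. When no base is present, the index register serves
// as the sole register operand.
const char* SelectAddressingMode(const MemParts& m) {
  if (m.has_base) {
    if (m.has_index) return m.displacement != 0 ? "MRRI" : "MRR";
    return m.displacement != 0 ? "MRI" : "MR";
  }
  return m.displacement != 0 ? "MRI" : "MR";
}

int main() {
  std::cout << SelectAddressingMode({true, true, 8}) << "\n";     // MRRI
  std::cout << SelectAddressingMode({true, false, 0}) << "\n";    // MR
  std::cout << SelectAddressingMode({false, true, -16}) << "\n";  // MRI
}
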
@@ -828,20 +1070,36 @@ static void VisitGeneralStore( case MachineRepresentation::kTagged: opcode = kS390_StoreCompressTagged; break; - case MachineRepresentation::kWord64: + case MachineRepresentation::kWord64: { opcode = kS390_StoreWord64; - if (m.IsWord64ReverseBytes()) { + bool is_w64_reverse_bytes = false; + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const Operation& reverse_op = selector->Get(value); + is_w64_reverse_bytes = reverse_op.Is(); + } else { + NodeMatcher m(value); + is_w64_reverse_bytes = m.IsWord64ReverseBytes(); + } + if (is_w64_reverse_bytes) { opcode = kS390_StoreReverse64; - value = value->InputAt(0); + value = selector->input_at(value, 0); } break; - case MachineRepresentation::kSimd128: + } + case MachineRepresentation::kSimd128: { opcode = kS390_StoreSimd128; - if (m.IsSimd128ReverseBytes()) { - opcode = kS390_StoreReverseSimd128; - value = value->InputAt(0); + if constexpr (Adapter::IsTurboshaft) { + UNIMPLEMENTED(); + } else { + NodeMatcher m(value); + if (m.IsSimd128ReverseBytes()) { + opcode = kS390_StoreReverseSimd128; + value = value->InputAt(0); + } } break; + } case MachineRepresentation::kSimd256: // Fall through. case MachineRepresentation::kMapWord: // Fall through. case MachineRepresentation::kNone: @@ -867,20 +1125,16 @@ void InstructionSelectorT::VisitStorePair(node_t node) { template void InstructionSelectorT::VisitStore(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - StoreRepresentation store_rep = StoreRepresentationOf(node->op()); - WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind(); - MachineRepresentation rep = store_rep.representation(); + StoreRepresentation store_rep = this->store_view(node).stored_rep(); + WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind(); + MachineRepresentation rep = store_rep.representation(); - if (v8_flags.enable_unconditional_write_barriers && - CanBeTaggedOrCompressedPointer(rep)) { - write_barrier_kind = kFullWriteBarrier; - } + if (v8_flags.enable_unconditional_write_barriers && + CanBeTaggedOrCompressedPointer(rep)) { + write_barrier_kind = kFullWriteBarrier; + } VisitGeneralStore(this, node, rep, write_barrier_kind); - } } template @@ -969,11 +1223,68 @@ static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) { #endif #if V8_TARGET_ARCH_S390X +template <> +void InstructionSelectorT::VisitWord64And(node_t node) { + using namespace turboshaft; // NOLINT(build/namespaces) + S390OperandGeneratorT g(this); + + const WordBinopOp& bitwise_and = Get(node).Cast(); + int mb = 0; + int me = 0; + if (is_integer_constant(bitwise_and.right()) && + IsContiguousMask64(integer_constant(bitwise_and.right()), &mb, &me)) { + int sh = 0; + node_t left = bitwise_and.left(); + const Operation& lhs = Get(left); + if ((lhs.Is() || + lhs.Is()) && + CanCover(node, left)) { + // Try to absorb left/right shift into rldic + int64_t shift_by; + const ShiftOp& shift_op = lhs.Cast(); + if (MatchIntegralWord64Constant(shift_op.right(), &shift_by) && + base::IsInRange(shift_by, 0, 63)) { + left = shift_op.left(); + sh = integer_constant(shift_op.right()); + if (lhs.Is()) { + // Adjust the mask such that it doesn't include any rotated bits. + if (mb > 63 - sh) mb = 63 - sh; + sh = (64 - sh) & 0x3F; + } else { + // Adjust the mask such that it doesn't include any rotated bits. 
+ if (me < sh) me = sh; + } + } + } + if (mb >= me) { + bool match = false; + ArchOpcode opcode; + int mask; + if (me == 0) { + match = true; + opcode = kS390_RotLeftAndClearLeft64; + mask = mb; + } else if (mb == 63) { + match = true; + opcode = kS390_RotLeftAndClearRight64; + mask = me; + } else if (sh && me <= sh && lhs.Is()) { + match = true; + opcode = kS390_RotLeftAndClear64; + mask = mb; + } + if (match && CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) { + Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left), + g.TempImmediate(sh), g.TempImmediate(mask)); + return; + } + } + } + VisitWord64BinOp(this, node, kS390_And64, And64OperandMode); +} + template void InstructionSelectorT::VisitWord64And(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { S390OperandGeneratorT g(this); Int64BinopMatcher m(node); int mb = 0; @@ -1023,14 +1334,94 @@ void InstructionSelectorT::VisitWord64And(node_t node) { } } VisitWord64BinOp(this, node, kS390_And64, And64OperandMode); +} + +template <> +Node* InstructionSelectorT::FindProjection( + Node* node, size_t projection_index) { + return NodeProperties::FindProjection(node, projection_index); +} + +template <> +TurboshaftAdapter::node_t +InstructionSelectorT::FindProjection( + node_t node, size_t projection_index) { + using namespace turboshaft; // NOLINT(build/namespaces) + const turboshaft::Graph* graph = this->turboshaft_graph(); + // Projections are always emitted right after the operation. + for (OpIndex next = graph->NextIndex(node); next.valid(); + next = graph->NextIndex(next)) { + const ProjectionOp* projection = graph->Get(next).TryCast(); + if (projection == nullptr) break; + if (projection->index == projection_index) return next; + } + + // If there is no Projection with index {projection_index} following the + // operation, then there shouldn't be any such Projection in the graph. We + // verify this in Debug mode. +#ifdef DEBUG + for (turboshaft::OpIndex use : turboshaft_uses(node)) { + if (const turboshaft::ProjectionOp* projection = + this->Get(use).TryCast()) { + DCHECK_EQ(projection->input(), node); + if (projection->index == projection_index) { + UNREACHABLE(); + } + } } +#endif // DEBUG + return turboshaft::OpIndex::Invalid(); +} + +template <> +void InstructionSelectorT::VisitWord64Shl(node_t node) { + S390OperandGeneratorT g(this); + using namespace turboshaft; // NOLINT(build/namespaces) + const ShiftOp& shl = this->Get(node).template Cast(); + const Operation& lhs = this->Get(shl.left()); + if (lhs.Is() && + this->is_integer_constant(shl.right()) && + base::IsInRange(this->integer_constant(shl.right()), 0, 63)) { + int sh = this->integer_constant(shl.right()); + int mb; + int me; + const WordBinopOp& bitwise_and = lhs.Cast(); + if (this->is_integer_constant(bitwise_and.right()) && + IsContiguousMask64(this->integer_constant(bitwise_and.right()) << sh, + &mb, &me)) { + // Adjust the mask such that it doesn't include any rotated bits. 
+ if (me < sh) me = sh; + if (mb >= me) { + bool match = false; + ArchOpcode opcode; + int mask; + if (me == 0) { + match = true; + opcode = kS390_RotLeftAndClearLeft64; + mask = mb; + } else if (mb == 63) { + match = true; + opcode = kS390_RotLeftAndClearRight64; + mask = me; + } else if (sh && me <= sh) { + match = true; + opcode = kS390_RotLeftAndClear64; + mask = mb; + } + if (match && CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) { + Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(bitwise_and.left()), g.TempImmediate(sh), + g.TempImmediate(mask)); + return; + } + } + } + } + VisitWord64BinOp(this, node, kS390_ShiftLeft64, Shift64OperandMode); } template void InstructionSelectorT::VisitWord64Shl(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { S390OperandGeneratorT g(this); Int64BinopMatcher m(node); // TODO(mbrandy): eliminate left sign extension if right >= 32 @@ -1070,14 +1461,56 @@ void InstructionSelectorT::VisitWord64Shl(node_t node) { } } VisitWord64BinOp(this, node, kS390_ShiftLeft64, Shift64OperandMode); +} + +template <> +void InstructionSelectorT::VisitWord64Shr(node_t node) { + S390OperandGeneratorT g(this); + using namespace turboshaft; // NOLINT(build/namespaces) + const ShiftOp& shr = this->Get(node).template Cast(); + const Operation& lhs = this->Get(shr.left()); + if (lhs.Is() && + this->is_integer_constant(shr.right()) && + base::IsInRange(this->integer_constant(shr.right()), 0, 63)) { + int sh = this->integer_constant(shr.right()); + int mb; + int me; + const WordBinopOp& bitwise_and = lhs.Cast(); + if (this->is_integer_constant(bitwise_and.right()) && + IsContiguousMask64( + static_cast(this->integer_constant(bitwise_and.right()) >> + sh), + &mb, &me)) { + // Adjust the mask such that it doesn't include any rotated bits. 
+ if (mb > 63 - sh) mb = 63 - sh; + sh = (64 - sh) & 0x3F; + if (mb >= me) { + bool match = false; + ArchOpcode opcode; + int mask; + if (me == 0) { + match = true; + opcode = kS390_RotLeftAndClearLeft64; + mask = mb; + } else if (mb == 63) { + match = true; + opcode = kS390_RotLeftAndClearRight64; + mask = me; + } + if (match) { + Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(bitwise_and.left()), g.TempImmediate(sh), + g.TempImmediate(mask)); + return; + } + } + } } + VisitWord64BinOp(this, node, kS390_ShiftRight64, Shift64OperandMode); } template void InstructionSelectorT::VisitWord64Shr(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { S390OperandGeneratorT g(this); Int64BinopMatcher m(node); if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) { @@ -1114,19 +1547,58 @@ void InstructionSelectorT::VisitWord64Shr(node_t node) { } } VisitWord64BinOp(this, node, kS390_ShiftRight64, Shift64OperandMode); - } } #endif +static inline bool TryMatchSignExtInt16OrInt8FromWord32Sar( + InstructionSelectorT* selector, + typename TurboshaftAdapter::node_t node) { + S390OperandGeneratorT g(selector); + + using namespace turboshaft; // NOLINT(build/namespaces) + const ShiftOp& sar = selector->Get(node).template Cast(); + const Operation& lhs = selector->Get(sar.left()); + if (selector->CanCover(node, sar.left()) && + lhs.Is()) { + const ShiftOp& shl = lhs.Cast(); + if (selector->is_integer_constant(sar.right()) && + selector->is_integer_constant(shl.right())) { + uint32_t sar_by = selector->integer_constant(sar.right()); + uint32_t shl_by = selector->integer_constant(shl.right()); + if ((sar_by == shl_by) && (sar_by == 16)) { + bool canEliminateZeroExt = + ProduceWord32Result(selector, shl.left()); + selector->Emit(kS390_SignExtendWord16ToInt32, + canEliminateZeroExt ? g.DefineSameAsFirst(node) + : g.DefineAsRegister(node), + g.UseRegister(shl.left()), + g.TempImmediate(!canEliminateZeroExt)); + return true; + } else if ((sar_by == shl_by) && (sar_by == 24)) { + bool canEliminateZeroExt = + ProduceWord32Result(selector, shl.left()); + selector->Emit(kS390_SignExtendWord8ToInt32, + canEliminateZeroExt ? g.DefineSameAsFirst(node) + : g.DefineAsRegister(node), + g.UseRegister(shl.left()), + g.TempImmediate(!canEliminateZeroExt)); + return true; + } + } + } + return false; +} + template static inline bool TryMatchSignExtInt16OrInt8FromWord32Sar( - InstructionSelectorT* selector, Node* node) { + InstructionSelectorT* selector, typename Adapter::node_t node) { S390OperandGeneratorT g(selector); Int32BinopMatcher m(node); if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) { Int32BinopMatcher mleft(m.left().node()); if (mleft.right().Is(16) && m.right().Is(16)) { - bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node()); + bool canEliminateZeroExt = + ProduceWord32Result(selector, mleft.left().node()); selector->Emit(kS390_SignExtendWord16ToInt32, canEliminateZeroExt ? g.DefineSameAsFirst(node) : g.DefineAsRegister(node), @@ -1134,7 +1606,8 @@ static inline bool TryMatchSignExtInt16OrInt8FromWord32Sar( g.TempImmediate(!canEliminateZeroExt)); return true; } else if (mleft.right().Is(24) && m.right().Is(24)) { - bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node()); + bool canEliminateZeroExt = + ProduceWord32Result(selector, mleft.left().node()); selector->Emit(kS390_SignExtendWord8ToInt32, canEliminateZeroExt ? 
g.DefineSameAsFirst(node) : g.DefineAsRegister(node), @@ -1272,60 +1745,120 @@ void InstructionSelectorT::VisitSimd128ReverseBytes(node_t node) { template static inline bool TryMatchNegFromSub(InstructionSelectorT* selector, - Node* node) { + typename Adapter::node_t node) { S390OperandGeneratorT g(selector); - Matcher m(node); - static_assert(neg_opcode == kS390_Neg32 || neg_opcode == kS390_Neg64, - "Provided opcode is not a Neg opcode."); - if (m.left().Is(0)) { - Node* value = m.right().node(); - bool doZeroExt = DoZeroExtForResult(node); - bool canEliminateZeroExt = ProduceWord32Result(value); - if (doZeroExt) { - selector->Emit(neg_opcode, - canEliminateZeroExt ? g.DefineSameAsFirst(node) - : g.DefineAsRegister(node), - g.UseRegister(value), - g.TempImmediate(!canEliminateZeroExt)); - } else { - selector->Emit(neg_opcode, g.DefineAsRegister(node), - g.UseRegister(value)); + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + static_assert(neg_opcode == kS390_Neg32 || neg_opcode == kS390_Neg64, + "Provided opcode is not a Neg opcode."); + const WordBinopOp& sub_op = + selector->Get(node).template Cast(); + if (selector->MatchIntegralZero(sub_op.left())) { + typename Adapter::node_t value = sub_op.right(); + bool doZeroExt = DoZeroExtForResult(selector, node); + bool canEliminateZeroExt = ProduceWord32Result(selector, value); + if (doZeroExt) { + selector->Emit(neg_opcode, + canEliminateZeroExt ? g.DefineSameAsFirst(node) + : g.DefineAsRegister(node), + g.UseRegister(value), + g.TempImmediate(!canEliminateZeroExt)); + } else { + selector->Emit(neg_opcode, g.DefineAsRegister(node), + g.UseRegister(value)); + } + return true; } - return true; + return false; + + } else { + Matcher m(node); + static_assert(neg_opcode == kS390_Neg32 || neg_opcode == kS390_Neg64, + "Provided opcode is not a Neg opcode."); + if (m.left().Is(0)) { + Node* value = m.right().node(); + bool doZeroExt = DoZeroExtForResult(selector, node); + bool canEliminateZeroExt = ProduceWord32Result(selector, value); + if (doZeroExt) { + selector->Emit(neg_opcode, + canEliminateZeroExt ? g.DefineSameAsFirst(node) + : g.DefineAsRegister(node), + g.UseRegister(value), + g.TempImmediate(!canEliminateZeroExt)); + } else { + selector->Emit(neg_opcode, g.DefineAsRegister(node), + g.UseRegister(value)); + } + return true; + } + return false; } - return false; } template -bool TryMatchShiftFromMul(InstructionSelectorT* selector, Node* node) { +bool TryMatchShiftFromMul(InstructionSelectorT* selector, + typename Adapter::node_t node) { S390OperandGeneratorT g(selector); - Matcher m(node); - Node* left = m.left().node(); - Node* right = m.right().node(); - if (g.CanBeImmediate(right, OperandMode::kInt32Imm) && - base::bits::IsPowerOfTwo(g.GetImmediate(right))) { - int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right)); - bool doZeroExt = DoZeroExtForResult(node); - bool canEliminateZeroExt = ProduceWord32Result(left); - InstructionOperand dst = (doZeroExt && !canEliminateZeroExt && - CpuFeatures::IsSupported(DISTINCT_OPS)) - ? 
g.DefineAsRegister(node) - : g.DefineSameAsFirst(node); - - if (doZeroExt) { - selector->Emit(shift_op, dst, g.UseRegister(left), g.UseImmediate(power), - g.TempImmediate(!canEliminateZeroExt)); - } else { - selector->Emit(shift_op, dst, g.UseRegister(left), g.UseImmediate(power)); + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const Operation& op = selector->Get(node); + const WordBinopOp& mul_op = op.Cast(); + turboshaft::OpIndex left = mul_op.left(); + turboshaft::OpIndex right = mul_op.right(); + if (g.CanBeImmediate(right, OperandMode::kInt32Imm) && + base::bits::IsPowerOfTwo(g.GetImmediate(right))) { + int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right)); + bool doZeroExt = DoZeroExtForResult(selector, node); + bool canEliminateZeroExt = ProduceWord32Result(selector, left); + InstructionOperand dst = (doZeroExt && !canEliminateZeroExt && + CpuFeatures::IsSupported(DISTINCT_OPS)) + ? g.DefineAsRegister(node) + : g.DefineSameAsFirst(node); + + if (doZeroExt) { + selector->Emit(shift_op, dst, g.UseRegister(left), + g.UseImmediate(power), + g.TempImmediate(!canEliminateZeroExt)); + } else { + selector->Emit(shift_op, dst, g.UseRegister(left), + g.UseImmediate(power)); + } + return true; } - return true; + return false; + + } else { + Matcher m(node); + Node* left = m.left().node(); + Node* right = m.right().node(); + if (g.CanBeImmediate(right, OperandMode::kInt32Imm) && + base::bits::IsPowerOfTwo(g.GetImmediate(right))) { + int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right)); + bool doZeroExt = DoZeroExtForResult(selector, node); + bool canEliminateZeroExt = ProduceWord32Result(selector, left); + InstructionOperand dst = (doZeroExt && !canEliminateZeroExt && + CpuFeatures::IsSupported(DISTINCT_OPS)) + ? 
g.DefineAsRegister(node) + : g.DefineSameAsFirst(node); + + if (doZeroExt) { + selector->Emit(shift_op, dst, g.UseRegister(left), + g.UseImmediate(power), + g.TempImmediate(!canEliminateZeroExt)); + } else { + selector->Emit(shift_op, dst, g.UseRegister(left), + g.UseImmediate(power)); + } + return true; + } + return false; } - return false; } template static inline bool TryMatchInt32OpWithOverflow( - InstructionSelectorT* selector, Node* node, OperandModes mode) { + InstructionSelectorT* selector, typename Adapter::node_t node, + OperandModes mode) { if constexpr (Adapter::IsTurboshaft) { UNIMPLEMENTED(); } else { @@ -1341,21 +1874,21 @@ static inline bool TryMatchInt32OpWithOverflow( template static inline bool TryMatchInt32AddWithOverflow( - InstructionSelectorT* selector, Node* node) { + InstructionSelectorT* selector, typename Adapter::node_t node) { return TryMatchInt32OpWithOverflow(selector, node, AddOperandMode); } template static inline bool TryMatchInt32SubWithOverflow( - InstructionSelectorT* selector, Node* node) { + InstructionSelectorT* selector, typename Adapter::node_t node) { return TryMatchInt32OpWithOverflow(selector, node, SubOperandMode); } template static inline bool TryMatchInt32MulWithOverflow( - InstructionSelectorT* selector, Node* node) { + InstructionSelectorT* selector, typename Adapter::node_t node) { if constexpr (Adapter::IsTurboshaft) { UNIMPLEMENTED(); } else { @@ -1380,7 +1913,8 @@ static inline bool TryMatchInt32MulWithOverflow( #if V8_TARGET_ARCH_S390X template static inline bool TryMatchInt64OpWithOverflow( - InstructionSelectorT* selector, Node* node, OperandModes mode) { + InstructionSelectorT* selector, typename Adapter::node_t node, + OperandModes mode) { if constexpr (Adapter::IsTurboshaft) { UNIMPLEMENTED(); } else { @@ -1396,30 +1930,32 @@ static inline bool TryMatchInt64OpWithOverflow( template static inline bool TryMatchInt64AddWithOverflow( - InstructionSelectorT* selector, Node* node) { + InstructionSelectorT* selector, typename Adapter::node_t node) { return TryMatchInt64OpWithOverflow(selector, node, AddOperandMode); } template static inline bool TryMatchInt64SubWithOverflow( - InstructionSelectorT* selector, Node* node) { + InstructionSelectorT* selector, typename Adapter::node_t node) { return TryMatchInt64OpWithOverflow(selector, node, SubOperandMode); } template void EmitInt64MulWithOverflow(InstructionSelectorT* selector, - Node* node, FlagsContinuationT* cont) { + typename Adapter::node_t node, + FlagsContinuationT* cont) { S390OperandGeneratorT g(selector); - Int64BinopMatcher m(node); + typename Adapter::node_t lhs = selector->input_at(node, 0); + typename Adapter::node_t rhs = selector->input_at(node, 1); InstructionOperand inputs[2]; size_t input_count = 0; InstructionOperand outputs[1]; size_t output_count = 0; - inputs[input_count++] = g.UseUniqueRegister(m.left().node()); - inputs[input_count++] = g.UseUniqueRegister(m.right().node()); + inputs[input_count++] = g.UseUniqueRegister(lhs); + inputs[input_count++] = g.UseUniqueRegister(rhs); outputs[output_count++] = g.DefineAsRegister(node); selector->EmitWithContinuation(kS390_Mul64WithOverflow, output_count, outputs, input_count, inputs, cont); @@ -1429,34 +1965,38 @@ void EmitInt64MulWithOverflow(InstructionSelectorT* selector, template static inline bool TryMatchDoubleConstructFromInsert( - InstructionSelectorT* selector, Node* node) { + InstructionSelectorT* selector, typename Adapter::node_t node) { S390OperandGeneratorT g(selector); - Node* left = node->InputAt(0); - 
Node* right = node->InputAt(1); - Node* lo32 = nullptr; - Node* hi32 = nullptr; - - if (node->opcode() == IrOpcode::kFloat64InsertLowWord32) { - lo32 = right; - } else if (node->opcode() == IrOpcode::kFloat64InsertHighWord32) { - hi32 = right; + if constexpr (Adapter::IsTurboshaft) { + UNIMPLEMENTED(); } else { - return false; // doesn't match - } + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + Node* lo32 = nullptr; + Node* hi32 = nullptr; - if (left->opcode() == IrOpcode::kFloat64InsertLowWord32) { - lo32 = left->InputAt(1); - } else if (left->opcode() == IrOpcode::kFloat64InsertHighWord32) { - hi32 = left->InputAt(1); - } else { - return false; // doesn't match - } + if (node->opcode() == IrOpcode::kFloat64InsertLowWord32) { + lo32 = right; + } else if (node->opcode() == IrOpcode::kFloat64InsertHighWord32) { + hi32 = right; + } else { + return false; // doesn't match + } - if (!lo32 || !hi32) return false; // doesn't match + if (left->opcode() == IrOpcode::kFloat64InsertLowWord32) { + lo32 = left->InputAt(1); + } else if (left->opcode() == IrOpcode::kFloat64InsertHighWord32) { + hi32 = left->InputAt(1); + } else { + return false; // doesn't match + } - selector->Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), - g.UseRegister(hi32), g.UseRegister(lo32)); - return true; + if (!lo32 || !hi32) return false; // doesn't match + + selector->Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), + g.UseRegister(hi32), g.UseRegister(lo32)); + return true; + } } #define null ([]() { return false; }) @@ -1554,7 +2094,7 @@ static inline bool TryMatchDoubleConstructFromInsert( OperandMode::kNone, null) \ V(Word32, ChangeUint32ToUint64, kS390_Uint32ToUint64, OperandMode::kNone, \ [&]() -> bool { \ - if (ProduceWord32Result(node->InputAt(0))) { \ + if (ProduceWord32Result(this, this->input_at(node, 0))) { \ EmitIdentity(node); \ return true; \ } \ @@ -1656,23 +2196,15 @@ static inline bool TryMatchDoubleConstructFromInsert( #define DECLARE_UNARY_OP(type, name, op, mode, try_extra) \ template \ void InstructionSelectorT::Visit##name(node_t node) { \ - if constexpr (Adapter::IsTurboshaft) { \ - UNIMPLEMENTED(); \ - } else { \ if (std::function(try_extra)()) return; \ Visit##type##UnaryOp(this, node, op, mode); \ - } \ } #define DECLARE_BIN_OP(type, name, op, mode, try_extra) \ template \ void InstructionSelectorT::Visit##name(node_t node) { \ - if constexpr (Adapter::IsTurboshaft) { \ - UNIMPLEMENTED(); \ - } else { \ if (std::function(try_extra)()) return; \ Visit##type##BinOp(this, node, op, mode); \ - } \ } FLOAT_UNARY_OP_LIST(DECLARE_UNARY_OP) @@ -1781,42 +2313,32 @@ void InstructionSelectorT::VisitFloat64Mod(node_t node) { template void InstructionSelectorT::VisitFloat64Ieee754Unop( node_t node, InstructionCode opcode) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { S390OperandGeneratorT g(this); - Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1)) + Emit(opcode, g.DefineAsFixed(node, d1), + g.UseFixed(this->input_at(node, 0), d1)) ->MarkAsCall(); - } } template void InstructionSelectorT::VisitFloat64Ieee754Binop( node_t node, InstructionCode opcode) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - S390OperandGeneratorT g(this); - Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1), - g.UseFixed(node->InputAt(1), d2)) - ->MarkAsCall(); - } + S390OperandGeneratorT g(this); + Emit(opcode, g.DefineAsFixed(node, d1), + g.UseFixed(this->input_at(node, 0), d1), + 
g.UseFixed(this->input_at(node, 1), d2)) + ->MarkAsCall(); } template void InstructionSelectorT::VisitInt64MulWithOverflow(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - if (Node* ovf = NodeProperties::FindProjection(node, 1)) { - FlagsContinuation cont = FlagsContinuation::ForSet( - CpuFeatures::IsSupported(MISC_INSTR_EXT2) ? kOverflow : kNotEqual, - ovf); - return EmitInt64MulWithOverflow(this, node, &cont); - } + node_t ovf = FindProjection(node, 1); + if (this->valid(ovf)) { + FlagsContinuation cont = FlagsContinuation::ForSet( + CpuFeatures::IsSupported(MISC_INSTR_EXT2) ? kOverflow : kNotEqual, ovf); + return EmitInt64MulWithOverflow(this, node, &cont); + } FlagsContinuation cont; EmitInt64MulWithOverflow(this, node, &cont); - } } template @@ -1845,7 +2367,8 @@ void VisitCompare(InstructionSelectorT* selector, template void VisitLoadAndTest(InstructionSelectorT* selector, - InstructionCode opcode, Node* node, Node* value, + InstructionCode opcode, typename Adapter::node_t node, + typename Adapter::node_t value, FlagsContinuationT* cont, bool discard_output = false); @@ -1855,16 +2378,21 @@ void VisitWordCompare(InstructionSelectorT* selector, typename Adapter::node_t node, InstructionCode opcode, FlagsContinuationT* cont, OperandModes immediate_mode) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { S390OperandGeneratorT g(selector); - Node* left = node->InputAt(0); - Node* right = node->InputAt(1); - - DCHECK(IrOpcode::IsComparisonOpcode(node->opcode()) || - node->opcode() == IrOpcode::kInt32Sub || - node->opcode() == IrOpcode::kInt64Sub); + typename Adapter::node_t lhs = selector->input_at(node, 0); + typename Adapter::node_t rhs = selector->input_at(node, 1); + + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const Operation& op = selector->Get(node); + DCHECK(op.Is() || op.Is() || + op.Is()); + USE(op); + } else { + DCHECK(IrOpcode::IsComparisonOpcode(node->opcode()) || + node->opcode() == IrOpcode::kInt32Sub || + node->opcode() == IrOpcode::kInt64Sub); + } InstructionOperand inputs[8]; InstructionOperand outputs[1]; @@ -1875,57 +2403,53 @@ void VisitWordCompare(InstructionSelectorT* selector, // if one of the two inputs is a memory operand, make sure it's on the left. int effect_level = selector->GetEffectLevel(node, cont); - if ((!g.CanBeImmediate(right, immediate_mode) && - g.CanBeImmediate(left, immediate_mode)) || - (!g.CanBeMemoryOperand(opcode, node, right, effect_level) && - g.CanBeMemoryOperand(opcode, node, left, effect_level))) { - if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute(); - std::swap(left, right); + if ((!g.CanBeImmediate(rhs, immediate_mode) && + g.CanBeImmediate(lhs, immediate_mode)) || + (!g.CanBeMemoryOperand(opcode, node, rhs, effect_level) && + g.CanBeMemoryOperand(opcode, node, lhs, effect_level))) { + if (!selector->IsCommutative(node)) cont->Commute(); + std::swap(lhs, rhs); } // check if compare with 0 - if (g.CanBeImmediate(right, immediate_mode) && g.GetImmediate(right) == 0) { + if (g.CanBeImmediate(rhs, immediate_mode) && g.GetImmediate(rhs) == 0) { DCHECK(opcode == kS390_Cmp32 || opcode == kS390_Cmp64); ArchOpcode load_and_test = (opcode == kS390_Cmp32) ? 
kS390_LoadAndTestWord32 : kS390_LoadAndTestWord64; - return VisitLoadAndTest(selector, load_and_test, node, left, cont, true); + return VisitLoadAndTest(selector, load_and_test, node, lhs, cont, true); } - inputs[input_count++] = g.UseRegister(left); - if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) { + inputs[input_count++] = g.UseRegister(lhs); + if (g.CanBeMemoryOperand(opcode, node, rhs, effect_level)) { // generate memory operand AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand( - right, inputs, &input_count, OpcodeImmMode(opcode)); + rhs, inputs, &input_count, OpcodeImmMode(opcode)); opcode |= AddressingModeField::encode(addressing_mode); - } else if (g.CanBeImmediate(right, immediate_mode)) { - inputs[input_count++] = g.UseImmediate(right); + } else if (g.CanBeImmediate(rhs, immediate_mode)) { + inputs[input_count++] = g.UseImmediate(rhs); } else { - inputs[input_count++] = g.UseAnyExceptImmediate(right); + inputs[input_count++] = g.UseAnyExceptImmediate(rhs); } DCHECK(input_count <= 8 && output_count <= 1); selector->EmitWithContinuation(opcode, output_count, outputs, input_count, inputs, cont); - } } template void VisitWord32Compare(InstructionSelectorT* selector, typename Adapter::node_t node, FlagsContinuationT* cont) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { OperandModes mode = (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm); VisitWordCompare(selector, node, kS390_Cmp32, cont, mode); - } } #if V8_TARGET_ARCH_S390X template -void VisitWord64Compare(InstructionSelectorT* selector, Node* node, +void VisitWord64Compare(InstructionSelectorT* selector, + typename Adapter::node_t node, FlagsContinuationT* cont) { OperandModes mode = (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm); @@ -1938,11 +2462,7 @@ template void VisitFloat32Compare(InstructionSelectorT* selector, typename Adapter::node_t node, FlagsContinuationT* cont) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitWordCompare(selector, node, kS390_CmpFloat, cont, OperandMode::kNone); - } } // Shared routine for multiple float64 compare operations. 
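
Note on the hunk above: VisitWordCompare swaps its two inputs when only the first one could be encoded as an immediate or memory operand, and commutes the continuation's condition (unless the node is commutative) so the same relation is still tested; a compare against zero is further folded into a load-and-test. A simplified sketch of the commute step, with hypothetical names rather than V8's FlagsContinuation, is:

#include <cassert>

enum class Cond {
  kEqual, kNotEqual,
  kLessThan, kLessThanOrEqual, kGreaterThan, kGreaterThanOrEqual
};

// Commuting a comparison (testing b <op'> a instead of a <op> b) flips the
// ordered conditions and leaves the symmetric ones alone.
Cond Commute(Cond c) {
  switch (c) {
    case Cond::kLessThan:           return Cond::kGreaterThan;
    case Cond::kGreaterThan:        return Cond::kLessThan;
    case Cond::kLessThanOrEqual:    return Cond::kGreaterThanOrEqual;
    case Cond::kGreaterThanOrEqual: return Cond::kLessThanOrEqual;
    default:                        return c;  // kEqual / kNotEqual unchanged
  }
}

int main() {
  // "a < 7" emitted with swapped operands becomes "7 > a".
  assert(Commute(Cond::kLessThan) == Cond::kGreaterThan);
  assert(Commute(Cond::kEqual) == Cond::kEqual);
  return 0;
}
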
@@ -1950,11 +2470,34 @@ template void VisitFloat64Compare(InstructionSelectorT* selector, typename Adapter::node_t node, FlagsContinuationT* cont) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitWordCompare(selector, node, kS390_CmpDouble, cont, OperandMode::kNone); +} + +void VisitTestUnderMask(InstructionSelectorT* selector, + TurboshaftAdapter::node_t node, + FlagsContinuationT* cont) { + using namespace turboshaft; // NOLINT(build/namespaces) + const Operation& op = selector->Get(node); + DCHECK(op.Is() || + op.Is()); + USE(op); + + ArchOpcode opcode; + if (selector->Get(node).template TryCast()) { + opcode = kS390_Tst32; + } else { + opcode = kS390_Tst64; } + + S390OperandGeneratorT g(selector); + typename TurboshaftAdapter::node_t lhs = selector->input_at(node, 0); + typename TurboshaftAdapter::node_t rhs = selector->input_at(node, 1); + if (!g.CanBeImmediate(rhs, OperandMode::kUint32Imm) && + g.CanBeImmediate(lhs, OperandMode::kUint32Imm)) { + std::swap(lhs, rhs); + } + VisitCompare(selector, opcode, g.UseRegister(lhs), + g.UseOperand(rhs, OperandMode::kUint32Imm), cont); } template @@ -1977,11 +2520,11 @@ void VisitTestUnderMask(InstructionSelectorT* selector, Node* node, template void VisitLoadAndTest(InstructionSelectorT* selector, - InstructionCode opcode, Node* node, Node* value, + InstructionCode opcode, typename Adapter::node_t node, + typename Adapter::node_t value, FlagsContinuationT* cont, bool discard_output) { static_assert(kS390_LoadAndTestFloat64 - kS390_LoadAndTestWord32 == 3, "LoadAndTest Opcode shouldn't contain other opcodes."); - // TODO(john.yan): Add support for Float32/Float64. DCHECK(opcode >= kS390_LoadAndTestWord32 || opcode <= kS390_LoadAndTestWord64); @@ -2016,13 +2559,214 @@ void VisitLoadAndTest(InstructionSelectorT* selector, } // namespace +// Shared routine for word comparisons against zero. +template <> +void InstructionSelectorT::VisitWordCompareZero( + node_t user, node_t value, FlagsContinuation* cont) { + using namespace turboshaft; // NOLINT(build/namespaces) + + // Try to combine with comparisons against 0 by simply inverting the branch. + while (const ComparisonOp* equal = + this->TryCast(value)) { + if (!CanCover(user, value)) break; + if (!MatchIntegralZero(equal->right())) break; + + user = value; + value = equal->left(); + cont->Negate(); + } + + FlagsCondition fc = cont->condition(); + if (CanCover(user, value)) { + const Operation& value_op = this->Get(value); + if (const ComparisonOp* comparison = value_op.TryCast()) { + if (comparison->kind == ComparisonOp::Kind::kEqual) { + switch (comparison->rep.MapTaggedToWord().value()) { + case RegisterRepresentation::Word32(): { + cont->OverwriteAndNegateIfEqual(kEqual); + if (this->MatchIntegralZero(comparison->right())) { + // Try to combine the branch with a comparison. + if (CanCover(value, comparison->left())) { + const Operation& left_op = this->Get(comparison->left()); + if (left_op.Is()) { + return VisitWord32Compare(this, comparison->left(), cont); + } else if (left_op.Is()) { + return VisitTestUnderMask(this, comparison->left(), cont); + } + } + } + return VisitWord32Compare(this, value, cont); + } + case RegisterRepresentation::Word64(): { + cont->OverwriteAndNegateIfEqual(kEqual); + if (this->MatchIntegralZero(comparison->right())) { + // Try to combine the branch with a comparison. 
+ if (CanCover(value, comparison->left())) { + const Operation& left_op = this->Get(comparison->left()); + if (left_op.Is()) { + return VisitWord64Compare(this, comparison->left(), cont); + } else if (left_op.Is()) { + return VisitTestUnderMask(this, comparison->left(), cont); + } + } + } + return VisitWord64Compare(this, value, cont); + } + case RegisterRepresentation::Float32(): + cont->OverwriteAndNegateIfEqual(kUnorderedEqual); + return VisitFloat32Compare(this, value, cont); + case RegisterRepresentation::Float64(): + return VisitFloat64Compare(this, value, cont); + default: + break; + } + } else { + switch (comparison->rep.MapTaggedToWord().value()) { + case RegisterRepresentation::Word32(): + cont->OverwriteAndNegateIfEqual( + GetComparisonFlagCondition(*comparison)); + return VisitWord32Compare(this, value, cont); + case RegisterRepresentation::Word64(): + cont->OverwriteAndNegateIfEqual( + GetComparisonFlagCondition(*comparison)); + return VisitWord64Compare(this, value, cont); + case RegisterRepresentation::Float32(): + switch (comparison->kind) { + case ComparisonOp::Kind::kSignedLessThan: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitFloat32Compare(this, value, cont); + case ComparisonOp::Kind::kSignedLessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + return VisitFloat32Compare(this, value, cont); + default: + UNREACHABLE(); + } + case RegisterRepresentation::Float64(): + switch (comparison->kind) { + case ComparisonOp::Kind::kSignedLessThan: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitFloat64Compare(this, value, cont); + case ComparisonOp::Kind::kSignedLessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + return VisitFloat64Compare(this, value, cont); + default: + UNREACHABLE(); + } + default: + break; + } + } + } else if (const ProjectionOp* projection = + value_op.TryCast()) { + // Check if this is the overflow output projection of an + // WithOverflow node. + if (projection->index == 1u) { + // We cannot combine the WithOverflow with this branch + // unless the 0th projection (the use of the actual value of the + // is either nullptr, which means there's no use of the + // actual value, or was already defined, which means it is scheduled + // *AFTER* this branch). + OpIndex node = projection->input(); + OpIndex result = FindProjection(node, 0); + if (!result.valid() || IsDefined(result)) { + if (const OverflowCheckedBinopOp* binop = + TryCast(node)) { + const bool is64 = binop->rep == WordRepresentation::Word64(); + switch (binop->kind) { + case OverflowCheckedBinopOp::Kind::kSignedAdd: + cont->OverwriteAndNegateIfEqual(kOverflow); + if (is64) { + return VisitWord64BinOp(this, node, kS390_Add64, + AddOperandMode, cont); + } else { + return VisitWord32BinOp(this, node, kS390_Add32, + AddOperandMode, cont); + } + case OverflowCheckedBinopOp::Kind::kSignedSub: + cont->OverwriteAndNegateIfEqual(kOverflow); + if (is64) { + return VisitWord64BinOp(this, node, kS390_Sub64, + AddOperandMode, cont); + } else { + return VisitWord32BinOp(this, node, kS390_Sub32, + AddOperandMode, cont); + } + case OverflowCheckedBinopOp::Kind::kSignedMul: + if (is64) { + cont->OverwriteAndNegateIfEqual( + CpuFeatures::IsSupported(MISC_INSTR_EXT2) ? 
kOverflow + : kNotEqual); + return EmitInt64MulWithOverflow(this, node, cont); + + } else { + if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) { + cont->OverwriteAndNegateIfEqual(kOverflow); + return VisitWord32BinOp( + this, node, kS390_Mul32, + OperandMode::kAllowRRR | OperandMode::kAllowRM, cont); + } else { + cont->OverwriteAndNegateIfEqual(kNotEqual); + return VisitWord32BinOp( + this, node, kS390_Mul32WithOverflow, + OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps, + cont); + } + } + // TODO(miladfarca): Add kInt64AbsWithOverflow and + // kInt32AbsWithOverflow + default: + break; + } + } + } + } + } else if (value_op.Is()) { + if (fc == kNotEqual || fc == kEqual) + return VisitWord32Compare(this, value, cont); + } else if (value_op.Is()) { + return VisitTestUnderMask(this, value, cont); + } else if (value_op.Is()) { + auto load = this->load_view(value); + LoadRepresentation load_rep = load.loaded_rep(); + switch (load_rep.representation()) { + case MachineRepresentation::kWord32: + return VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value, + cont); + default: + break; + } + } else if (value_op.Is()) { + if (fc == kNotEqual || fc == kEqual) + return VisitWord32BinOp(this, value, kS390_Or32, Or32OperandMode, cont); + } else if (value_op.Is()) { + if (fc == kNotEqual || fc == kEqual) + return VisitWord32BinOp(this, value, kS390_Xor32, Xor32OperandMode, + cont); + } else if (value_op.Is()) { + if (fc == kNotEqual || fc == kEqual) + return VisitWord64Compare(this, value, cont); + } else if (value_op.Is()) { + return VisitTestUnderMask(this, value, cont); + } else if (value_op.Is()) { + if (fc == kNotEqual || fc == kEqual) + return VisitWord64BinOp(this, value, kS390_Or64, Or64OperandMode, cont); + } else if (value_op.Is()) { + if (fc == kNotEqual || fc == kEqual) + return VisitWord64BinOp(this, value, kS390_Xor64, Xor64OperandMode, + cont); + } else if (value_op.Is()) { + cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); + return VisitStackPointerGreaterThan(value, cont); + } + } + // Branch could not be combined with a compare, emit LoadAndTest + VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value, cont, true); +} + // Shared routine for word comparisons against zero. template void InstructionSelectorT::VisitWordCompareZero( node_t user, node_t value, FlagsContinuation* cont) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { // Try to combine with comparisons against 0 by simply inverting the branch. while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) { Int32BinopMatcher m(value); @@ -2257,17 +3001,13 @@ void InstructionSelectorT::VisitWordCompareZero( // Branch could not be combined with a compare, emit LoadAndTest VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value, cont, true); - } } template void InstructionSelectorT::VisitSwitch(node_t node, const SwitchInfo& sw) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { S390OperandGeneratorT g(this); - InstructionOperand value_operand = g.UseRegister(node->InputAt(0)); + InstructionOperand value_operand = g.UseRegister(this->input_at(node, 0)); // Emit either ArchTableSwitch or ArchBinarySearchSwitch. if (enable_switch_jump_table_ == @@ -2300,22 +3040,27 @@ void InstructionSelectorT::VisitSwitch(node_t node, // Generate a tree of conditional jumps. 
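VisitSwitch above emits either ArchTableSwitch or ArchBinarySearchSwitch; conceptually the choice is between an indexed jump table for dense case ranges and a binary search over sorted case values for sparse ones. A rough standalone sketch of that trade-off (hypothetical density heuristic, not V8's actual thresholds):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Dispatch `value` over sorted `cases`, mimicking the two lowerings:
    // a dense table indexed by (value - min) versus a binary search tree.
    int Dispatch(const std::vector<int>& cases, int value) {
      int min = cases.front();
      int max = cases.back();
      long range = static_cast<long>(max) - min + 1;

      // Hypothetical density heuristic: use a table when at least half of
      // the value range is covered by real cases.
      if (range <= 2 * static_cast<long>(cases.size())) {
        std::vector<int> table(range, -1);  // -1 == default target
        for (size_t i = 0; i < cases.size(); ++i) {
          table[cases[i] - min] = static_cast<int>(i);
        }
        if (value < min || value > max) return -1;
        return table[value - min];  // single indexed jump
      }
      // Otherwise: binary search, an O(log n) compare-and-branch tree.
      auto it = std::lower_bound(cases.begin(), cases.end(), value);
      if (it == cases.end() || *it != value) return -1;
      return static_cast<int>(it - cases.begin());
    }

    int main() {
      std::vector<int> dense = {0, 1, 2, 3, 4};
      std::vector<int> sparse = {1, 100, 5000, 70000};
      printf("%d %d\n", Dispatch(dense, 3), Dispatch(sparse, 5000));
      return 0;
    }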
return EmitBinarySearchSwitch(sw, value_operand); - } } template void InstructionSelectorT::VisitWord32Equal(node_t const node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + const WordBinopOp& op = this->Get(node).template Cast(); + if (this->MatchIntegralZero(op.right())) { + return VisitLoadAndTest(this, kS390_LoadAndTestWord32, node, op.left(), + &cont, true); + } + } else { - FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); - Int32BinopMatcher m(node); - if (m.right().Is(0)) { + Int32BinopMatcher m(node); + if (m.right().Is(0)) { return VisitLoadAndTest(this, kS390_LoadAndTestWord32, m.node(), m.left().node(), &cont, true); + } } VisitWord32Compare(this, node, &cont); - } } template @@ -2347,59 +3092,48 @@ void InstructionSelectorT::VisitUint32LessThanOrEqual(node_t node) { #if V8_TARGET_ARCH_S390X template void InstructionSelectorT::VisitWord64Equal(node_t const node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + const WordBinopOp& op = this->Get(node).template Cast(); + if (this->MatchIntegralZero(op.right())) { + return VisitLoadAndTest(this, kS390_LoadAndTestWord64, node, op.left(), + &cont, true); + } } else { - FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); Int64BinopMatcher m(node); if (m.right().Is(0)) { return VisitLoadAndTest(this, kS390_LoadAndTestWord64, m.node(), m.left().node(), &cont, true); } - VisitWord64Compare(this, node, &cont); } + VisitWord64Compare(this, node, &cont); } template void InstructionSelectorT::VisitInt64LessThan(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); VisitWord64Compare(this, node, &cont); - } } template void InstructionSelectorT::VisitInt64LessThanOrEqual(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); VisitWord64Compare(this, node, &cont); - } } template void InstructionSelectorT::VisitUint64LessThan(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); VisitWord64Compare(this, node, &cont); - } } template void InstructionSelectorT::VisitUint64LessThanOrEqual(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); VisitWord64Compare(this, node, &cont); - } } #endif @@ -2459,9 +3193,6 @@ template void InstructionSelectorT::EmitPrepareArguments( ZoneVector* arguments, const CallDescriptor* call_descriptor, node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { S390OperandGeneratorT g(this); // Prepare for C function call. @@ -2473,10 +3204,10 @@ void InstructionSelectorT::EmitPrepareArguments( // Poke any stack arguments. 
int slot = kStackFrameExtraParamSlot; for (PushParameter input : (*arguments)) { - if (input.node == nullptr) continue; - Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node), - g.TempImmediate(slot)); - ++slot; + if (!this->valid(input.node)) continue; + Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node), + g.TempImmediate(slot)); + ++slot; } } else { // Push any stack arguments. @@ -2484,13 +3215,12 @@ void InstructionSelectorT::EmitPrepareArguments( for (PushParameter input : base::Reversed(*arguments)) { stack_decrement += kSystemPointerSize; // Skip any alignment holes in pushed nodes. - if (input.node == nullptr) continue; + if (!this->valid(input.node)) continue; InstructionOperand decrement = g.UseImmediate(stack_decrement); stack_decrement = 0; Emit(kS390_Push, g.NoOutput(), decrement, g.UseRegister(input.node)); } } - } } template @@ -2510,24 +3240,19 @@ bool InstructionSelectorT::IsTailCallAddressImmediate() { template void InstructionSelectorT::VisitWord32AtomicLoad(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - AtomicLoadParameters atomic_load_params = - AtomicLoadParametersOf(node->op()); - LoadRepresentation load_rep = atomic_load_params.representation(); - VisitLoad(node, node, SelectLoadOpcode(load_rep)); - } + auto load = this->load_view(node); + LoadRepresentation load_rep = load.loaded_rep(); + VisitLoad(node, node, SelectLoadOpcode(load_rep)); } template void InstructionSelectorT::VisitWord32AtomicStore(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op()); + auto store = this->store_view(node); + AtomicStoreParameters store_params(store.stored_rep().representation(), + store.stored_rep().write_barrier_kind(), + store.memory_order().value(), + store.access_kind()); VisitGeneralStore(this, node, store_params.representation()); - } } template @@ -2815,24 +3540,19 @@ VISIT_ATOMIC64_BINOP(Xor) template void InstructionSelectorT::VisitWord64AtomicLoad(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - AtomicLoadParameters atomic_load_params = - AtomicLoadParametersOf(node->op()); - LoadRepresentation load_rep = atomic_load_params.representation(); - VisitLoad(node, node, SelectLoadOpcode(load_rep)); - } + auto load = this->load_view(node); + LoadRepresentation load_rep = load.loaded_rep(); + VisitLoad(node, node, SelectLoadOpcode(load_rep)); } template void InstructionSelectorT::VisitWord64AtomicStore(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op()); - VisitGeneralStore(this, node, store_params.representation()); - } + auto store = this->store_view(node); + AtomicStoreParameters store_params(store.stored_rep().representation(), + store.stored_rep().write_barrier_kind(), + store.memory_order().value(), + store.access_kind()); + VisitGeneralStore(this, node, store_params.representation()); } #define SIMD_TYPES(V) \ @@ -3320,30 +4040,26 @@ template void InstructionSelectorT::EmitPrepareResults( ZoneVector* results, const CallDescriptor* call_descriptor, node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { S390OperandGeneratorT g(this); for (PushParameter output : *results) { if (!output.location.IsCallerFrameSlot()) continue; // Skip any alignment holes in nodes. 
- if (output.node != nullptr) { - DCHECK(!call_descriptor->IsCFunctionCall()); - if (output.location.GetType() == MachineType::Float32()) { - MarkAsFloat32(output.node); - } else if (output.location.GetType() == MachineType::Float64()) { - MarkAsFloat64(output.node); - } else if (output.location.GetType() == MachineType::Simd128()) { - MarkAsSimd128(output.node); - } - int offset = call_descriptor->GetOffsetToReturns(); - int reverse_slot = -output.location.GetLocation() - offset; - Emit(kS390_Peek, g.DefineAsRegister(output.node), - g.UseImmediate(reverse_slot)); + if (this->valid(output.node)) { + DCHECK(!call_descriptor->IsCFunctionCall()); + if (output.location.GetType() == MachineType::Float32()) { + MarkAsFloat32(output.node); + } else if (output.location.GetType() == MachineType::Float64()) { + MarkAsFloat64(output.node); + } else if (output.location.GetType() == MachineType::Simd128()) { + MarkAsSimd128(output.node); + } + int offset = call_descriptor->GetOffsetToReturns(); + int reverse_slot = -output.location.GetLocation() - offset; + Emit(kS390_Peek, g.DefineAsRegister(output.node), + g.UseImmediate(reverse_slot)); } } - } } template @@ -3530,19 +4246,6 @@ void InstructionSelectorT::AddOutputToSelectContinuation( UNREACHABLE(); } -template <> -Node* InstructionSelectorT::FindProjection( - Node* node, size_t projection_index) { - return NodeProperties::FindProjection(node, projection_index); -} - -template <> -TurboshaftAdapter::node_t -InstructionSelectorT::FindProjection( - node_t node, size_t projection_index) { - UNIMPLEMENTED(); -} - MachineOperatorBuilder::Flags InstructionSelector::SupportedMachineOperatorFlags() { return MachineOperatorBuilder::kFloat32RoundDown | diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc index 5691a8b6d88c7d..d815d9ac2b130f 100644 --- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc +++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc @@ -24,7 +24,7 @@ #include "src/compiler/node-matchers.h" #include "src/compiler/osr.h" #include "src/execution/frame-constants.h" -#include "src/heap/memory-chunk.h" +#include "src/heap/mutable-page.h" #include "src/objects/code-kind.h" #include "src/objects/smi.h" diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc index 09b57b4ebf5de9..39e2ba3f3e1ee1 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc +++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc @@ -310,6 +310,17 @@ TryMatchBaseWithScaledIndexAndDisplacement64( result.displacement = 0; DCHECK(!load_transform->load_kind.tagged_base); return result; +#if V8_ENABLE_WASM_SIMD256_REVEC + } else if (const Simd256LoadTransformOp* load_transform = + op.TryCast()) { + result.base = load_transform->base(); + result.index = load_transform->index(); + DCHECK_EQ(load_transform->offset, 0); + result.scale = 0; + result.displacement = 0; + DCHECK(!load_transform->load_kind.tagged_base); + return result; +#endif // V8_ENABLE_WASM_SIMD256_REVEC #endif // V8_ENABLE_WEBASSEMBLY } else { return base::nullopt; @@ -1276,6 +1287,64 @@ void InstructionSelectorT::VisitLoadTransform(Node* node) { } VisitLoad(node, node, code); } + +#if V8_ENABLE_WASM_SIMD256_REVEC +template <> +void InstructionSelectorT::VisitSimd256LoadTransform( + Node* node) { + // For Turbofan, VisitLoadTransform should be called instead. 
+ UNREACHABLE(); +} + +template <> +void InstructionSelectorT::VisitSimd256LoadTransform( + node_t node) { + using namespace turboshaft; // NOLINT(build/namespaces) + const Simd256LoadTransformOp& op = + this->Get(node).Cast(); + ArchOpcode opcode; + switch (op.transform_kind) { + case Simd256LoadTransformOp::TransformKind::k8x16S: + opcode = kX64S256Load8x16S; + break; + case Simd256LoadTransformOp::TransformKind::k8x16U: + opcode = kX64S256Load8x16U; + break; + case Simd256LoadTransformOp::TransformKind::k16x8S: + opcode = kX64S256Load16x8S; + break; + case Simd256LoadTransformOp::TransformKind::k16x8U: + opcode = kX64S256Load16x8U; + break; + case Simd256LoadTransformOp::TransformKind::k32x4S: + opcode = kX64S256Load32x4S; + break; + case Simd256LoadTransformOp::TransformKind::k32x4U: + opcode = kX64S256Load32x4U; + break; + case Simd256LoadTransformOp::TransformKind::k8Splat: + opcode = kX64S256Load8Splat; + break; + case Simd256LoadTransformOp::TransformKind::k16Splat: + opcode = kX64S256Load16Splat; + break; + case Simd256LoadTransformOp::TransformKind::k32Splat: + opcode = kX64S256Load32Splat; + break; + case Simd256LoadTransformOp::TransformKind::k64Splat: + opcode = kX64S256Load64Splat; + break; + } + + // x64 supports unaligned loads + DCHECK(!op.load_kind.maybe_unaligned); + InstructionCode code = opcode; + if (op.load_kind.with_trap_handler) { + code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds); + } + VisitLoad(node, node, code); +} +#endif // V8_ENABLE_WASM_SIMD256_REVEC #endif // V8_ENABLE_WEBASSEMBLY template @@ -5915,7 +5984,11 @@ void InstructionSelectorT::VisitS128Select(node_t node) { template <> void InstructionSelectorT::VisitS256Select(node_t node) { - UNIMPLEMENTED(); + X64OperandGeneratorT g(this); + Emit(kX64SSelect | VectorLengthField::encode(kV256), g.DefineAsRegister(node), + g.UseRegister(this->input_at(node, 0)), + g.UseRegister(this->input_at(node, 1)), + g.UseRegister(this->input_at(node, 2))); } template <> @@ -6080,6 +6153,25 @@ void InstructionSelectorT::VisitI32x8UConvertF32x8(node_t node) { Emit(kX64I32x8UConvertF32x8, g.DefineSameAsFirst(node), g.UseRegister(this->input_at(node, 0)), arraysize(temps), temps); } + +template <> +void InstructionSelectorT::VisitExtractF128(node_t node) { + X64OperandGeneratorT g(this); + int32_t lane = OpParameter(node->op()); + Emit(kX64ExtractF128, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); +} + +#if V8_ENABLE_WASM_SIMD256_REVEC +template <> +void InstructionSelectorT::VisitExtractF128(node_t node) { + X64OperandGeneratorT g(this); + const turboshaft::Simd256Extract128LaneOp& op = + this->Get(node).template Cast(); + Emit(kX64ExtractF128, g.DefineAsRegister(node), g.UseRegister(op.input()), + g.UseImmediate(op.lane)); +} +#endif // V8_ENABLE_WASM_SIMD256_REVEC #endif template @@ -6092,19 +6184,6 @@ void InstructionSelectorT::VisitInt64AbsWithOverflow(node_t node) { UNREACHABLE(); } -template <> -void InstructionSelectorT::VisitExtractF128(node_t node) { - UNIMPLEMENTED(); -} - -template <> -void InstructionSelectorT::VisitExtractF128(node_t node) { - X64OperandGeneratorT g(this); - int32_t lane = OpParameter(node->op()); - Emit(kX64ExtractF128, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); -} - #if V8_ENABLE_WEBASSEMBLY namespace { diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc index 1c1ad26dc3acc0..01922e87846928 100644 --- 
a/deps/v8/src/compiler/bytecode-graph-builder.cc +++ b/deps/v8/src/compiler/bytecode-graph-builder.cc @@ -23,6 +23,7 @@ #include "src/interpreter/bytecode-array-iterator.h" #include "src/interpreter/bytecode-flags.h" #include "src/interpreter/bytecodes.h" +#include "src/objects/elements-kind.h" #include "src/objects/js-generator.h" #include "src/objects/literal-objects-inl.h" #include "src/objects/scope-info.h" @@ -282,6 +283,8 @@ class BytecodeGraphBuilder { const Operator* op, Node* receiver, Node* key, Node* value, FeedbackSlot slot); + bool DeoptimizeIfFP16(FeedbackSource feedback); + // Applies the given early reduction onto the current environment. void ApplyEarlyReduction(JSTypeHintLowering::LoweringResult reduction); @@ -2049,6 +2052,50 @@ void BytecodeGraphBuilder::VisitGetNamedPropertyFromSuper() { environment()->BindAccumulator(node, Environment::kAttachFrameState); } +bool BytecodeGraphBuilder::DeoptimizeIfFP16(FeedbackSource feedback) { + const compiler::ProcessedFeedback& processed_feedback = + broker()->GetFeedbackForPropertyAccess( + feedback, compiler::AccessMode::kLoad, base::nullopt); + if (processed_feedback.kind() != ProcessedFeedback::Kind::kElementAccess) { + return false; + } + + compiler::AccessInfoFactory access_info_factory(broker(), graph_zone()); + ZoneVector access_infos(graph_zone()); + if (!access_info_factory.ComputeElementAccessInfos( + processed_feedback.AsElementAccess(), &access_infos) || + access_infos.empty()) { + return false; + } + + bool has_float16_element = false; + + for (size_t i = 0; i < access_infos.size(); i++) { + if (access_infos[i].elements_kind() == FLOAT16_ELEMENTS) { + has_float16_element = true; + break; + } + } + + if (!has_float16_element) return false; + + Node* effect = environment()->GetEffectDependency(); + Node* control = environment()->GetControlDependency(); + + Node* deoptimize = jsgraph()->graph()->NewNode( + jsgraph()->common()->Deoptimize(DeoptimizeReason::kFloat16NotYetSupported, + FeedbackSource()), + jsgraph()->Dead(), effect, control); + + Node* frame_state = + NodeProperties::FindFrameStateBefore(deoptimize, jsgraph()->Dead()); + deoptimize->ReplaceInput(0, frame_state); + environment()->BindAccumulator(deoptimize, Environment::kAttachFrameState); + ApplyEarlyReduction(JSTypeHintLowering::LoweringResult::Exit(deoptimize)); + + return true; +} + void BytecodeGraphBuilder::VisitGetKeyedProperty() { PrepareEagerCheckpoint(); Node* key = environment()->LookupAccumulator(); @@ -2062,6 +2109,9 @@ void BytecodeGraphBuilder::VisitGetKeyedProperty() { TryBuildSimplifiedLoadKeyed(op, object, key, feedback.slot); if (lowering.IsExit()) return; + // TODO(v8:14012): We should avoid deopt-loop here. Before ship Float16Array. + if (DeoptimizeIfFP16(feedback)) return; + Node* node = nullptr; if (lowering.IsSideEffectFree()) { node = lowering.value(); @@ -2076,6 +2126,11 @@ void BytecodeGraphBuilder::VisitGetKeyedProperty() { environment()->BindAccumulator(node, Environment::kAttachFrameState); } +void BytecodeGraphBuilder::VisitGetEnumeratedKeyedProperty() { + // TODO(v8:14245): Implement this bytecode in Maglev/Turbofan. + UNREACHABLE(); +} + void BytecodeGraphBuilder::BuildNamedStore(NamedStoreMode store_mode) { PrepareEagerCheckpoint(); Node* value = environment()->LookupAccumulator(); @@ -2138,6 +2193,9 @@ void BytecodeGraphBuilder::VisitSetKeyedProperty() { TryBuildSimplifiedStoreKeyed(op, object, key, value, source.slot); if (lowering.IsExit()) return; + // TODO(v8:14012): We should avoid deopt-loop here. 
Before ship Float16Array. + if (DeoptimizeIfFP16(source)) return; + Node* node = nullptr; if (lowering.IsSideEffectFree()) { node = lowering.value(); diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc index 3ec8fbef3453d8..8ceb4ea61ffc21 100644 --- a/deps/v8/src/compiler/code-assembler.cc +++ b/deps/v8/src/compiler/code-assembler.cc @@ -611,6 +611,20 @@ TNode CodeAssembler::StackSlotPtr(int size, int alignment) { CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP) #undef DEFINE_CODE_ASSEMBLER_BINARY_OP +TNode> CodeAssembler::Int32PairAdd( + TNode lhs_lo_word, TNode lhs_hi_word, + TNode rhs_lo_word, TNode rhs_hi_word) { + return UncheckedCast>(raw_assembler()->Int32PairAdd( + lhs_lo_word, lhs_hi_word, rhs_lo_word, rhs_hi_word)); +} + +TNode> CodeAssembler::Int32PairSub( + TNode lhs_lo_word, TNode lhs_hi_word, + TNode rhs_lo_word, TNode rhs_hi_word) { + return UncheckedCast>(raw_assembler()->Int32PairSub( + lhs_lo_word, lhs_hi_word, rhs_lo_word, rhs_hi_word)); +} + TNode CodeAssembler::WordShl(TNode value, int shift) { return (shift != 0) ? WordShl(value, IntPtrConstant(shift)) : value; } @@ -755,6 +769,11 @@ Node* CodeAssembler::LoadFromObject(MachineType type, TNode object, return raw_assembler()->LoadFromObject(type, object, offset); } +Node* CodeAssembler::LoadProtectedPointerFromObject(TNode object, + TNode offset) { + return raw_assembler()->LoadProtectedPointerFromObject(object, offset); +} + #ifdef V8_MAP_PACKING Node* CodeAssembler::PackMapWord(Node* value) { TNode map_word = diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h index db09113a9041c6..8ab0d557bb2798 100644 --- a/deps/v8/src/compiler/code-assembler.h +++ b/deps/v8/src/compiler/code-assembler.h @@ -233,6 +233,11 @@ class CodeAssemblerParameterizedLabel; V(Uint32LessThanOrEqual, BoolT, Word32T, Word32T) \ V(Uint32GreaterThan, BoolT, Word32T, Word32T) \ V(Uint32GreaterThanOrEqual, BoolT, Word32T, Word32T) \ + /* Use Word64Equal if you need Uint64Equal */ \ + V(Uint64LessThan, BoolT, Word64T, Word64T) \ + V(Uint64LessThanOrEqual, BoolT, Word64T, Word64T) \ + V(Uint64GreaterThan, BoolT, Word64T, Word64T) \ + V(Uint64GreaterThanOrEqual, BoolT, Word64T, Word64T) \ /* Use WordEqual if you need UintPtrEqual */ \ V(UintPtrLessThan, BoolT, WordT, WordT) \ V(UintPtrLessThanOrEqual, BoolT, WordT, WordT) \ @@ -241,6 +246,9 @@ class CodeAssemblerParameterizedLabel; #define CODE_ASSEMBLER_BINARY_OP_LIST(V) \ CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \ + V(Float32Sub, Float32T, Float32T, Float32T) \ + V(Float32Add, Float32T, Float32T, Float32T) \ + V(Float32Mul, Float32T, Float32T, Float32T) \ V(Float64Add, Float64T, Float64T, Float64T) \ V(Float64Sub, Float64T, Float64T, Float64T) \ V(Float64Mul, Float64T, Float64T, Float64T) \ @@ -307,6 +315,7 @@ class CodeAssemblerParameterizedLabel; TNode Float64Add(TNode a, TNode b); #define CODE_ASSEMBLER_UNARY_OP_LIST(V) \ + V(Float32Abs, Float32T, Float32T) \ V(Float64Abs, Float64T, Float64T) \ V(Float64Acos, Float64T, Float64T) \ V(Float64Acosh, Float64T, Float64T) \ @@ -348,7 +357,8 @@ TNode Float64Add(TNode a, TNode b); V(ChangeUint32ToUint64, Uint64T, Word32T) \ V(BitcastInt32ToFloat32, Float32T, Word32T) \ V(BitcastFloat32ToInt32, Uint32T, Float32T) \ - V(BitcastFloat64ToInt64, IntPtrT, Float64T) \ + V(BitcastFloat64ToInt64, Int64T, Float64T) \ + V(BitcastInt64ToFloat64, Float64T, Int64T) \ V(RoundFloat64ToInt32, Int32T, Float64T) \ V(RoundInt32ToFloat32, Float32T, Int32T) \ V(Float64SilenceNaN, 
Float64T, Float64T) \ @@ -441,9 +451,9 @@ class V8_EXPORT_PRIVATE CodeAssembler { template operator TNode() { - static_assert( - !std::is_same::value, - "Can't cast to MaybeObject, use explicit conversion functions. "); + static_assert(!std::is_same>::value, + "Can't cast to Tagged, use explicit " + "conversion functions. "); static_assert(types_have_common_values::value, "Incompatible types: this cast can never succeed."); @@ -537,6 +547,16 @@ class V8_EXPORT_PRIVATE CodeAssembler { TNode Uint32Constant(uint32_t value) { return Unsigned(Int32Constant(base::bit_cast(value))); } + TNode Uint64HighWordConstant(uint64_t value) { + return Uint32Constant(value >> 32); + } + TNode Uint64HighWordConstantNoLowWord(uint64_t value) { + DCHECK_EQ(0, value & ~uint32_t{0}); + return Uint64HighWordConstant(value); + } + TNode Uint64LowWordConstant(uint64_t value) { + return Uint32Constant(static_cast(value)); + } TNode UintPtrConstant(uintptr_t value) { return Unsigned(IntPtrConstant(base::bit_cast(value))); } @@ -805,6 +825,8 @@ class V8_EXPORT_PRIVATE CodeAssembler { Node* LoadFromObject(MachineType type, TNode object, TNode offset); + Node* LoadProtectedPointerFromObject(TNode object, + TNode offset); #ifdef V8_MAP_PACKING Node* PackMapWord(Node* value); @@ -933,6 +955,16 @@ class V8_EXPORT_PRIVATE CodeAssembler { CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP) #undef DECLARE_CODE_ASSEMBLER_BINARY_OP + // Pairwise operations for 32bit. + TNode> Int32PairAdd(TNode lhs_lo_word, + TNode lhs_hi_word, + TNode rhs_lo_word, + TNode rhs_hi_word); + TNode> Int32PairSub(TNode lhs_lo_word, + TNode lhs_hi_word, + TNode rhs_lo_word, + TNode rhs_hi_word); + TNode WordShr(TNode left, TNode right) { return Unsigned(WordShr(static_cast>(left), right)); } diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc index 14fc78c877332f..c04e0cf137751e 100644 --- a/deps/v8/src/compiler/effect-control-linearizer.cc +++ b/deps/v8/src/compiler/effect-control-linearizer.cc @@ -6851,7 +6851,7 @@ Node* EffectControlLinearizer::AdaptFastCallArgument( } else { switch (arg_type.GetType()) { case CTypeInfo::Type::kV8Value: { - return fast_api_call::AdaptLocalArgument(gasm(), node); + return __ AdaptLocalArgument(node); } case CTypeInfo::Type::kFloat32: { return __ TruncateFloat64ToFloat32(node); @@ -6947,7 +6947,7 @@ Node* EffectControlLinearizer::AdaptFastCallArgument( Node* value_is_smi = ObjectIsSmi(node); __ GotoIf(value_is_smi, if_error); - Node* node_to_pass = fast_api_call::AdaptLocalArgument(gasm(), node); + Node* node_to_pass = __ AdaptLocalArgument(node); // Check that the value is a JSArray. 
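The Int32PairAdd/Int32PairSub helpers and the Uint64HighWordConstant/Uint64LowWordConstant constants added to CodeAssembler above operate on 64-bit values split into 32-bit halves, which is how 64-bit arithmetic is expressed on 32-bit targets. A minimal standalone sketch of the same decomposition (plain C++, not the CodeAssembler API):

    #include <cstdint>
    #include <cstdio>

    struct PairT { uint32_t low; uint32_t high; };

    // Split a 64-bit constant into its low and high 32-bit words.
    PairT Split(uint64_t v) {
      return {static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32)};
    }

    // Add two 64-bit values represented as 32-bit pairs: add the low words,
    // then add the high words plus the carry out of the low addition.
    PairT PairAdd(PairT a, PairT b) {
      uint32_t low = a.low + b.low;
      uint32_t carry = low < a.low ? 1 : 0;
      uint32_t high = a.high + b.high + carry;
      return {low, high};
    }

    int main() {
      PairT r = PairAdd(Split(0x00000001FFFFFFFFull), Split(1));
      printf("high=%08x low=%08x\n", r.high, r.low);  // high=00000002 low=00000000
      return 0;
    }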
Node* value_map = __ LoadField(AccessBuilder::ForMap(), node); @@ -7007,7 +7007,7 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument( value_instance_type, __ Int32Constant(JS_ARRAY_TYPE)); __ GotoIfNot(value_is_js_array, &next); - Node* node_to_pass = fast_api_call::AdaptLocalArgument(gasm(), node); + Node* node_to_pass = __ AdaptLocalArgument(node); Node* target_address = __ ExternalConstant(ExternalReference::Create( c_functions[func_index].address, ref_type)); @@ -7334,7 +7334,7 @@ Node* EffectControlLinearizer::BuildReverseBytes(ExternalArrayType type, return result; } } - + case kExternalFloat16Array: case kExternalBigInt64Array: case kExternalBigUint64Array: UNREACHABLE(); diff --git a/deps/v8/src/compiler/fast-api-calls.cc b/deps/v8/src/compiler/fast-api-calls.cc index 935cc0ad7beefd..aa687fa10ffe39 100644 --- a/deps/v8/src/compiler/fast-api-calls.cc +++ b/deps/v8/src/compiler/fast-api-calls.cc @@ -349,7 +349,7 @@ Node* FastApiCallBuilder::Build(const FastApiCallFunctionVector& c_functions, static_cast(offsetof(v8::FastApiCallbackOptions, fallback)), __ Int32Constant(0)); - Node* data_argument_to_pass = AdaptLocalArgument(gasm(), data_argument); + Node* data_argument_to_pass = __ AdaptLocalArgument(data_argument); __ Store(StoreRepresentation(MachineType::PointerRepresentation(), kNoWriteBarrier), diff --git a/deps/v8/src/compiler/fast-api-calls.h b/deps/v8/src/compiler/fast-api-calls.h index c05e302988101b..b97b37e5746433 100644 --- a/deps/v8/src/compiler/fast-api-calls.h +++ b/deps/v8/src/compiler/fast-api-calls.h @@ -60,26 +60,6 @@ Node* BuildFastApiCall(Isolate* isolate, Graph* graph, const InitializeOptions& initialize_options, const GenerateSlowApiCall& generate_slow_api_call); -inline Node* AdaptLocalArgument(GraphAssembler* graph_assembler, - Node* argument) { -#define __ graph_assembler-> - -#ifdef V8_ENABLE_DIRECT_LOCAL - // With direct locals, the argument can be passed directly. - return __ BitcastTaggedToWord(argument); -#else - // With indirect locals, the argument has to be stored on the stack and the - // slot address is passed. - Node* stack_slot = __ StackSlot(sizeof(uintptr_t), alignof(uintptr_t), true); - __ Store(StoreRepresentation(MachineType::PointerRepresentation(), - kNoWriteBarrier), - stack_slot, 0, __ BitcastTaggedToWord(argument)); - return stack_slot; -#endif - -#undef __ -} - } // namespace fast_api_call } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc index 5ad82d160d435e..c4be019c4ca811 100644 --- a/deps/v8/src/compiler/graph-assembler.cc +++ b/deps/v8/src/compiler/graph-assembler.cc @@ -1036,6 +1036,21 @@ TNode GraphAssembler::StackSlot(int size, int alignment, graph()->NewNode(machine()->StackSlot(size, alignment, is_tagged))); } +Node* GraphAssembler::AdaptLocalArgument(Node* argument) { +#ifdef V8_ENABLE_DIRECT_LOCAL + // With direct locals, the argument can be passed directly. + return BitcastTaggedToWord(argument); +#else + // With indirect locals, the argument has to be stored on the stack and the + // slot address is passed. 
+ Node* stack_slot = StackSlot(sizeof(uintptr_t), alignof(uintptr_t), true); + Store(StoreRepresentation(MachineType::PointerRepresentation(), + kNoWriteBarrier), + stack_slot, 0, BitcastTaggedToWord(argument)); + return stack_slot; +#endif +} + Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset, Node* value) { return AddNode(graph()->NewNode(machine()->Store(rep), object, offset, value, diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h index f7c8b77d55daa2..017b8acafa1183 100644 --- a/deps/v8/src/compiler/graph-assembler.h +++ b/deps/v8/src/compiler/graph-assembler.h @@ -403,6 +403,8 @@ class V8_EXPORT_PRIVATE GraphAssembler { TNode StackSlot(int size, int alignment, bool is_tagged = false); + Node* AdaptLocalArgument(Node* argument); + Node* Store(StoreRepresentation rep, Node* object, Node* offset, Node* value); Node* Store(StoreRepresentation rep, Node* object, int offset, Node* value); Node* Load(MachineType type, Node* object, Node* offset); diff --git a/deps/v8/src/compiler/heap-refs.cc b/deps/v8/src/compiler/heap-refs.cc index 3187640efcc1e4..e31c586757b855 100644 --- a/deps/v8/src/compiler/heap-refs.cc +++ b/deps/v8/src/compiler/heap-refs.cc @@ -406,6 +406,13 @@ class JSDataViewData : public JSObjectData { : JSObjectData(broker, storage, object, kind) {} }; +class JSPrimitiveWrapperData : public JSObjectData { + public: + JSPrimitiveWrapperData(JSHeapBroker* broker, ObjectData** storage, + Handle object, ObjectDataKind kind) + : JSObjectData(broker, storage, object, kind) {} +}; + class JSBoundFunctionData : public JSObjectData { public: JSBoundFunctionData(JSHeapBroker* broker, ObjectData** storage, @@ -1073,6 +1080,13 @@ ObjectData* JSHeapBroker::TryGetOrCreateData(Handle object, { UNREACHABLE(); } + + // Our type checking (essentially GetMapInstanceType) assumes that a heap + // object with itself as map must be a meta map and so must be a MAP_TYPE. + // However, this isn't necessarily true in case of heap memory corruption. + // This check defends against that. See b/326700497 for more details. + SBXCHECK_EQ(object_data->IsMap(), IsMap(*object)); + // At this point the entry pointer is not guaranteed to be valid as // the refs_ hash hable could be resized by one of the constructors above. 
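GraphAssembler::AdaptLocalArgument above passes the argument directly when direct locals are enabled, and otherwise spills it to a stack slot and passes the slot address. The same two conventions, sketched as plain C++ (hypothetical callee, not the V8 API):

    #include <cstdint>
    #include <cstdio>

    // A callee that expects either the value itself (direct) or the address
    // of a slot holding the value (indirect), depending on the build mode.
    #define DIRECT_LOCALS 0

    void Callee(uintptr_t arg) {
    #if DIRECT_LOCALS
      uintptr_t value = arg;  // value was passed directly
    #else
      uintptr_t value = *reinterpret_cast<uintptr_t*>(arg);  // load from slot
    #endif
      printf("callee sees %lu\n", static_cast<unsigned long>(value));
    }

    int main() {
      uintptr_t tagged_value = 42;
    #if DIRECT_LOCALS
      Callee(tagged_value);
    #else
      // Indirect: store the value in a stack slot and pass its address.
      uintptr_t slot = tagged_value;
      Callee(reinterpret_cast<uintptr_t>(&slot));
    #endif
      return 0;
    }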
DCHECK_EQ(object_data, refs_->Lookup(object.address())->value); @@ -1436,7 +1450,7 @@ Float64 FixedDoubleArrayRef::GetFromImmutableFixedDoubleArray(int i) const { return Float64::FromBits(object()->get_representation(i)); } -Handle BytecodeArrayRef::SourcePositionTable( +Handle BytecodeArrayRef::SourcePositionTable( JSHeapBroker* broker) const { return broker->CanonicalPersistentHandle(object()->SourcePositionTable()); } @@ -1590,11 +1604,16 @@ StringRef RegExpBoilerplateDescriptionRef::source(JSHeapBroker* broker) const { int RegExpBoilerplateDescriptionRef::flags() const { return object()->flags(); } -OptionalCallHandlerInfoRef FunctionTemplateInfoRef::call_code( +Address FunctionTemplateInfoRef::callback(JSHeapBroker* broker) const { + return object()->callback(broker->isolate()); +} + +OptionalObjectRef FunctionTemplateInfoRef::callback_data( JSHeapBroker* broker) const { - Tagged call_code = object()->call_code(kAcquireLoad); - if (i::IsUndefined(call_code)) return base::nullopt; - return TryMakeRef(broker, CallHandlerInfo::cast(call_code)); + ObjectRef data = + MakeRefAssumeMemoryFence(broker, object()->callback_data(kAcquireLoad)); + if (data.IsTheHole()) return {}; + return data; } bool FunctionTemplateInfoRef::is_signature_undefined( @@ -1640,10 +1659,6 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType( prototype.AsJSObject()); } -ObjectRef CallHandlerInfoRef::data(JSHeapBroker* broker) const { - return MakeRefAssumeMemoryFence(broker, object()->data()); -} - HEAP_ACCESSOR_C(ScopeInfo, int, ContextLength) HEAP_ACCESSOR_C(ScopeInfo, bool, HasContextExtensionSlot) HEAP_ACCESSOR_C(ScopeInfo, bool, HasOuterScopeInfo) @@ -1757,6 +1772,12 @@ void* JSTypedArrayRef::data_ptr() const { return object()->DataPtr(); } +bool JSPrimitiveWrapperRef::IsStringWrapper(JSHeapBroker* broker) const { + auto elements_kind = map(broker).elements_kind(); + return elements_kind == FAST_STRING_WRAPPER_ELEMENTS || + elements_kind == SLOW_STRING_WRAPPER_ELEMENTS; +} + bool MapRef::IsInobjectSlackTrackingInProgress() const { return construction_counter() != Map::kNoSlackTracking; } @@ -1787,10 +1808,6 @@ bool StringRef::IsExternalString() const { return i::IsExternalString(*object()); } -Address CallHandlerInfoRef::callback(JSHeapBroker* broker) const { - return object()->callback(broker->isolate()); -} - ZoneVector
FunctionTemplateInfoRef::c_functions( JSHeapBroker* broker) const { return GetCFunctions(FixedArray::cast(object()->GetCFunctionOverloads()), @@ -2145,13 +2162,14 @@ HeapObjectType HeapObjectRef::GetHeapObjectType(JSHeapBroker* broker) const { HeapObjectType::Flags flags(0); if (map->is_undetectable()) flags |= HeapObjectType::kUndetectable; if (map->is_callable()) flags |= HeapObjectType::kCallable; - return HeapObjectType(map->instance_type(), flags, + return HeapObjectType(map->instance_type(), map->elements_kind(), flags, GetOddballType(broker->isolate(), map), HoleType()); } HeapObjectType::Flags flags(0); if (map(broker).is_undetectable()) flags |= HeapObjectType::kUndetectable; if (map(broker).is_callable()) flags |= HeapObjectType::kCallable; - return HeapObjectType(map(broker).instance_type(), flags, + return HeapObjectType(map(broker).instance_type(), + map(broker).elements_kind(), flags, map(broker).oddball_type(broker), HoleType()); } @@ -2350,10 +2368,10 @@ OptionalMapRef JSObjectRef::GetObjectCreateMap(JSHeapBroker* broker) const { map_handle->prototype_info(kAcquireLoad)); if (!IsPrototypeInfo(*maybe_proto_info)) return {}; - MaybeObject maybe_object_create_map = + Tagged maybe_object_create_map = Handle::cast(maybe_proto_info) ->ObjectCreateMap(kAcquireLoad); - if (!maybe_object_create_map->IsWeak()) return {}; + if (!maybe_object_create_map.IsWeak()) return {}; return MapRef(broker->GetOrCreateData( maybe_object_create_map.GetHeapObjectAssumeWeak(), kAssumeMemoryFence)); diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h index 249178dff14439..4d2aba18a7f806 100644 --- a/deps/v8/src/compiler/heap-refs.h +++ b/deps/v8/src/compiler/heap-refs.h @@ -23,7 +23,6 @@ class CFunctionInfo; namespace internal { class BytecodeArray; -class CallHandlerInfo; class FixedDoubleArray; class FunctionTemplateInfo; class HeapNumber; @@ -103,6 +102,7 @@ enum class RefSerializationKind { BACKGROUND_SERIALIZED(JSGlobalObject) \ BACKGROUND_SERIALIZED(JSGlobalProxy) \ BACKGROUND_SERIALIZED(JSTypedArray) \ + BACKGROUND_SERIALIZED(JSPrimitiveWrapper) \ /* Subtypes of Context */ \ NEVER_SERIALIZED(NativeContext) \ /* Subtypes of FixedArray */ \ @@ -124,7 +124,6 @@ enum class RefSerializationKind { NEVER_SERIALIZED(ArrayBoilerplateDescription) \ BACKGROUND_SERIALIZED(BigInt) \ NEVER_SERIALIZED(BytecodeArray) \ - NEVER_SERIALIZED(CallHandlerInfo) \ NEVER_SERIALIZED(Cell) \ NEVER_SERIALIZED(Code) \ NEVER_SERIALIZED(Context) \ @@ -440,9 +439,10 @@ class HeapObjectType { using Flags = base::Flags; - HeapObjectType(InstanceType instance_type, Flags flags, - OddballType oddball_type, HoleType hole_type) + HeapObjectType(InstanceType instance_type, ElementsKind elements_kind, + Flags flags, OddballType oddball_type, HoleType hole_type) : instance_type_(instance_type), + elements_kind_(elements_kind), oddball_type_(oddball_type), hole_type_(hole_type), flags_(flags) { @@ -457,12 +457,14 @@ class HeapObjectType { HoleType hole_type(JSHeapBroker* broker) const { return hole_type_; } InstanceType instance_type() const { return instance_type_; } Flags flags() const { return flags_; } + ElementsKind elements_kind() const { return elements_kind_; } bool is_callable() const { return flags_ & kCallable; } bool is_undetectable() const { return flags_ & kUndetectable; } private: InstanceType const instance_type_; + ElementsKind const elements_kind_; OddballType const oddball_type_; HoleType const hole_type_; Flags const flags_; @@ -786,16 +788,6 @@ class FeedbackVectorRef : public 
HeapObjectRef { FeedbackCellRef GetClosureFeedbackCell(JSHeapBroker* broker, int index) const; }; -class CallHandlerInfoRef : public HeapObjectRef { - public: - DEFINE_REF_CONSTRUCTOR(CallHandlerInfo, HeapObjectRef) - - Handle object() const; - - Address callback(JSHeapBroker* broker) const; - ObjectRef data(JSHeapBroker* broker) const; -}; - class AccessorInfoRef : public HeapObjectRef { public: DEFINE_REF_CONSTRUCTOR(AccessorInfo, HeapObjectRef) @@ -919,7 +911,11 @@ class FunctionTemplateInfoRef : public HeapObjectRef { int16_t allowed_receiver_instance_type_range_start() const; int16_t allowed_receiver_instance_type_range_end() const; - OptionalCallHandlerInfoRef call_code(JSHeapBroker* broker) const; + // Function pointer and a data value that should be passed to the callback. + // The |callback_data| must be read before the |callback|. + Address callback(JSHeapBroker* broker) const; + OptionalObjectRef callback_data(JSHeapBroker* broker) const; + ZoneVector
c_functions(JSHeapBroker* broker) const; ZoneVector c_signatures(JSHeapBroker* broker) const; HolderLookupResult LookupHolderOfExpectedType(JSHeapBroker* broker, @@ -980,7 +976,7 @@ class BytecodeArrayRef : public HeapObjectRef { int parameter_count() const; interpreter::Register incoming_new_target_or_generator_register() const; - Handle SourcePositionTable(JSHeapBroker* broker) const; + Handle SourcePositionTable(JSHeapBroker* broker) const; // Exception handler table. Address handler_table_address() const; @@ -1144,6 +1140,15 @@ class JSTypedArrayRef : public JSObjectRef { HeapObjectRef buffer(JSHeapBroker* broker) const; }; +class JSPrimitiveWrapperRef : public JSObjectRef { + public: + DEFINE_REF_CONSTRUCTOR(JSPrimitiveWrapper, JSObjectRef) + + bool IsStringWrapper(JSHeapBroker* broker) const; + + Handle object() const; +}; + class SourceTextModuleRef : public HeapObjectRef { public: DEFINE_REF_CONSTRUCTOR(SourceTextModule, HeapObjectRef) diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc index 2d6424dd017e51..cfaee38ad55ac5 100644 --- a/deps/v8/src/compiler/int64-lowering.cc +++ b/deps/v8/src/compiler/int64-lowering.cc @@ -4,7 +4,6 @@ #include "src/compiler/int64-lowering.h" -#include "src/base/v8-fallthrough.h" #include "src/compiler/common-operator.h" #include "src/compiler/diamond.h" #include "src/compiler/graph.h" @@ -672,7 +671,7 @@ void Int64Lowering::LowerNode(Node* node) { } case IrOpcode::kWord64RolLowerable: DCHECK(machine()->Word32Rol().IsSupported()); - V8_FALLTHROUGH; + [[fallthrough]]; case IrOpcode::kWord64RorLowerable: { DCHECK_EQ(3, node->InputCount()); Node* input = node->InputAt(0); diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc index 32b336b8286b3d..87b31d6dfc01bc 100644 --- a/deps/v8/src/compiler/js-call-reducer.cc +++ b/deps/v8/src/compiler/js-call-reducer.cc @@ -677,8 +677,6 @@ class FastApiCallReducerAssembler : public JSCallReducerAssembler { // [fast callee, receiver, ... C arguments, // call code, external constant for function, argc, call handler info data, // holder, receiver, ... 
JS arguments, context, new frame state] - CallHandlerInfoRef call_handler_info = - *function_template_info_.call_code(broker()); bool no_profiling = broker()->dependencies()->DependOnNoProfilingProtector(); Callable call_api_callback = Builtins::CallableFor( @@ -688,7 +686,7 @@ class FastApiCallReducerAssembler : public JSCallReducerAssembler { CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(graph()->zone(), cid, arity_ + kReceiver, CallDescriptor::kNeedsFrameState); - ApiFunction api_function(call_handler_info.callback(broker())); + ApiFunction api_function(function_template_info_.callback(broker())); ExternalReference function_reference = ExternalReference::Create( isolate(), &api_function, ExternalReference::DIRECT_API_CALL, function_template_info_.c_functions(broker()).data(), @@ -703,7 +701,8 @@ class FastApiCallReducerAssembler : public JSCallReducerAssembler { inputs[cursor++] = HeapConstant(call_api_callback.code()); inputs[cursor++] = ExternalConstant(function_reference); inputs[cursor++] = NumberConstant(arity_); - inputs[cursor++] = Constant(call_handler_info.data(broker())); + inputs[cursor++] = + Constant(function_template_info_.callback_data(broker()).value()); inputs[cursor++] = holder_; inputs[cursor++] = receiver_; for (int i = 0; i < arity_; ++i) { @@ -4030,7 +4029,9 @@ Reduction JSCallReducer::ReduceCallApiFunction(Node* node, // TODO(turbofan): Consider introducing a JSCallApiCallback operator for // this and lower it during JSGenericLowering, and unify this with the // JSNativeContextSpecialization::InlineApiCall method a bit. - if (!function_template_info.call_code(broker()).has_value()) { + compiler::OptionalObjectRef maybe_callback_data = + function_template_info.callback_data(broker()); + if (!maybe_callback_data.has_value()) { TRACE_BROKER_MISSING(broker(), "call code for function template info " << function_template_info); return NoChange(); @@ -4056,8 +4057,6 @@ Reduction JSCallReducer::ReduceCallApiFunction(Node* node, // Slow call - CallHandlerInfoRef call_handler_info = - *function_template_info.call_code(broker()); bool no_profiling = broker()->dependencies()->DependOnNoProfilingProtector(); Callable call_api_callback = Builtins::CallableFor( isolate(), no_profiling ? Builtin::kCallApiCallbackOptimizedNoProfiling @@ -4066,7 +4065,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(Node* node, auto call_descriptor = Linkage::GetStubCallDescriptor(graph()->zone(), cid, argc + 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState); - ApiFunction api_function(call_handler_info.callback(broker())); + ApiFunction api_function(function_template_info.callback(broker())); ExternalReference function_reference = ExternalReference::Create( &api_function, ExternalReference::DIRECT_API_CALL); @@ -4080,7 +4079,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(Node* node, node->InsertInput(graph()->zone(), 2, jsgraph()->ConstantNoHole(argc)); node->InsertInput( graph()->zone(), 3, - jsgraph()->ConstantNoHole(call_handler_info.data(broker()), broker())); + jsgraph()->ConstantNoHole(maybe_callback_data.value(), broker())); node->InsertInput(graph()->zone(), 4, holder); node->ReplaceInput(5, receiver); // Update receiver input. // 6 + argc is context input. 
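heap-refs.h above requires |callback_data| (read with kAcquireLoad in this diff) to be read before |callback|: the acquire load on the first field is what makes writes to the second field, performed before the matching release store, visible to the reader. A generic illustration of that release/acquire publication pattern (plain std::atomic, not V8's actual synchronization, which may differ in detail):

    #include <atomic>
    #include <cassert>
    #include <thread>

    int payload = 0;                      // analogous to the field read second
    std::atomic<bool> published{false};   // analogous to the acquire-loaded field

    void Writer() {
      payload = 42;                                       // write payload first
      published.store(true, std::memory_order_release);   // then publish
    }

    void Reader() {
      // The acquire load must come first: if it observes the release store,
      // every write made before that store (here, payload = 42) is visible.
      if (published.load(std::memory_order_acquire)) {
        assert(payload == 42);  // guaranteed by the ordering
      }
    }

    int main() {
      std::thread w(Writer), r(Reader);
      w.join();
      r.join();
      return 0;
    }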
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc index bf2d70168e0dc8..e474fc55864128 100644 --- a/deps/v8/src/compiler/js-context-specialization.cc +++ b/deps/v8/src/compiler/js-context-specialization.cc @@ -254,7 +254,7 @@ Reduction JSContextSpecialization::ReduceJSStoreScriptContext(Node* node) { // The value is not a constant any more, so we don't need to generate // code for invalidating the side data. const Operator* op = - jsgraph_->javascript()->StoreContext(0, access.index()); + jsgraph_->javascript()->StoreContext(access.depth(), access.index()); NodeProperties::ChangeOp(node, op); return Changed(node); } diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc index 592c36f7c300d1..07b6e2cd02f97f 100644 --- a/deps/v8/src/compiler/js-create-lowering.cc +++ b/deps/v8/src/compiler/js-create-lowering.cc @@ -1787,7 +1787,7 @@ base::Optional JSCreateLowering::TryAllocateFastLiteral( builder.Allocate(sizeof(HeapNumber), allocation); builder.Store(AccessBuilder::ForMap(), broker()->heap_number_map()); builder.Store(AccessBuilder::ForHeapNumberValue(), - jsgraph()->ConstantNoHole(number)); + jsgraph()->ConstantMaybeHole(number)); value = effect = builder.Finish(); } else { // It's fine to store the 'uninitialized' marker into a Smi field since diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc index 4ec3ae5712303b..327de9188f914e 100644 --- a/deps/v8/src/compiler/js-graph.cc +++ b/deps/v8/src/compiler/js-graph.cc @@ -60,9 +60,9 @@ Node* JSGraph::ConstantMaybeHole(ObjectRef ref, JSHeapBroker* broker) { } Node* JSGraph::Constant(ObjectRef ref, JSHeapBroker* broker) { - if (ref.IsSmi()) return Constant(ref.AsSmi()); + if (ref.IsSmi()) return ConstantMaybeHole(ref.AsSmi()); if (ref.IsHeapNumber()) { - return Constant(ref.AsHeapNumber().value()); + return ConstantMaybeHole(ref.AsHeapNumber().value()); } switch (ref.AsHeapObject().GetHeapObjectType(broker).hole_type()) { @@ -112,11 +112,11 @@ Node* JSGraph::Constant(ObjectRef ref, JSHeapBroker* broker) { } Node* JSGraph::ConstantNoHole(double value) { - CHECK(value != (double)kHoleNanInt64); - return Constant(value); + CHECK_NE(base::bit_cast(value), kHoleNanInt64); + return ConstantMaybeHole(value); } -Node* JSGraph::Constant(double value) { +Node* JSGraph::ConstantMaybeHole(double value) { if (base::bit_cast(value) == base::bit_cast(0.0)) return ZeroConstant(); if (base::bit_cast(value) == base::bit_cast(1.0)) diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h index c3b7d8b764583c..da5f539537d5ee 100644 --- a/deps/v8/src/compiler/js-graph.h +++ b/deps/v8/src/compiler/js-graph.h @@ -74,8 +74,9 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph { Node* ConstantMaybeHole(ObjectRef ref, JSHeapBroker* broker); // Creates a NumberConstant node, usually canonicalized. - // Checks that we are not emitting a kHoleNanInt64, please use whenever you - // can. + Node* ConstantMaybeHole(double value); + // Same, but checks that we are not emitting a kHoleNanInt64, please use + // whenever you can. Node* ConstantNoHole(double value); // Creates a HeapConstant node for either true or false. 
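The ConstantNoHole fix above replaces a value comparison against kHoleNanInt64 with a comparison of the raw bit pattern: a double-typed comparison can never match a specific NaN payload (NaN compares unequal to everything, and casting the integer converts it numerically rather than reinterpreting its bits). A standalone sketch of why the bit-level check is the reliable one (std::bit_cast stands in for base::bit_cast, and the constant is illustrative, not V8's actual hole pattern):

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    // A hypothetical "hole" marker: a NaN with a specific payload, stored as
    // its 64-bit pattern.
    constexpr uint64_t kHoleBits = 0x7FF8000000001234ull;

    int main() {
      double hole = std::bit_cast<double>(kHoleBits);

      // Value-level comparison: NaN != NaN, so this never detects the hole.
      printf("value compare says hole? %d\n",
             hole == std::bit_cast<double>(kHoleBits));

      // Bit-level comparison: reliably recognizes the exact pattern.
      printf("bit compare says hole?   %d\n",
             std::bit_cast<uint64_t>(hole) == kHoleBits);
      return 0;
    }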
@@ -86,7 +87,7 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph { Node* SmiConstant(int32_t immediate) { DCHECK(Smi::IsValid(immediate)); - return Constant(immediate); + return ConstantMaybeHole(immediate); } JSOperatorBuilder* javascript() const { return javascript_; } @@ -160,9 +161,6 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph { // Internal helper to canonicalize a number constant. Node* NumberConstant(double value); - // Internal helper that creates a NumberConstant node, usually canonicalized. - Node* Constant(double value); - // Internal helper that creates a Constant node of the appropriate type for // the given object. Inspect the (serialized) object and determine whether // one of the canonicalized globals or a number constant should be returned. diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc index 804e9a7ceb6a5e..5355f3d93084a8 100644 --- a/deps/v8/src/compiler/js-heap-broker.cc +++ b/deps/v8/src/compiler/js-heap-broker.cc @@ -512,7 +512,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess( if (!maybe_handler.is_null()) { Handle handler = Handle::cast(maybe_handler.object()); - if (!handler->accessor(kAcquireLoad)->IsCleared()) { + if (!handler->accessor(kAcquireLoad).IsCleared()) { FunctionTemplateInfoRef info = MakeRefAssumeMemoryFence( this, FunctionTemplateInfo::cast( handler->accessor(kAcquireLoad).GetHeapObject())); @@ -554,7 +554,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess( nexus.kind() == FeedbackSlotKind::kStoreGlobalStrict); if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind()); if (nexus.ic_state() != InlineCacheState::MONOMORPHIC || - nexus.GetFeedback()->IsCleared()) { + nexus.GetFeedback().IsCleared()) { return *zone()->New(nexus.kind()); } @@ -684,7 +684,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall( OptionalHeapObjectRef target_ref; { - MaybeObject maybe_target = nexus.GetFeedback(); + Tagged maybe_target = nexus.GetFeedback(); Tagged target_object; if (maybe_target.GetHeapObject(&target_object)) { target_ref = TryMakeRef(this, target_object); diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc index bc1f457ddf9171..2066b3f101e8b9 100644 --- a/deps/v8/src/compiler/js-native-context-specialization.cc +++ b/deps/v8/src/compiler/js-native-context-specialization.cc @@ -2785,13 +2785,13 @@ Node* JSNativeContextSpecialization::InlineApiCall( Node* receiver, Node* api_holder, Node* frame_state, Node* value, Node** effect, Node** control, FunctionTemplateInfoRef function_template_info) { - if (!function_template_info.call_code(broker()).has_value()) { + compiler::OptionalObjectRef maybe_callback_data = + function_template_info.callback_data(broker()); + if (!maybe_callback_data.has_value()) { TRACE_BROKER_MISSING(broker(), "call code for function template info " << function_template_info); return nullptr; } - CallHandlerInfoRef call_handler_info = - *function_template_info.call_code(broker()); // Only setters have a value. int const argc = value == nullptr ? 
0 : 1; @@ -2808,9 +2808,8 @@ Node* JSNativeContextSpecialization::InlineApiCall( 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState); - Node* data = - jsgraph()->ConstantNoHole(call_handler_info.data(broker()), broker()); - ApiFunction function(call_handler_info.callback(broker())); + Node* data = jsgraph()->ConstantNoHole(maybe_callback_data.value(), broker()); + ApiFunction function(function_template_info.callback(broker())); Node* function_reference = graph()->NewNode(common()->ExternalConstant(ExternalReference::Create( &function, ExternalReference::DIRECT_API_CALL))); @@ -3959,6 +3958,11 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore( // it unclear what the best approach is here. DCHECK_EQ(map.UnusedPropertyFields(), 0); int length = map.NextFreePropertyIndex() - map.GetInObjectProperties(); + // Under normal circumstances, NextFreePropertyIndex() will always be larger + // than GetInObjectProperties(). However, an attacker able to corrupt heap + // memory can break this invariant, in which case we'll get confused here, + // potentially causing a sandbox violation. This CHECK defends against that. + SBXCHECK_GE(length, 0); int new_length = length + JSObject::kFieldsAdded; // Collect the field values from the {properties}. ZoneVector values(zone()); diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc index 4e8b090f306803..fbf1a30a706244 100644 --- a/deps/v8/src/compiler/js-type-hint-lowering.cc +++ b/deps/v8/src/compiler/js-type-hint-lowering.cc @@ -35,6 +35,7 @@ bool BinaryOperationHintToNumberOperationHint( case BinaryOperationHint::kAny: case BinaryOperationHint::kNone: case BinaryOperationHint::kString: + case BinaryOperationHint::kStringOrStringWrapper: case BinaryOperationHint::kBigInt: case BinaryOperationHint::kBigInt64: break; @@ -52,6 +53,7 @@ bool BinaryOperationHintToBigIntOperationHint( case BinaryOperationHint::kAny: case BinaryOperationHint::kNone: case BinaryOperationHint::kString: + case BinaryOperationHint::kStringOrStringWrapper: return false; case BinaryOperationHint::kBigInt64: *bigint_hint = BigIntOperationHint::kBigInt64; diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc index 4725218df4d1ad..e8458b01048f41 100644 --- a/deps/v8/src/compiler/js-typed-lowering.cc +++ b/deps/v8/src/compiler/js-typed-lowering.cc @@ -17,7 +17,9 @@ #include "src/compiler/linkage.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" +#include "src/compiler/node.h" #include "src/compiler/operator-properties.h" +#include "src/compiler/simplified-operator.h" #include "src/compiler/type-cache.h" #include "src/compiler/types.h" #include "src/execution/protectors.h" @@ -245,6 +247,26 @@ class JSBinopReduction final { } } + // Checks that both inputs are String or string wrapper, and if we don't know + // statically that one side is already a String or a string wrapper, insert a + // CheckStringOrStringWrapper node. 
+ void CheckInputsToStringOrStringWrapper() { + if (!left_type().Is(Type::StringOrStringWrapper())) { + Node* left_input = graph()->NewNode( + simplified()->CheckStringOrStringWrapper(FeedbackSource()), left(), + effect(), control()); + node_->ReplaceInput(0, left_input); + update_effect(left_input); + } + if (!right_type().Is(Type::StringOrStringWrapper())) { + Node* right_input = graph()->NewNode( + simplified()->CheckStringOrStringWrapper(FeedbackSource()), right(), + effect(), control()); + node_->ReplaceInput(1, right_input); + update_effect(right_input); + } + } + // Checks that both inputs are InternalizedString, and if we don't know // statically that one side is already an InternalizedString, insert a // CheckInternalizedString node. @@ -549,6 +571,108 @@ Reduction JSTypedLowering::ReduceJSNegate(Node* node) { return NoChange(); } +Reduction JSTypedLowering::GenerateStringAddition( + Node* node, Node* left, Node* right, Node* context, Node* frame_state, + Node** effect, Node** control, bool should_create_cons_string) { + // Compute the resulting length. + Node* left_length = graph()->NewNode(simplified()->StringLength(), left); + Node* right_length = graph()->NewNode(simplified()->StringLength(), right); + Node* length = + graph()->NewNode(simplified()->NumberAdd(), left_length, right_length); + + PropertyCellRef string_length_protector = + MakeRef(broker(), factory()->string_length_protector()); + string_length_protector.CacheAsProtector(broker()); + + if (string_length_protector.value(broker()).AsSmi() == + Protectors::kProtectorValid) { + // We can just deoptimize if the {length} is out-of-bounds. Besides + // generating a shorter code sequence than the version below, this + // has the additional benefit of not holding on to the lazy {frame_state} + // and thus potentially reduces the number of live ranges and allows for + // more truncations. + length = *effect = graph()->NewNode( + simplified()->CheckBounds(FeedbackSource()), length, + jsgraph()->ConstantNoHole(String::kMaxLength + 1), *effect, *control); + } else { + // Check if we would overflow the allowed maximum string length. + Node* check = + graph()->NewNode(simplified()->NumberLessThanOrEqual(), length, + jsgraph()->ConstantNoHole(String::kMaxLength)); + Node* branch = + graph()->NewNode(common()->Branch(BranchHint::kTrue), check, *control); + Node* if_false = graph()->NewNode(common()->IfFalse(), branch); + Node* efalse = *effect; + { + // Throw a RangeError in case of overflow. + Node* vfalse = efalse = if_false = graph()->NewNode( + javascript()->CallRuntime(Runtime::kThrowInvalidStringLength), + context, frame_state, efalse, if_false); + + // Update potential {IfException} uses of {node} to point to the + // %ThrowInvalidStringLength runtime call node instead. + Node* on_exception = nullptr; + if (NodeProperties::IsExceptionalCall(node, &on_exception)) { + NodeProperties::ReplaceControlInput(on_exception, vfalse); + NodeProperties::ReplaceEffectInput(on_exception, efalse); + if_false = graph()->NewNode(common()->IfSuccess(), vfalse); + Revisit(on_exception); + } + + // The above %ThrowInvalidStringLength runtime call is an unconditional + // throw, making it impossible to return a successful completion in this + // case. We simply connect the successful completion to the graph end. 
+ if_false = graph()->NewNode(common()->Throw(), efalse, if_false); + MergeControlToEnd(graph(), common(), if_false); + } + *control = graph()->NewNode(common()->IfTrue(), branch); + length = *effect = + graph()->NewNode(common()->TypeGuard(type_cache_->kStringLengthType), + length, *effect, *control); + } + // TODO(bmeurer): Ideally this should always use StringConcat and decide to + // optimize to NewConsString later during SimplifiedLowering, but for that + // to work we need to know that it's safe to create a ConsString. + Operator const* const op = should_create_cons_string + ? simplified()->NewConsString() + : simplified()->StringConcat(); + Node* value = graph()->NewNode(op, length, left, right); + ReplaceWithValue(node, value, *effect, *control); + return Replace(value); +} + +Node* JSTypedLowering::UnwrapStringWrapper(Node* string_or_wrapper, + Node** effect, Node** control) { + Node* check = + graph()->NewNode(simplified()->ObjectIsString(), string_or_wrapper); + Node* branch = graph()->NewNode(common()->Branch(), check, *control); + + Node* if_true = graph()->NewNode(common()->IfTrue(), branch); + Node* etrue = *effect; + Node* vtrue = string_or_wrapper; + + // We just checked that the value is a string. + vtrue = etrue = graph()->NewNode(common()->TypeGuard(Type::String()), vtrue, + etrue, if_true); + + Node* if_false = graph()->NewNode(common()->IfFalse(), branch); + Node* efalse = *effect; + + Node* vfalse = efalse = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForJSPrimitiveWrapperValue()), + string_or_wrapper, *effect, *control); + + // The value read from a string wrapper is a string. + vfalse = efalse = graph()->NewNode(common()->TypeGuard(Type::String()), + vfalse, efalse, if_false); + + *control = graph()->NewNode(common()->Merge(2), if_true, if_false); + *effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, *control); + + return graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), + vtrue, vfalse, *control); +} + Reduction JSTypedLowering::ReduceJSAdd(Node* node) { JSBinopReduction r(this, node); if (r.BothInputsAre(Type::Number())) { @@ -577,9 +701,21 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) { } } + PropertyCellRef to_primitive_protector = + MakeRef(broker(), factory()->string_wrapper_to_primitive_protector()); + to_primitive_protector.CacheAsProtector(broker()); + bool can_inline_string_wrapper_add = false; + // Always bake in String feedback into the graph. if (r.GetBinaryOperationHint(node) == BinaryOperationHint::kString) { r.CheckInputsToString(); + } else if (r.GetBinaryOperationHint(node) == + BinaryOperationHint::kStringOrStringWrapper) { + can_inline_string_wrapper_add = + dependencies()->DependOnProtector(to_primitive_protector); + if (can_inline_string_wrapper_add) { + r.CheckInputsToStringOrStringWrapper(); + } } // Strength-reduce concatenation of empty strings if both sides are @@ -603,81 +739,27 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) { } } + Node* context = NodeProperties::GetContextInput(node); + Node* frame_state = NodeProperties::GetFrameStateInput(node); + Node* effect = NodeProperties::GetEffectInput(node); + Node* control = NodeProperties::GetControlInput(node); + // Lower to string addition if both inputs are known to be strings. 
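For context, a minimal stand-alone model (illustrative only; the std::variant-based types below are assumptions, not V8 code) of the value computed by the UnwrapStringWrapper helper added above: an operand that is already a primitive string passes through unchanged, while a String wrapper object yields its stored primitive value.

  #include <string>
  #include <variant>

  struct StringWrapper { std::string value; };  // models a wrapped JS String object
  using Operand = std::variant<std::string, StringWrapper>;

  // Mirrors the branch/merge in UnwrapStringWrapper: already-a-string operands
  // are used directly, wrappers are unwrapped to their primitive value.
  std::string Unwrap(const Operand& op) {
    if (auto* s = std::get_if<std::string>(&op)) return *s;
    return std::get<StringWrapper>(op).value;
  }

With that model in mind, the lowering below first handles the case where both inputs are known to be plain strings: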
if (r.BothInputsAre(Type::String())) { - Node* context = NodeProperties::GetContextInput(node); - Node* frame_state = NodeProperties::GetFrameStateInput(node); - Node* effect = NodeProperties::GetEffectInput(node); - Node* control = NodeProperties::GetControlInput(node); - - // Compute the resulting length. - Node* left_length = - graph()->NewNode(simplified()->StringLength(), r.left()); - Node* right_length = - graph()->NewNode(simplified()->StringLength(), r.right()); - Node* length = - graph()->NewNode(simplified()->NumberAdd(), left_length, right_length); - - PropertyCellRef string_length_protector = - MakeRef(broker(), factory()->string_length_protector()); - string_length_protector.CacheAsProtector(broker()); - - if (string_length_protector.value(broker()).AsSmi() == - Protectors::kProtectorValid) { - // We can just deoptimize if the {length} is out-of-bounds. Besides - // generating a shorter code sequence than the version below, this - // has the additional benefit of not holding on to the lazy {frame_state} - // and thus potentially reduces the number of live ranges and allows for - // more truncations. - length = effect = graph()->NewNode( - simplified()->CheckBounds(FeedbackSource()), length, - jsgraph()->ConstantNoHole(String::kMaxLength + 1), effect, control); - } else { - // Check if we would overflow the allowed maximum string length. - Node* check = - graph()->NewNode(simplified()->NumberLessThanOrEqual(), length, - jsgraph()->ConstantNoHole(String::kMaxLength)); - Node* branch = - graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control); - Node* if_false = graph()->NewNode(common()->IfFalse(), branch); - Node* efalse = effect; - { - // Throw a RangeError in case of overflow. - Node* vfalse = efalse = if_false = graph()->NewNode( - javascript()->CallRuntime(Runtime::kThrowInvalidStringLength), - context, frame_state, efalse, if_false); - - // Update potential {IfException} uses of {node} to point to the - // %ThrowInvalidStringLength runtime call node instead. - Node* on_exception = nullptr; - if (NodeProperties::IsExceptionalCall(node, &on_exception)) { - NodeProperties::ReplaceControlInput(on_exception, vfalse); - NodeProperties::ReplaceEffectInput(on_exception, efalse); - if_false = graph()->NewNode(common()->IfSuccess(), vfalse); - Revisit(on_exception); - } - - // The above %ThrowInvalidStringLength runtime call is an unconditional - // throw, making it impossible to return a successful completion in this - // case. We simply connect the successful completion to the graph end. - if_false = graph()->NewNode(common()->Throw(), efalse, if_false); - MergeControlToEnd(graph(), common(), if_false); - } - control = graph()->NewNode(common()->IfTrue(), branch); - length = effect = - graph()->NewNode(common()->TypeGuard(type_cache_->kStringLengthType), - length, effect, control); - } - - // TODO(bmeurer): Ideally this should always use StringConcat and decide to - // optimize to NewConsString later during SimplifiedLowering, but for that - // to work we need to know that it's safe to create a ConsString. - Operator const* const op = r.ShouldCreateConsString() - ? 
simplified()->NewConsString() - : simplified()->StringConcat(); - Node* value = graph()->NewNode(op, length, r.left(), r.right()); - ReplaceWithValue(node, value, effect, control); - return Replace(value); + return GenerateStringAddition(node, r.left(), r.right(), context, + frame_state, &effect, &control, + r.ShouldCreateConsString()); + } else if (r.BothInputsAre(Type::StringOrStringWrapper()) && + can_inline_string_wrapper_add) { + // If the left hand side is a string wrapper, unwrap it. + Node* left_string = UnwrapStringWrapper(r.left(), &effect, &control); + + // If the right hand side is a string wrapper, unwrap it. + Node* right_string = UnwrapStringWrapper(r.right(), &effect, &control); + + // Generate the string addition. + return GenerateStringAddition(node, left_string, right_string, context, + frame_state, &effect, &control, false); } // We never get here when we had String feedback. diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h index c5504d47c02a9e..684ebb41ce4812 100644 --- a/deps/v8/src/compiler/js-typed-lowering.h +++ b/deps/v8/src/compiler/js-typed-lowering.h @@ -90,6 +90,14 @@ class V8_EXPORT_PRIVATE JSTypedLowering final // Helper for ReduceJSLoadModule and ReduceJSStoreModule. Node* BuildGetModuleCell(Node* node); + // Helpers for ReduceJSAdd. + Reduction GenerateStringAddition(Node* node, Node* left, Node* right, + Node* context, Node* frame_state, + Node** effect, Node** control, + bool should_create_cons_string); + Node* UnwrapStringWrapper(Node* string_or_wrapper, Node** effect, + Node** control); + Factory* factory() const; Graph* graph() const; JSGraph* jsgraph() const { return jsgraph_; } diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc index 4fb338f5c4bad2..85f3fe09ad70fe 100644 --- a/deps/v8/src/compiler/loop-analysis.cc +++ b/deps/v8/src/compiler/loop-analysis.cc @@ -4,7 +4,6 @@ #include "src/compiler/loop-analysis.h" -#include "src/base/v8-fallthrough.h" #include "src/codegen/tick-counter.h" #include "src/compiler/all-nodes.h" #include "src/compiler/common-operator.h" @@ -660,7 +659,7 @@ ZoneUnorderedSet* LoopFinder::FindSmallInnermostLoopFromHeader( // Rationale for PrepareForGetCodeunit: this internal operation is // specifically designed for being hoisted out of loops. 
has_instruction_worth_peeling = true; - V8_FALLTHROUGH; + [[fallthrough]]; default: ENQUEUE_USES(use, true) break; diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc index 80307fac39e0d4..64e5d542f414b7 100644 --- a/deps/v8/src/compiler/machine-graph-verifier.cc +++ b/deps/v8/src/compiler/machine-graph-verifier.cc @@ -4,7 +4,6 @@ #include "src/compiler/machine-graph-verifier.h" -#include "src/base/v8-fallthrough.h" #include "src/compiler/common-operator.h" #include "src/compiler/graph.h" #include "src/compiler/linkage.h" @@ -284,6 +283,7 @@ class MachineRepresentationInferrer { break; case IrOpcode::kRoundInt64ToFloat64: case IrOpcode::kRoundUint64ToFloat64: + case IrOpcode::kBitcastInt64ToFloat64: case IrOpcode::kChangeFloat32ToFloat64: case IrOpcode::kChangeInt32ToFloat64: case IrOpcode::kChangeUint32ToFloat64: @@ -361,6 +361,7 @@ class MachineRepresentationChecker { case IrOpcode::kRoundInt64ToFloat32: case IrOpcode::kRoundUint64ToFloat32: case IrOpcode::kTruncateInt64ToInt32: + case IrOpcode::kBitcastInt64ToFloat64: case IrOpcode::kWord64Ctz: case IrOpcode::kWord64Clz: case IrOpcode::kWord64Popcnt: @@ -526,6 +527,12 @@ class MachineRepresentationChecker { CheckValueInputForFloat64Op(node, 0); CheckValueInputForInt32Op(node, 1); break; + case IrOpcode::kInt32PairAdd: + case IrOpcode::kInt32PairSub: + for (int j = 0; j < node->op()->ValueInputCount(); ++j) { + CheckValueInputForInt32Op(node, j); + } + break; case IrOpcode::kParameter: case IrOpcode::kProjection: break; @@ -551,7 +558,7 @@ class MachineRepresentationChecker { case IrOpcode::kWord32AtomicPairExchange: CheckValueInputRepresentationIs(node, 3, MachineRepresentation::kWord32); - V8_FALLTHROUGH; + [[fallthrough]]; case IrOpcode::kStore: case IrOpcode::kStoreIndirectPointer: case IrOpcode::kUnalignedStore: @@ -594,12 +601,40 @@ class MachineRepresentationChecker { node, 2, inferrer_->GetRepresentation(node)); } break; + case IrOpcode::kStorePair: { + CheckValueInputIsTaggedOrPointer(node, 0); + CheckValueInputRepresentationIs( + node, 1, MachineType::PointerRepresentation()); + auto CheckInput = [&](MachineRepresentation rep, int input) { + switch (rep) { + case MachineRepresentation::kTagged: + case MachineRepresentation::kTaggedPointer: + case MachineRepresentation::kTaggedSigned: + case MachineRepresentation::kIndirectPointer: + if (COMPRESS_POINTERS_BOOL) { + CheckValueInputIsCompressedOrTagged(node, input); + } else { + CheckValueInputIsTagged(node, input); + } + break; + default: + CheckValueInputRepresentationIs(node, input, rep); + } + }; + auto rep = StorePairRepresentationOf(node->op()); + CHECK_GE(ElementSizeLog2Of(rep.first.representation()), 2); + CHECK_EQ(ElementSizeLog2Of(rep.first.representation()), + ElementSizeLog2Of(rep.second.representation())); + CheckInput(rep.first.representation(), 2); + CheckInput(rep.second.representation(), 3); + break; + } case IrOpcode::kWord32AtomicPairCompareExchange: CheckValueInputRepresentationIs(node, 4, MachineRepresentation::kWord32); CheckValueInputRepresentationIs(node, 5, MachineRepresentation::kWord32); - V8_FALLTHROUGH; + [[fallthrough]]; case IrOpcode::kWord32AtomicCompareExchange: case IrOpcode::kWord64AtomicCompareExchange: CheckValueInputIsTaggedOrPointer(node, 0); diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc index 15b28eae069b01..451133d08e60ca 100644 --- a/deps/v8/src/compiler/machine-operator-reducer.cc +++ 
b/deps/v8/src/compiler/machine-operator-reducer.cc @@ -2324,15 +2324,17 @@ struct BitfieldCheck { base::Optional TryCombine(const BitfieldCheck& other) { if (source != other.source || - truncate_from_64_bit != other.truncate_from_64_bit) + truncate_from_64_bit != other.truncate_from_64_bit) { return {}; + } uint32_t overlapping_bits = mask & other.mask; // It would be kind of strange to have any overlapping bits, but they can be // allowed as long as they don't require opposite values in the same // positions. if ((masked_value & overlapping_bits) != - (other.masked_value & overlapping_bits)) + (other.masked_value & overlapping_bits)) { return {}; + } return BitfieldCheck{source, mask | other.mask, masked_value | other.masked_value, truncate_from_64_bit}; diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h index 5d267856be52e0..17f2f1f3979534 100644 --- a/deps/v8/src/compiler/machine-operator.h +++ b/deps/v8/src/compiler/machine-operator.h @@ -613,8 +613,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final // Note, that it's illegal to "look" at the pointer bits of non-smi values. const Operator* BitcastTaggedToWordForTagAndSmiBits(); - // This operator reinterprets the bits of a tagged MaybeObject pointer as - // word. + // This operator reinterprets the bits of a tagged Tagged pointer + // as word. const Operator* BitcastMaybeObjectToWord(); // This operator reinterprets the bits of a word as tagged pointer. diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc index 0814955872255d..64426a0e60c823 100644 --- a/deps/v8/src/compiler/node-properties.cc +++ b/deps/v8/src/compiler/node-properties.cc @@ -341,6 +341,8 @@ MachineRepresentation NodeProperties::GetProjectionType( auto call_descriptor = CallDescriptorOf(input->op()); return call_descriptor->GetReturnType(index).representation(); } + case IrOpcode::kInt32PairAdd: + case IrOpcode::kInt32PairSub: case IrOpcode::kWord32AtomicPairLoad: case IrOpcode::kWord32AtomicPairAdd: case IrOpcode::kWord32AtomicPairSub: diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h index c4e1fe51daf7f9..e7a154bb055a5f 100644 --- a/deps/v8/src/compiler/opcodes.h +++ b/deps/v8/src/compiler/opcodes.h @@ -451,6 +451,7 @@ V(CheckReceiverOrNullOrUndefined) \ V(CheckSmi) \ V(CheckString) \ + V(CheckStringOrStringWrapper) \ V(CheckSymbol) \ V(CheckTurboshaftTypeOf) \ V(CompareMaps) \ diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc index c73575b8b50e08..11d83ba7a052f8 100644 --- a/deps/v8/src/compiler/operation-typer.cc +++ b/deps/v8/src/compiler/operation-typer.cc @@ -253,7 +253,7 @@ Type OperationTyper::ConvertReceiver(Type type) { // ConvertReceiver maps null and undefined to the JSGlobalProxy of the // target function, and all other primitives are wrapped into a // JSPrimitiveWrapper. - type = Type::Union(type, Type::OtherObject(), zone()); + type = Type::Union(type, Type::StringWrapperOrOtherObject(), zone()); } return type; } diff --git a/deps/v8/src/compiler/pipeline-data-inl.h b/deps/v8/src/compiler/pipeline-data-inl.h new file mode 100644 index 00000000000000..75b747ee2bb985 --- /dev/null +++ b/deps/v8/src/compiler/pipeline-data-inl.h @@ -0,0 +1,607 @@ + +// Copyright 2024 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_PIPELINE_DATA_INL_H_ +#define V8_COMPILER_PIPELINE_DATA_INL_H_ + +#include "src/builtins/profile-data-reader.h" +#include "src/codegen/assembler.h" +#include "src/codegen/optimized-compilation-info.h" +#include "src/compiler/backend/code-generator.h" +#include "src/compiler/backend/instruction-selector.h" +#include "src/compiler/backend/instruction.h" +#include "src/compiler/backend/register-allocator.h" +#include "src/compiler/compilation-dependencies.h" +#include "src/compiler/compiler-source-position-table.h" +#include "src/compiler/js-context-specialization.h" +#include "src/compiler/js-heap-broker.h" +#include "src/compiler/machine-graph.h" +#include "src/compiler/node-observer.h" +#include "src/compiler/node-origin-table.h" +#include "src/compiler/pipeline-statistics.h" +#include "src/compiler/schedule.h" +#include "src/compiler/turboshaft/phase.h" +#include "src/compiler/typer.h" +#include "src/compiler/zone-stats.h" +#include "src/execution/isolate.h" +#include "src/handles/handles-inl.h" +#include "src/objects/objects-inl.h" + +#if V8_ENABLE_WEBASSEMBLY +#include "src/wasm/wasm-engine.h" +#endif + +namespace v8::internal::compiler { + +static constexpr char kCodegenZoneName[] = "codegen-zone"; +static constexpr char kGraphZoneName[] = "graph-zone"; +static constexpr char kInstructionZoneName[] = "instruction-zone"; +static constexpr char kRegisterAllocationZoneName[] = + "register-allocation-zone"; + +inline Maybe GetModuleContext(OptimizedCompilationInfo* info) { + Tagged current = info->closure()->context(); + size_t distance = 0; + while (!IsNativeContext(*current)) { + if (current->IsModuleContext()) { + return Just(OuterContext( + info->CanonicalHandle(current, current->GetIsolate()), distance)); + } + current = current->previous(); + distance++; + } + return Nothing(); +} + +class PipelineData { + public: + // For main entry point. + PipelineData(ZoneStats* zone_stats, Isolate* isolate, + OptimizedCompilationInfo* info, + TurbofanPipelineStatistics* pipeline_statistics) + : isolate_(isolate), + allocator_(isolate->allocator()), + info_(info), + debug_name_(info_->GetDebugName()), + may_have_unverifiable_graph_(v8_flags.turboshaft), + zone_stats_(zone_stats), + pipeline_statistics_(pipeline_statistics), + graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone), + graph_zone_(graph_zone_scope_.zone()), + instruction_zone_scope_(zone_stats_, kInstructionZoneName), + instruction_zone_(instruction_zone_scope_.zone()), + codegen_zone_scope_(zone_stats_, kCodegenZoneName), + codegen_zone_(codegen_zone_scope_.zone()), + broker_(new JSHeapBroker(isolate_, info_->zone(), + info_->trace_heap_broker(), + info->code_kind())), + register_allocation_zone_scope_(zone_stats_, + kRegisterAllocationZoneName), + register_allocation_zone_(register_allocation_zone_scope_.zone()), + assembler_options_(AssemblerOptions::Default(isolate)) { + PhaseScope scope(pipeline_statistics, "V8.TFInitPipelineData"); + graph_ = graph_zone_->New(graph_zone_); + source_positions_ = graph_zone_->New(graph_); + node_origins_ = info->trace_turbo_json() + ? 
graph_zone_->New(graph_) + : nullptr; + simplified_ = graph_zone_->New(graph_zone_); + machine_ = graph_zone_->New( + graph_zone_, MachineType::PointerRepresentation(), + InstructionSelector::SupportedMachineOperatorFlags(), + InstructionSelector::AlignmentRequirements()); + common_ = graph_zone_->New(graph_zone_); + javascript_ = graph_zone_->New(graph_zone_); + jsgraph_ = graph_zone_->New(isolate_, graph_, common_, javascript_, + simplified_, machine_); + observe_node_manager_ = + info->node_observer() + ? graph_zone_->New(graph_zone_) + : nullptr; + dependencies_ = + info_->zone()->New(broker_, info_->zone()); + } + +#if V8_ENABLE_WEBASSEMBLY + // For WebAssembly compile entry point. + PipelineData(ZoneStats* zone_stats, wasm::WasmEngine* wasm_engine, + OptimizedCompilationInfo* info, MachineGraph* mcgraph, + TurbofanPipelineStatistics* pipeline_statistics, + SourcePositionTable* source_positions, + NodeOriginTable* node_origins, + const AssemblerOptions& assembler_options) + : isolate_(nullptr), + wasm_engine_(wasm_engine), + allocator_(wasm_engine->allocator()), + info_(info), + debug_name_(info_->GetDebugName()), + may_have_unverifiable_graph_(v8_flags.turboshaft_wasm), + zone_stats_(zone_stats), + pipeline_statistics_(pipeline_statistics), + graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone), + graph_zone_(graph_zone_scope_.zone()), + graph_(mcgraph->graph()), + source_positions_(source_positions), + node_origins_(node_origins), + machine_(mcgraph->machine()), + common_(mcgraph->common()), + mcgraph_(mcgraph), + instruction_zone_scope_(zone_stats_, kInstructionZoneName), + instruction_zone_(instruction_zone_scope_.zone()), + codegen_zone_scope_(zone_stats_, kCodegenZoneName), + codegen_zone_(codegen_zone_scope_.zone()), + register_allocation_zone_scope_(zone_stats_, + kRegisterAllocationZoneName), + register_allocation_zone_(register_allocation_zone_scope_.zone()), + assembler_options_(assembler_options) { + simplified_ = graph_zone_->New(graph_zone_); + javascript_ = graph_zone_->New(graph_zone_); + jsgraph_ = graph_zone_->New(isolate_, graph_, common_, javascript_, + simplified_, machine_); + } +#endif // V8_ENABLE_WEBASSEMBLY + + // For CodeStubAssembler and machine graph testing entry point. + PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info, + Isolate* isolate, AccountingAllocator* allocator, Graph* graph, + JSGraph* jsgraph, Schedule* schedule, + SourcePositionTable* source_positions, + NodeOriginTable* node_origins, JumpOptimizationInfo* jump_opt, + const AssemblerOptions& assembler_options, + const ProfileDataFromFile* profile_data) + : isolate_(isolate), +#if V8_ENABLE_WEBASSEMBLY + // TODO(clemensb): Remove this field, use GetWasmEngine directly + // instead. 
+ wasm_engine_(wasm::GetWasmEngine()), +#endif // V8_ENABLE_WEBASSEMBLY + allocator_(allocator), + info_(info), + debug_name_(info_->GetDebugName()), + zone_stats_(zone_stats), + graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone), + graph_zone_(graph_zone_scope_.zone()), + graph_(graph), + source_positions_(source_positions), + node_origins_(node_origins), + schedule_(schedule), + instruction_zone_scope_(zone_stats_, kInstructionZoneName), + instruction_zone_(instruction_zone_scope_.zone()), + codegen_zone_scope_(zone_stats_, kCodegenZoneName), + codegen_zone_(codegen_zone_scope_.zone()), + register_allocation_zone_scope_(zone_stats_, + kRegisterAllocationZoneName), + register_allocation_zone_(register_allocation_zone_scope_.zone()), + jump_optimization_info_(jump_opt), + assembler_options_(assembler_options), + profile_data_(profile_data) { + if (jsgraph) { + jsgraph_ = jsgraph; + simplified_ = jsgraph->simplified(); + machine_ = jsgraph->machine(); + common_ = jsgraph->common(); + javascript_ = jsgraph->javascript(); + } else if (graph_) { + simplified_ = graph_zone_->New(graph_zone_); + machine_ = graph_zone_->New( + graph_zone_, MachineType::PointerRepresentation(), + InstructionSelector::SupportedMachineOperatorFlags(), + InstructionSelector::AlignmentRequirements()); + common_ = graph_zone_->New(graph_zone_); + javascript_ = graph_zone_->New(graph_zone_); + jsgraph_ = graph_zone_->New(isolate_, graph_, common_, + javascript_, simplified_, machine_); + } + } + + // For register allocation testing entry point. + PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info, + Isolate* isolate, InstructionSequence* sequence) + : isolate_(isolate), + allocator_(isolate->allocator()), + info_(info), + debug_name_(info_->GetDebugName()), + zone_stats_(zone_stats), + graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone), + instruction_zone_scope_(zone_stats_, kInstructionZoneName), + instruction_zone_(sequence->zone()), + sequence_(sequence), + codegen_zone_scope_(zone_stats_, kCodegenZoneName), + codegen_zone_(codegen_zone_scope_.zone()), + register_allocation_zone_scope_(zone_stats_, + kRegisterAllocationZoneName), + register_allocation_zone_(register_allocation_zone_scope_.zone()), + assembler_options_(AssemblerOptions::Default(isolate)) {} + + ~PipelineData() { + // Must happen before zones are destroyed. + delete code_generator_; + code_generator_ = nullptr; + DeleteTyper(); + DeleteRegisterAllocationZone(); + DeleteInstructionZone(); + DeleteCodegenZone(); + DeleteGraphZone(); + } + + PipelineData(const PipelineData&) = delete; + PipelineData& operator=(const PipelineData&) = delete; + + Isolate* isolate() const { return isolate_; } + AccountingAllocator* allocator() const { return allocator_; } + OptimizedCompilationInfo* info() const { return info_; } + ZoneStats* zone_stats() const { return zone_stats_; } + CompilationDependencies* dependencies() const { return dependencies_; } + TurbofanPipelineStatistics* pipeline_statistics() { + return pipeline_statistics_; + } + OsrHelper* osr_helper() { return &(*osr_helper_); } + + bool verify_graph() const { return verify_graph_; } + void set_verify_graph(bool value) { verify_graph_ = value; } + + MaybeHandle code() { return code_; } + void set_code(MaybeHandle code) { + DCHECK(code_.is_null()); + code_ = code; + } + + CodeGenerator* code_generator() const { return code_generator_; } + + // RawMachineAssembler generally produces graphs which cannot be verified. 
+ bool MayHaveUnverifiableGraph() const { return may_have_unverifiable_graph_; } + + Zone* graph_zone() const { return graph_zone_; } + Graph* graph() const { return graph_; } + void set_graph(Graph* graph) { graph_ = graph; } + turboshaft::PipelineData& GetTurboshaftPipelineData( + turboshaft::TurboshaftPipelineKind kind) { + if (!ts_data_.has_value()) { + ts_data_.emplace(kind, info_, schedule_, graph_zone_, info_->zone(), + broker_, isolate_, source_positions_, node_origins_, + sequence_, frame_, assembler_options_, + &max_unoptimized_frame_height_, + &max_pushed_argument_count_, instruction_zone_); + } + return ts_data_.value(); + } + SourcePositionTable* source_positions() const { return source_positions_; } + NodeOriginTable* node_origins() const { return node_origins_; } + MachineOperatorBuilder* machine() const { return machine_; } + SimplifiedOperatorBuilder* simplified() const { return simplified_; } + CommonOperatorBuilder* common() const { return common_; } + JSOperatorBuilder* javascript() const { return javascript_; } + JSGraph* jsgraph() const { return jsgraph_; } + MachineGraph* mcgraph() const { return mcgraph_; } + Handle native_context() const { + return handle(info()->native_context(), isolate()); + } + Handle global_object() const { + return handle(info()->global_object(), isolate()); + } + + JSHeapBroker* broker() const { return broker_; } + std::unique_ptr ReleaseBroker() { + std::unique_ptr broker(broker_); + broker_ = nullptr; + return broker; + } + + Schedule* schedule() const { return schedule_; } + void set_schedule(Schedule* schedule) { + DCHECK(!schedule_); + schedule_ = schedule; + } + void reset_schedule() { schedule_ = nullptr; } + + ObserveNodeManager* observe_node_manager() const { + return observe_node_manager_; + } + + Zone* instruction_zone() const { return instruction_zone_; } + Zone* codegen_zone() const { return codegen_zone_; } + InstructionSequence* sequence() const { return sequence_; } + Frame* frame() const { return frame_; } + + Zone* register_allocation_zone() const { return register_allocation_zone_; } + + RegisterAllocationData* register_allocation_data() const { + return register_allocation_data_; + } + + std::string const& source_position_output() const { + return source_position_output_; + } + void set_source_position_output(std::string const& source_position_output) { + source_position_output_ = source_position_output; + } + + JumpOptimizationInfo* jump_optimization_info() const { + return jump_optimization_info_; + } + + const AssemblerOptions& assembler_options() const { + return assembler_options_; + } + + void ChooseSpecializationContext() { + if (info()->function_context_specializing()) { + DCHECK(info()->has_context()); + specialization_context_ = Just(OuterContext( + info()->CanonicalHandle(info()->context(), isolate()), 0)); + } else { + specialization_context_ = GetModuleContext(info()); + } + } + + Maybe specialization_context() const { + return specialization_context_; + } + + size_t* address_of_max_unoptimized_frame_height() { + return &max_unoptimized_frame_height_; + } + size_t max_unoptimized_frame_height() const { + return max_unoptimized_frame_height_; + } + size_t* address_of_max_pushed_argument_count() { + return &max_pushed_argument_count_; + } + size_t max_pushed_argument_count() const { + return max_pushed_argument_count_; + } + + CodeTracer* GetCodeTracer() const { +#if V8_ENABLE_WEBASSEMBLY + if (wasm_engine_) return wasm_engine_->GetCodeTracer(); +#endif // V8_ENABLE_WEBASSEMBLY + return 
isolate_->GetCodeTracer(); + } + + Typer* CreateTyper() { + DCHECK_NULL(typer_); + typer_ = + new Typer(broker(), typer_flags_, graph(), &info()->tick_counter()); + return typer_; + } + + void AddTyperFlag(Typer::Flag flag) { + DCHECK_NULL(typer_); + typer_flags_ |= flag; + } + + void DeleteTyper() { + delete typer_; + typer_ = nullptr; + } + + void DeleteGraphZone() { + if (graph_zone_ == nullptr) return; + graph_zone_ = nullptr; + graph_ = nullptr; + source_positions_ = nullptr; + node_origins_ = nullptr; + simplified_ = nullptr; + machine_ = nullptr; + common_ = nullptr; + javascript_ = nullptr; + jsgraph_ = nullptr; + mcgraph_ = nullptr; + schedule_ = nullptr; + graph_zone_scope_.Destroy(); + } + + void DeleteInstructionZone() { + if (instruction_zone_ == nullptr) return; + instruction_zone_scope_.Destroy(); + instruction_zone_ = nullptr; + sequence_ = nullptr; + } + + void DeleteCodegenZone() { + if (codegen_zone_ == nullptr) return; + codegen_zone_scope_.Destroy(); + codegen_zone_ = nullptr; + dependencies_ = nullptr; + delete broker_; + broker_ = nullptr; + frame_ = nullptr; + } + + void DeleteRegisterAllocationZone() { + if (register_allocation_zone_ == nullptr) return; + register_allocation_zone_scope_.Destroy(); + register_allocation_zone_ = nullptr; + register_allocation_data_ = nullptr; + } + + void InitializeInstructionSequence(const CallDescriptor* call_descriptor) { + DCHECK_NULL(sequence_); + InstructionBlocks* instruction_blocks = + InstructionSequence::InstructionBlocksFor(instruction_zone(), + schedule()); + sequence_ = instruction_zone()->New( + isolate(), instruction_zone(), instruction_blocks); + if (call_descriptor && call_descriptor->RequiresFrameAsIncoming()) { + sequence_->instruction_blocks()[0]->mark_needs_frame(); + } else { + DCHECK(call_descriptor->CalleeSavedFPRegisters().is_empty()); + } + } + + void InitializeFrameData(CallDescriptor* call_descriptor) { + DCHECK_NULL(frame_); + int fixed_frame_size = 0; + if (call_descriptor != nullptr) { + fixed_frame_size = + call_descriptor->CalculateFixedFrameSize(info()->code_kind()); + } + frame_ = codegen_zone()->New(fixed_frame_size, codegen_zone()); + if (osr_helper_.has_value()) osr_helper()->SetupFrame(frame()); + } + + void InitializeRegisterAllocationData(const RegisterConfiguration* config, + CallDescriptor* call_descriptor) { + DCHECK_NULL(register_allocation_data_); + register_allocation_data_ = + register_allocation_zone()->New( + config, register_allocation_zone(), frame(), sequence(), + &info()->tick_counter(), debug_name()); + } + + void InitializeOsrHelper() { + DCHECK(!osr_helper_.has_value()); + osr_helper_.emplace(info()); + } + + void set_start_source_position(int position) { + DCHECK_EQ(start_source_position_, kNoSourcePosition); + start_source_position_ = position; + } + + void InitializeCodeGenerator(Linkage* linkage) { + DCHECK_NULL(code_generator_); +#if V8_ENABLE_WEBASSEMBLY + assembler_options_.is_wasm = + this->info()->IsWasm() || this->info()->IsWasmBuiltin(); +#endif + code_generator_ = new CodeGenerator( + codegen_zone(), frame(), linkage, sequence(), info(), isolate(), + osr_helper_, start_source_position_, jump_optimization_info_, + assembler_options(), info_->builtin(), max_unoptimized_frame_height(), + max_pushed_argument_count(), + v8_flags.trace_turbo_stack_accesses ? 
debug_name_.get() : nullptr); + } + + void BeginPhaseKind(const char* phase_kind_name) { + if (pipeline_statistics() != nullptr) { + pipeline_statistics()->BeginPhaseKind(phase_kind_name); + } + } + + void EndPhaseKind() { + if (pipeline_statistics() != nullptr) { + pipeline_statistics()->EndPhaseKind(); + } + } + + const char* debug_name() const { return debug_name_.get(); } + + const ProfileDataFromFile* profile_data() const { return profile_data_; } + void set_profile_data(const ProfileDataFromFile* profile_data) { + profile_data_ = profile_data; + } + + // RuntimeCallStats that is only available during job execution but not + // finalization. + // TODO(delphick): Currently even during execution this can be nullptr, due to + // JSToWasmWrapperCompilationUnit::Execute. Once a table can be extracted + // there, this method can DCHECK that it is never nullptr. + RuntimeCallStats* runtime_call_stats() const { return runtime_call_stats_; } + void set_runtime_call_stats(RuntimeCallStats* stats) { + runtime_call_stats_ = stats; + } + + // Used to skip the "wasm-inlining" phase when there are no JS-to-Wasm calls. + bool has_js_wasm_calls() const { return has_js_wasm_calls_; } + void set_has_js_wasm_calls(bool has_js_wasm_calls) { + has_js_wasm_calls_ = has_js_wasm_calls; + } + +#if V8_ENABLE_WEBASSEMBLY + const wasm::WasmModule* wasm_module_for_inlining() const { + return wasm_module_for_inlining_; + } + void set_wasm_module_for_inlining(const wasm::WasmModule* module) { + wasm_module_for_inlining_ = module; + } +#endif + + private: + Isolate* const isolate_; +#if V8_ENABLE_WEBASSEMBLY + wasm::WasmEngine* const wasm_engine_ = nullptr; + // The wasm module to be used for inlining wasm functions into JS. + // The first module wins and inlining of different modules into the same + // JS function is not supported. This is necessary because the wasm + // instructions use module-specific (non-canonicalized) type indices. + const wasm::WasmModule* wasm_module_for_inlining_ = nullptr; +#endif // V8_ENABLE_WEBASSEMBLY + AccountingAllocator* const allocator_; + OptimizedCompilationInfo* const info_; + std::unique_ptr debug_name_; + bool may_have_unverifiable_graph_ = true; + ZoneStats* const zone_stats_; + TurbofanPipelineStatistics* pipeline_statistics_ = nullptr; + bool verify_graph_ = false; + int start_source_position_ = kNoSourcePosition; + base::Optional osr_helper_; + MaybeHandle code_; + CodeGenerator* code_generator_ = nullptr; + Typer* typer_ = nullptr; + Typer::Flags typer_flags_ = Typer::kNoFlags; + + // All objects in the following group of fields are allocated in graph_zone_. + // They are all set to nullptr when the graph_zone_ is destroyed. + ZoneStats::Scope graph_zone_scope_; + Zone* graph_zone_ = nullptr; + Graph* graph_ = nullptr; + SourcePositionTable* source_positions_ = nullptr; + NodeOriginTable* node_origins_ = nullptr; + SimplifiedOperatorBuilder* simplified_ = nullptr; + MachineOperatorBuilder* machine_ = nullptr; + CommonOperatorBuilder* common_ = nullptr; + JSOperatorBuilder* javascript_ = nullptr; + JSGraph* jsgraph_ = nullptr; + MachineGraph* mcgraph_ = nullptr; + Schedule* schedule_ = nullptr; + ObserveNodeManager* observe_node_manager_ = nullptr; + base::Optional ts_data_; + + // All objects in the following group of fields are allocated in + // instruction_zone_. They are all set to nullptr when the instruction_zone_ + // is destroyed. 
+ ZoneStats::Scope instruction_zone_scope_; + Zone* instruction_zone_; + InstructionSequence* sequence_ = nullptr; + + // All objects in the following group of fields are allocated in + // codegen_zone_. They are all set to nullptr when the codegen_zone_ + // is destroyed. + ZoneStats::Scope codegen_zone_scope_; + Zone* codegen_zone_; + CompilationDependencies* dependencies_ = nullptr; + JSHeapBroker* broker_ = nullptr; + Frame* frame_ = nullptr; + + // All objects in the following group of fields are allocated in + // register_allocation_zone_. They are all set to nullptr when the zone is + // destroyed. + ZoneStats::Scope register_allocation_zone_scope_; + Zone* register_allocation_zone_; + RegisterAllocationData* register_allocation_data_ = nullptr; + + // Source position output for --trace-turbo. + std::string source_position_output_; + + JumpOptimizationInfo* jump_optimization_info_ = nullptr; + AssemblerOptions assembler_options_; + Maybe specialization_context_ = Nothing(); + + // The maximal combined height of all inlined frames in their unoptimized + // state, and the maximal number of arguments pushed during function calls. + // Calculated during instruction selection, applied during code generation. + size_t max_unoptimized_frame_height_ = 0; + size_t max_pushed_argument_count_ = 0; + + RuntimeCallStats* runtime_call_stats_ = nullptr; + const ProfileDataFromFile* profile_data_ = nullptr; + + bool has_js_wasm_calls_ = false; +}; + +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_PIPELINE_DATA_INL_H_ diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc index 3ea64d117865b7..0181588337df73 100644 --- a/deps/v8/src/compiler/pipeline.cc +++ b/deps/v8/src/compiler/pipeline.cc @@ -72,6 +72,7 @@ #include "src/compiler/osr.h" #include "src/compiler/pair-load-store-reducer.h" #include "src/compiler/phase.h" +#include "src/compiler/pipeline-data-inl.h" #include "src/compiler/pipeline-statistics.h" #include "src/compiler/redundancy-elimination.h" #include "src/compiler/schedule.h" @@ -155,584 +156,13 @@ namespace v8 { namespace internal { namespace compiler { -static constexpr char kCodegenZoneName[] = "codegen-zone"; -static constexpr char kGraphZoneName[] = "graph-zone"; -static constexpr char kInstructionZoneName[] = "instruction-zone"; static constexpr char kMachineGraphVerifierZoneName[] = "machine-graph-verifier-zone"; static constexpr char kPipelineCompilationJobZoneName[] = "pipeline-compilation-job-zone"; -static constexpr char kRegisterAllocationZoneName[] = - "register-allocation-zone"; static constexpr char kRegisterAllocatorVerifierZoneName[] = "register-allocator-verifier-zone"; -namespace { - -Maybe GetModuleContext(OptimizedCompilationInfo* info) { - Tagged current = info->closure()->context(); - size_t distance = 0; - while (!IsNativeContext(*current)) { - if (current->IsModuleContext()) { - return Just(OuterContext( - info->CanonicalHandle(current, current->GetIsolate()), distance)); - } - current = current->previous(); - distance++; - } - return Nothing(); -} - -} // anonymous namespace - -class PipelineData { - public: - // For main entry point. 
- PipelineData(ZoneStats* zone_stats, Isolate* isolate, - OptimizedCompilationInfo* info, - TurbofanPipelineStatistics* pipeline_statistics) - : isolate_(isolate), - allocator_(isolate->allocator()), - info_(info), - debug_name_(info_->GetDebugName()), - may_have_unverifiable_graph_(v8_flags.turboshaft), - zone_stats_(zone_stats), - pipeline_statistics_(pipeline_statistics), - graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone), - graph_zone_(graph_zone_scope_.zone()), - instruction_zone_scope_(zone_stats_, kInstructionZoneName), - instruction_zone_(instruction_zone_scope_.zone()), - codegen_zone_scope_(zone_stats_, kCodegenZoneName), - codegen_zone_(codegen_zone_scope_.zone()), - broker_(new JSHeapBroker(isolate_, info_->zone(), - info_->trace_heap_broker(), - info->code_kind())), - register_allocation_zone_scope_(zone_stats_, - kRegisterAllocationZoneName), - register_allocation_zone_(register_allocation_zone_scope_.zone()), - assembler_options_(AssemblerOptions::Default(isolate)) { - PhaseScope scope(pipeline_statistics, "V8.TFInitPipelineData"); - graph_ = graph_zone_->New(graph_zone_); - source_positions_ = graph_zone_->New(graph_); - node_origins_ = info->trace_turbo_json() - ? graph_zone_->New(graph_) - : nullptr; - simplified_ = graph_zone_->New(graph_zone_); - machine_ = graph_zone_->New( - graph_zone_, MachineType::PointerRepresentation(), - InstructionSelector::SupportedMachineOperatorFlags(), - InstructionSelector::AlignmentRequirements()); - common_ = graph_zone_->New(graph_zone_); - javascript_ = graph_zone_->New(graph_zone_); - jsgraph_ = graph_zone_->New(isolate_, graph_, common_, javascript_, - simplified_, machine_); - observe_node_manager_ = - info->node_observer() - ? graph_zone_->New(graph_zone_) - : nullptr; - dependencies_ = - info_->zone()->New(broker_, info_->zone()); - } - -#if V8_ENABLE_WEBASSEMBLY - // For WebAssembly compile entry point. - PipelineData(ZoneStats* zone_stats, wasm::WasmEngine* wasm_engine, - OptimizedCompilationInfo* info, MachineGraph* mcgraph, - TurbofanPipelineStatistics* pipeline_statistics, - SourcePositionTable* source_positions, - NodeOriginTable* node_origins, - const AssemblerOptions& assembler_options) - : isolate_(nullptr), - wasm_engine_(wasm_engine), - allocator_(wasm_engine->allocator()), - info_(info), - debug_name_(info_->GetDebugName()), - may_have_unverifiable_graph_(v8_flags.turboshaft_wasm), - zone_stats_(zone_stats), - pipeline_statistics_(pipeline_statistics), - graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone), - graph_zone_(graph_zone_scope_.zone()), - graph_(mcgraph->graph()), - source_positions_(source_positions), - node_origins_(node_origins), - machine_(mcgraph->machine()), - common_(mcgraph->common()), - mcgraph_(mcgraph), - instruction_zone_scope_(zone_stats_, kInstructionZoneName), - instruction_zone_(instruction_zone_scope_.zone()), - codegen_zone_scope_(zone_stats_, kCodegenZoneName), - codegen_zone_(codegen_zone_scope_.zone()), - register_allocation_zone_scope_(zone_stats_, - kRegisterAllocationZoneName), - register_allocation_zone_(register_allocation_zone_scope_.zone()), - assembler_options_(assembler_options) { - simplified_ = graph_zone_->New(graph_zone_); - javascript_ = graph_zone_->New(graph_zone_); - jsgraph_ = graph_zone_->New(isolate_, graph_, common_, javascript_, - simplified_, machine_); - } -#endif // V8_ENABLE_WEBASSEMBLY - - // For CodeStubAssembler and machine graph testing entry point. 
- PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info, - Isolate* isolate, AccountingAllocator* allocator, Graph* graph, - JSGraph* jsgraph, Schedule* schedule, - SourcePositionTable* source_positions, - NodeOriginTable* node_origins, JumpOptimizationInfo* jump_opt, - const AssemblerOptions& assembler_options, - const ProfileDataFromFile* profile_data) - : isolate_(isolate), -#if V8_ENABLE_WEBASSEMBLY - // TODO(clemensb): Remove this field, use GetWasmEngine directly - // instead. - wasm_engine_(wasm::GetWasmEngine()), -#endif // V8_ENABLE_WEBASSEMBLY - allocator_(allocator), - info_(info), - debug_name_(info_->GetDebugName()), - zone_stats_(zone_stats), - graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone), - graph_zone_(graph_zone_scope_.zone()), - graph_(graph), - source_positions_(source_positions), - node_origins_(node_origins), - schedule_(schedule), - instruction_zone_scope_(zone_stats_, kInstructionZoneName), - instruction_zone_(instruction_zone_scope_.zone()), - codegen_zone_scope_(zone_stats_, kCodegenZoneName), - codegen_zone_(codegen_zone_scope_.zone()), - register_allocation_zone_scope_(zone_stats_, - kRegisterAllocationZoneName), - register_allocation_zone_(register_allocation_zone_scope_.zone()), - jump_optimization_info_(jump_opt), - assembler_options_(assembler_options), - profile_data_(profile_data) { - if (jsgraph) { - jsgraph_ = jsgraph; - simplified_ = jsgraph->simplified(); - machine_ = jsgraph->machine(); - common_ = jsgraph->common(); - javascript_ = jsgraph->javascript(); - } else if (graph_) { - simplified_ = graph_zone_->New(graph_zone_); - machine_ = graph_zone_->New( - graph_zone_, MachineType::PointerRepresentation(), - InstructionSelector::SupportedMachineOperatorFlags(), - InstructionSelector::AlignmentRequirements()); - common_ = graph_zone_->New(graph_zone_); - javascript_ = graph_zone_->New(graph_zone_); - jsgraph_ = graph_zone_->New(isolate_, graph_, common_, - javascript_, simplified_, machine_); - } - } - - // For register allocation testing entry point. - PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info, - Isolate* isolate, InstructionSequence* sequence) - : isolate_(isolate), - allocator_(isolate->allocator()), - info_(info), - debug_name_(info_->GetDebugName()), - zone_stats_(zone_stats), - graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone), - instruction_zone_scope_(zone_stats_, kInstructionZoneName), - instruction_zone_(sequence->zone()), - sequence_(sequence), - codegen_zone_scope_(zone_stats_, kCodegenZoneName), - codegen_zone_(codegen_zone_scope_.zone()), - register_allocation_zone_scope_(zone_stats_, - kRegisterAllocationZoneName), - register_allocation_zone_(register_allocation_zone_scope_.zone()), - assembler_options_(AssemblerOptions::Default(isolate)) {} - - ~PipelineData() { - // Must happen before zones are destroyed. 
- delete code_generator_; - code_generator_ = nullptr; - DeleteTyper(); - DeleteRegisterAllocationZone(); - DeleteInstructionZone(); - DeleteCodegenZone(); - DeleteGraphZone(); - } - - PipelineData(const PipelineData&) = delete; - PipelineData& operator=(const PipelineData&) = delete; - - Isolate* isolate() const { return isolate_; } - AccountingAllocator* allocator() const { return allocator_; } - OptimizedCompilationInfo* info() const { return info_; } - ZoneStats* zone_stats() const { return zone_stats_; } - CompilationDependencies* dependencies() const { return dependencies_; } - TurbofanPipelineStatistics* pipeline_statistics() { - return pipeline_statistics_; - } - OsrHelper* osr_helper() { return &(*osr_helper_); } - - bool verify_graph() const { return verify_graph_; } - void set_verify_graph(bool value) { verify_graph_ = value; } - - MaybeHandle code() { return code_; } - void set_code(MaybeHandle code) { - DCHECK(code_.is_null()); - code_ = code; - } - - CodeGenerator* code_generator() const { return code_generator_; } - - // RawMachineAssembler generally produces graphs which cannot be verified. - bool MayHaveUnverifiableGraph() const { return may_have_unverifiable_graph_; } - - Zone* graph_zone() const { return graph_zone_; } - Graph* graph() const { return graph_; } - void set_graph(Graph* graph) { graph_ = graph; } - turboshaft::PipelineData GetTurboshaftPipelineData( - turboshaft::TurboshaftPipelineKind kind) { - if (!ts_data_.has_value()) { - ts_data_.emplace(kind, info_, schedule_, graph_zone_, info_->zone(), - broker_, isolate_, source_positions_, node_origins_, - sequence_, frame_, assembler_options_, - &max_unoptimized_frame_height_, - &max_pushed_argument_count_, instruction_zone_); - } - return ts_data_.value(); - } - SourcePositionTable* source_positions() const { return source_positions_; } - NodeOriginTable* node_origins() const { return node_origins_; } - MachineOperatorBuilder* machine() const { return machine_; } - SimplifiedOperatorBuilder* simplified() const { return simplified_; } - CommonOperatorBuilder* common() const { return common_; } - JSOperatorBuilder* javascript() const { return javascript_; } - JSGraph* jsgraph() const { return jsgraph_; } - MachineGraph* mcgraph() const { return mcgraph_; } - Handle native_context() const { - return handle(info()->native_context(), isolate()); - } - Handle global_object() const { - return handle(info()->global_object(), isolate()); - } - - JSHeapBroker* broker() const { return broker_; } - std::unique_ptr ReleaseBroker() { - std::unique_ptr broker(broker_); - broker_ = nullptr; - return broker; - } - - Schedule* schedule() const { return schedule_; } - void set_schedule(Schedule* schedule) { - DCHECK(!schedule_); - schedule_ = schedule; - } - void reset_schedule() { schedule_ = nullptr; } - - ObserveNodeManager* observe_node_manager() const { - return observe_node_manager_; - } - - Zone* instruction_zone() const { return instruction_zone_; } - Zone* codegen_zone() const { return codegen_zone_; } - InstructionSequence* sequence() const { return sequence_; } - Frame* frame() const { return frame_; } - - Zone* register_allocation_zone() const { return register_allocation_zone_; } - - RegisterAllocationData* register_allocation_data() const { - return register_allocation_data_; - } - - std::string const& source_position_output() const { - return source_position_output_; - } - void set_source_position_output(std::string const& source_position_output) { - source_position_output_ = source_position_output; - } - - 
JumpOptimizationInfo* jump_optimization_info() const { - return jump_optimization_info_; - } - - const AssemblerOptions& assembler_options() const { - return assembler_options_; - } - - void ChooseSpecializationContext() { - if (info()->function_context_specializing()) { - DCHECK(info()->has_context()); - specialization_context_ = Just(OuterContext( - info()->CanonicalHandle(info()->context(), isolate()), 0)); - } else { - specialization_context_ = GetModuleContext(info()); - } - } - - Maybe specialization_context() const { - return specialization_context_; - } - - size_t* address_of_max_unoptimized_frame_height() { - return &max_unoptimized_frame_height_; - } - size_t max_unoptimized_frame_height() const { - return max_unoptimized_frame_height_; - } - size_t* address_of_max_pushed_argument_count() { - return &max_pushed_argument_count_; - } - size_t max_pushed_argument_count() const { - return max_pushed_argument_count_; - } - - CodeTracer* GetCodeTracer() const { -#if V8_ENABLE_WEBASSEMBLY - if (wasm_engine_) return wasm_engine_->GetCodeTracer(); -#endif // V8_ENABLE_WEBASSEMBLY - return isolate_->GetCodeTracer(); - } - - Typer* CreateTyper() { - DCHECK_NULL(typer_); - typer_ = - new Typer(broker(), typer_flags_, graph(), &info()->tick_counter()); - return typer_; - } - - void AddTyperFlag(Typer::Flag flag) { - DCHECK_NULL(typer_); - typer_flags_ |= flag; - } - - void DeleteTyper() { - delete typer_; - typer_ = nullptr; - } - - void DeleteGraphZone() { - if (graph_zone_ == nullptr) return; - graph_zone_ = nullptr; - graph_ = nullptr; - source_positions_ = nullptr; - node_origins_ = nullptr; - simplified_ = nullptr; - machine_ = nullptr; - common_ = nullptr; - javascript_ = nullptr; - jsgraph_ = nullptr; - mcgraph_ = nullptr; - schedule_ = nullptr; - DCHECK(!turboshaft::PipelineData::HasScope()); - graph_zone_scope_.Destroy(); - } - - void DeleteInstructionZone() { - if (instruction_zone_ == nullptr) return; - instruction_zone_scope_.Destroy(); - instruction_zone_ = nullptr; - sequence_ = nullptr; - } - - void DeleteCodegenZone() { - if (codegen_zone_ == nullptr) return; - codegen_zone_scope_.Destroy(); - codegen_zone_ = nullptr; - dependencies_ = nullptr; - delete broker_; - broker_ = nullptr; - frame_ = nullptr; - } - - void DeleteRegisterAllocationZone() { - if (register_allocation_zone_ == nullptr) return; - register_allocation_zone_scope_.Destroy(); - register_allocation_zone_ = nullptr; - register_allocation_data_ = nullptr; - } - - void InitializeInstructionSequence(const CallDescriptor* call_descriptor) { - DCHECK_NULL(sequence_); - InstructionBlocks* instruction_blocks = - InstructionSequence::InstructionBlocksFor(instruction_zone(), - schedule()); - sequence_ = instruction_zone()->New( - isolate(), instruction_zone(), instruction_blocks); - if (call_descriptor && call_descriptor->RequiresFrameAsIncoming()) { - sequence_->instruction_blocks()[0]->mark_needs_frame(); - } else { - DCHECK(call_descriptor->CalleeSavedFPRegisters().is_empty()); - } - } - - void InitializeFrameData(CallDescriptor* call_descriptor) { - DCHECK_NULL(frame_); - int fixed_frame_size = 0; - if (call_descriptor != nullptr) { - fixed_frame_size = - call_descriptor->CalculateFixedFrameSize(info()->code_kind()); - } - frame_ = codegen_zone()->New(fixed_frame_size, codegen_zone()); - if (osr_helper_.has_value()) osr_helper()->SetupFrame(frame()); - } - - void InitializeRegisterAllocationData(const RegisterConfiguration* config, - CallDescriptor* call_descriptor) { - DCHECK_NULL(register_allocation_data_); - 
register_allocation_data_ = - register_allocation_zone()->New( - config, register_allocation_zone(), frame(), sequence(), - &info()->tick_counter(), debug_name()); - } - - void InitializeOsrHelper() { - DCHECK(!osr_helper_.has_value()); - osr_helper_.emplace(info()); - } - - void set_start_source_position(int position) { - DCHECK_EQ(start_source_position_, kNoSourcePosition); - start_source_position_ = position; - } - - void InitializeCodeGenerator(Linkage* linkage) { - DCHECK_NULL(code_generator_); -#if V8_ENABLE_WEBASSEMBLY - assembler_options_.is_wasm = - this->info()->IsWasm() || this->info()->IsWasmBuiltin(); -#endif - code_generator_ = new CodeGenerator( - codegen_zone(), frame(), linkage, sequence(), info(), isolate(), - osr_helper_, start_source_position_, jump_optimization_info_, - assembler_options(), info_->builtin(), max_unoptimized_frame_height(), - max_pushed_argument_count(), - v8_flags.trace_turbo_stack_accesses ? debug_name_.get() : nullptr); - } - - void BeginPhaseKind(const char* phase_kind_name) { - if (pipeline_statistics() != nullptr) { - pipeline_statistics()->BeginPhaseKind(phase_kind_name); - } - } - - void EndPhaseKind() { - if (pipeline_statistics() != nullptr) { - pipeline_statistics()->EndPhaseKind(); - } - } - - const char* debug_name() const { return debug_name_.get(); } - - const ProfileDataFromFile* profile_data() const { return profile_data_; } - void set_profile_data(const ProfileDataFromFile* profile_data) { - profile_data_ = profile_data; - } - - // RuntimeCallStats that is only available during job execution but not - // finalization. - // TODO(delphick): Currently even during execution this can be nullptr, due to - // JSToWasmWrapperCompilationUnit::Execute. Once a table can be extracted - // there, this method can DCHECK that it is never nullptr. - RuntimeCallStats* runtime_call_stats() const { return runtime_call_stats_; } - void set_runtime_call_stats(RuntimeCallStats* stats) { - runtime_call_stats_ = stats; - } - - // Used to skip the "wasm-inlining" phase when there are no JS-to-Wasm calls. - bool has_js_wasm_calls() const { return has_js_wasm_calls_; } - void set_has_js_wasm_calls(bool has_js_wasm_calls) { - has_js_wasm_calls_ = has_js_wasm_calls; - } - -#if V8_ENABLE_WEBASSEMBLY - const wasm::WasmModule* wasm_module_for_inlining() const { - return wasm_module_for_inlining_; - } - void set_wasm_module_for_inlining(const wasm::WasmModule* module) { - wasm_module_for_inlining_ = module; - } -#endif - - private: - Isolate* const isolate_; -#if V8_ENABLE_WEBASSEMBLY - wasm::WasmEngine* const wasm_engine_ = nullptr; - // The wasm module to be used for inlining wasm functions into JS. - // The first module wins and inlining of different modules into the same - // JS function is not supported. This is necessary because the wasm - // instructions use module-specific (non-canonicalized) type indices. 
- const wasm::WasmModule* wasm_module_for_inlining_ = nullptr; -#endif // V8_ENABLE_WEBASSEMBLY - AccountingAllocator* const allocator_; - OptimizedCompilationInfo* const info_; - std::unique_ptr debug_name_; - bool may_have_unverifiable_graph_ = true; - ZoneStats* const zone_stats_; - TurbofanPipelineStatistics* pipeline_statistics_ = nullptr; - bool verify_graph_ = false; - int start_source_position_ = kNoSourcePosition; - base::Optional osr_helper_; - MaybeHandle code_; - CodeGenerator* code_generator_ = nullptr; - Typer* typer_ = nullptr; - Typer::Flags typer_flags_ = Typer::kNoFlags; - - // All objects in the following group of fields are allocated in graph_zone_. - // They are all set to nullptr when the graph_zone_ is destroyed. - ZoneStats::Scope graph_zone_scope_; - Zone* graph_zone_ = nullptr; - Graph* graph_ = nullptr; - SourcePositionTable* source_positions_ = nullptr; - NodeOriginTable* node_origins_ = nullptr; - SimplifiedOperatorBuilder* simplified_ = nullptr; - MachineOperatorBuilder* machine_ = nullptr; - CommonOperatorBuilder* common_ = nullptr; - JSOperatorBuilder* javascript_ = nullptr; - JSGraph* jsgraph_ = nullptr; - MachineGraph* mcgraph_ = nullptr; - Schedule* schedule_ = nullptr; - ObserveNodeManager* observe_node_manager_ = nullptr; - base::Optional ts_data_; - - // All objects in the following group of fields are allocated in - // instruction_zone_. They are all set to nullptr when the instruction_zone_ - // is destroyed. - ZoneStats::Scope instruction_zone_scope_; - Zone* instruction_zone_; - InstructionSequence* sequence_ = nullptr; - - // All objects in the following group of fields are allocated in - // codegen_zone_. They are all set to nullptr when the codegen_zone_ - // is destroyed. - ZoneStats::Scope codegen_zone_scope_; - Zone* codegen_zone_; - CompilationDependencies* dependencies_ = nullptr; - JSHeapBroker* broker_ = nullptr; - Frame* frame_ = nullptr; - - // All objects in the following group of fields are allocated in - // register_allocation_zone_. They are all set to nullptr when the zone is - // destroyed. - ZoneStats::Scope register_allocation_zone_scope_; - Zone* register_allocation_zone_; - RegisterAllocationData* register_allocation_data_ = nullptr; - - // Source position output for --trace-turbo. - std::string source_position_output_; - - JumpOptimizationInfo* jump_optimization_info_ = nullptr; - AssemblerOptions assembler_options_; - Maybe specialization_context_ = Nothing(); - - // The maximal combined height of all inlined frames in their unoptimized - // state, and the maximal number of arguments pushed during function calls. - // Calculated during instruction selection, applied during code generation. 
- size_t max_unoptimized_frame_height_ = 0; - size_t max_pushed_argument_count_ = 0; - - RuntimeCallStats* runtime_call_stats_ = nullptr; - const ProfileDataFromFile* profile_data_ = nullptr; - - bool has_js_wasm_calls_ = false; -}; - class PipelineImpl final { public: explicit PipelineImpl(PipelineData* data) : data_(data) {} @@ -783,8 +213,10 @@ class PipelineImpl final { void VerifyGeneratedCodeIsIdempotent(); void RunPrintAndVerify(const char* phase, bool untyped = false); - bool SelectInstructionsAndAssemble(CallDescriptor* call_descriptor); - MaybeHandle GenerateCode(CallDescriptor* call_descriptor); + bool SelectInstructionsAndAssemble(CallDescriptor* call_descriptor, + bool turboshaft = false); + MaybeHandle GenerateCode(CallDescriptor* call_descriptor, + bool turboshaft = false); void AllocateRegisters(const RegisterConfiguration* config, CallDescriptor* call_descriptor, bool run_verifier); @@ -2722,13 +2154,16 @@ class WasmHeapStubCompilationJob final : public TurbofanCompilationJob { PipelineImpl pipeline_; }; +#if V8_ENABLE_WEBASSEMBLY class WasmTurboshaftWrapperCompilationJob final : public turboshaft::TurboshaftCompilationJob { public: - WasmTurboshaftWrapperCompilationJob( - Isolate* isolate, const wasm::FunctionSig* sig, bool is_import, - const wasm::WasmModule* module, CodeKind kind, - std::unique_ptr debug_name, const AssemblerOptions& options) + WasmTurboshaftWrapperCompilationJob(Isolate* isolate, + const wasm::FunctionSig* sig, + wasm::WrapperCompilationInfo wrapper_info, + const wasm::WasmModule* module, + std::unique_ptr debug_name, + const AssemblerOptions& options) // Note that the OptimizedCompilationInfo is not initialized at the time // we pass it to the CompilationJob constructor, but it is not // dereferenced there. @@ -2736,18 +2171,29 @@ class WasmTurboshaftWrapperCompilationJob final CompilationJob::State::kReadyToExecute), zone_(wasm::GetWasmEngine()->allocator(), ZONE_NAME), debug_name_(std::move(debug_name)), - info_(base::CStrVector(debug_name_.get()), &zone_, kind), + info_(base::CStrVector(debug_name_.get()), &zone_, + wrapper_info.code_kind), sig_(sig), - is_import_(is_import), + wrapper_info_(wrapper_info), module_(module), - call_descriptor_(Linkage::GetJSCallDescriptor( - &zone_, false, static_cast(sig->parameter_count()) + 1, - CallDescriptor::kNoFlags)), zone_stats_(zone_.allocator()), data_(&zone_stats_, &info_, isolate, wasm::GetWasmEngine()->allocator(), nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, options, nullptr), - pipeline_(&data_) {} + pipeline_(&data_) { + if (wrapper_info_.code_kind == CodeKind::WASM_TO_JS_FUNCTION) { + call_descriptor_ = compiler::GetWasmCallDescriptor( + &zone_, sig, WasmCallKind::kWasmImportWrapper); + if (!Is64()) { + call_descriptor_ = GetI32WasmCallDescriptor(&zone_, call_descriptor_); + } + } else { + DCHECK_EQ(wrapper_info_.code_kind, CodeKind::JS_TO_WASM_FUNCTION); + call_descriptor_ = Linkage::GetJSCallDescriptor( + &zone_, false, static_cast(sig->parameter_count()) + 1, + CallDescriptor::kNoFlags); + } + } WasmTurboshaftWrapperCompilationJob( const WasmTurboshaftWrapperCompilationJob&) = delete; @@ -2765,15 +2211,14 @@ class WasmTurboshaftWrapperCompilationJob final std::unique_ptr debug_name_; OptimizedCompilationInfo info_; const wasm::FunctionSig* sig_; - bool is_import_; + wasm::WrapperCompilationInfo wrapper_info_; const wasm::WasmModule* module_; - CallDescriptor* call_descriptor_; + CallDescriptor* call_descriptor_; // Incoming call descriptor. 
ZoneStats zone_stats_; PipelineData data_; PipelineImpl pipeline_; }; -#if V8_ENABLE_WEBASSEMBLY // static std::unique_ptr Pipeline::NewWasmHeapStubCompilationJob( Isolate* isolate, CallDescriptor* call_descriptor, @@ -2787,11 +2232,11 @@ std::unique_ptr Pipeline::NewWasmHeapStubCompilationJob( // static std::unique_ptr Pipeline::NewWasmTurboshaftWrapperCompilationJob( - Isolate* isolate, const wasm::FunctionSig* sig, bool is_import, - const wasm::WasmModule* module, CodeKind kind, + Isolate* isolate, const wasm::FunctionSig* sig, + wasm::WrapperCompilationInfo wrapper_info, const wasm::WasmModule* module, std::unique_ptr debug_name, const AssemblerOptions& options) { return std::make_unique( - isolate, sig, is_import, module, kind, std::move(debug_name), options); + isolate, sig, wrapper_info, module, std::move(debug_name), options); } #endif @@ -2813,8 +2258,8 @@ void TraceWrapperCompilation(const char* compiler, << "Begin compiling method " << info->GetDebugName().get() << " using " << compiler << std::endl; } - if (info->trace_turbo_graph() && - data->graph() != nullptr) { // Simple textual RPO. + if (!v8_flags.turboshaft_wasm_wrappers && info->trace_turbo_graph()) { + // Simple textual RPO. StdoutStream{} << "-- wasm stub " << CodeKindToString(info->code_kind()) << " graph -- " << std::endl << AsRPO(*data->graph()); @@ -2901,14 +2346,15 @@ CompilationJob::Status WasmTurboshaftWrapperCompilationJob::ExecuteJobImpl( base::Optional turboshaft_scope( pipeline_.GetTurboshaftPipelineData( - turboshaft::TurboshaftPipelineKind::kJSToWasm)); + wrapper_info_.code_kind == CodeKind::JS_TO_WASM_FUNCTION + ? turboshaft::TurboshaftPipelineKind::kJSToWasm + : turboshaft::TurboshaftPipelineKind::kWasm)); auto& turboshaft_pipeline = turboshaft_scope.value(); turboshaft_pipeline.Value().SetIsWasm(module_, sig_); - DCHECK_NOT_NULL(turboshaft::PipelineData::Get().wasm_module()); AccountingAllocator allocator; - BuildWasmWrapper(&allocator, turboshaft_pipeline.Value().graph(), - info_.code_kind(), sig_, is_import_, module_); + BuildWasmWrapper(&allocator, turboshaft_pipeline.Value().graph(), sig_, + wrapper_info_, module_); CodeTracer* code_tracer = nullptr; if (info_.trace_turbo_graph()) { // NOTE: We must not call `GetCodeTracer` if tracing is not enabled, @@ -2920,8 +2366,8 @@ CompilationJob::Status WasmTurboshaftWrapperCompilationJob::ExecuteJobImpl( turboshaft::PrintTurboshaftGraph(&printing_zone, code_tracer, "Graph generation"); - // Skip the LoopUnrolling and WasmGCOptimize phases for wrappers. - pipeline_.Run(); + // Skip the LoopUnrolling, WasmGCOptimize and WasmLowering phases for + // wrappers. // TODO(14108): Do we need value numbering if wasm_opt is turned off? if (v8_flags.wasm_opt) { pipeline_.Run(); @@ -2941,12 +2387,20 @@ CompilationJob::Status WasmTurboshaftWrapperCompilationJob::ExecuteJobImpl( pipeline_.Run(); } - if (v8_flags.turboshaft_instruction_selection) { - // Run Turboshaft instruction selection. 
- if (!pipeline_.SelectInstructionsTurboshaft(&linkage)) { - return CompilationJob::FAILED; - } + data_.BeginPhaseKind("V8.InstructionSelection"); + +#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \ + defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \ + defined(V8_TARGET_ARCH_PPC64) + bool use_turboshaft_instruction_selection = + v8_flags.turboshaft_wasm_instruction_selection_staged; +#else + bool use_turboshaft_instruction_selection = + v8_flags.turboshaft_wasm_instruction_selection_experimental; +#endif + if (use_turboshaft_instruction_selection) { + CHECK(pipeline_.SelectInstructionsTurboshaft(&linkage)); turboshaft_scope.reset(); data_.DeleteGraphZone(); pipeline_.AllocateRegisters(linkage.GetIncomingDescriptor(), false); @@ -3567,60 +3021,11 @@ std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) { } #if V8_ENABLE_WEBASSEMBLY -// static -wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub( - CallDescriptor* call_descriptor, MachineGraph* mcgraph, CodeKind kind, - const char* debug_name, const AssemblerOptions& options, - SourcePositionTable* source_positions) { - Graph* graph = mcgraph->graph(); - OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(), - kind); - // Construct a pipeline for scheduling and code generation. - wasm::WasmEngine* wasm_engine = wasm::GetWasmEngine(); - ZoneStats zone_stats(wasm_engine->allocator()); - NodeOriginTable* node_positions = graph->zone()->New(graph); - PipelineData data(&zone_stats, wasm_engine, &info, mcgraph, nullptr, - source_positions, node_positions, options); - std::unique_ptr pipeline_statistics; - if (v8_flags.turbo_stats || v8_flags.turbo_stats_nvp) { - pipeline_statistics.reset(new TurbofanPipelineStatistics( - &info, wasm_engine->GetOrCreateTurboStatistics(), &zone_stats)); - pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen"); - } - - PipelineImpl pipeline(&data); - if (info.trace_turbo_json() || info.trace_turbo_graph()) { - CodeTracer::StreamScope tracing_scope(data.GetCodeTracer()); - tracing_scope.stream() - << "---------------------------------------------------\n" - << "Begin compiling method " << info.GetDebugName().get() - << " using TurboFan" << std::endl; - } - - if (info.trace_turbo_graph()) { // Simple textual RPO. 
- StdoutStream{} << "-- wasm stub " << CodeKindToString(kind) << " graph -- " - << std::endl - << AsRPO(*graph); - } - - if (info.trace_turbo_json()) { - TurboJsonFile json_of(&info, std::ios_base::trunc); - json_of << "{\"function\":\"" << info.GetDebugName().get() - << "\", \"source\":\"\",\n\"phases\":["; - } - - pipeline.RunPrintAndVerify("V8.WasmNativeStubMachineCode", true); - - pipeline.Run(); - pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true); - - pipeline.ComputeScheduledGraph(); - - Linkage linkage(call_descriptor); - CHECK(pipeline.SelectInstructions(&linkage)); - pipeline.AssembleCode(&linkage); +namespace { +wasm::WasmCompilationResult WrapperCompilationResult( + PipelineImpl& pipeline, CallDescriptor* call_descriptor, CodeKind kind) { CodeGenerator* code_generator = pipeline.code_generator(); wasm::WasmCompilationResult result; code_generator->masm()->GetCode( @@ -3636,9 +3041,13 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub( if (kind == CodeKind::WASM_TO_JS_FUNCTION) { result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper; } + return result; +} - DCHECK(result.succeeded()); - +void TraceFinishWrapperCompilation(OptimizedCompilationInfo& info, + PipelineData& data, + const wasm::WasmCompilationResult& result, + CodeGenerator* code_generator) { if (info.trace_turbo_json()) { TurboJsonFile json_of(&info, std::ios_base::app); json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\"" @@ -3665,7 +3074,158 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub( << "Finished compiling method " << info.GetDebugName().get() << " using TurboFan" << std::endl; } +} + +} // namespace + +// static +wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub( + CallDescriptor* call_descriptor, MachineGraph* mcgraph, CodeKind kind, + const char* debug_name, const AssemblerOptions& options, + SourcePositionTable* source_positions) { + Graph* graph = mcgraph->graph(); + OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(), + kind); + // Construct a pipeline for scheduling and code generation. 
+ wasm::WasmEngine* wasm_engine = wasm::GetWasmEngine(); + ZoneStats zone_stats(wasm_engine->allocator()); + NodeOriginTable* node_positions = graph->zone()->New(graph); + PipelineData data(&zone_stats, wasm_engine, &info, mcgraph, nullptr, + source_positions, node_positions, options); + std::unique_ptr pipeline_statistics; + if (v8_flags.turbo_stats || v8_flags.turbo_stats_nvp) { + pipeline_statistics.reset(new TurbofanPipelineStatistics( + &info, wasm_engine->GetOrCreateTurboStatistics(), &zone_stats)); + pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen"); + } + TraceWrapperCompilation("TurboFan", &info, &data); + + PipelineImpl pipeline(&data); + pipeline.RunPrintAndVerify("V8.WasmNativeStubMachineCode", true); + + pipeline.Run(); + pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true); + + pipeline.ComputeScheduledGraph(); + + Linkage linkage(call_descriptor); + CHECK(pipeline.SelectInstructions(&linkage)); + pipeline.AssembleCode(&linkage); + + auto result = WrapperCompilationResult(pipeline, call_descriptor, kind); + DCHECK(result.succeeded()); + TraceFinishWrapperCompilation(info, data, result, pipeline.code_generator()); + return result; +} + +// static +wasm::WasmCompilationResult +Pipeline::GenerateCodeForWasmNativeStubFromTurboshaft( + const wasm::WasmModule* module, const wasm::FunctionSig* sig, + wasm::WrapperCompilationInfo wrapper_info, const char* debug_name, + const AssemblerOptions& options, SourcePositionTable* source_positions) { + wasm::WasmEngine* wasm_engine = wasm::GetWasmEngine(); + Zone zone(wasm_engine->allocator(), ZONE_NAME, kCompressGraphZone); + CallDescriptor* call_descriptor = + GetWasmCallDescriptor(&zone, sig, WasmCallKind::kWasmImportWrapper); + if (!Is64()) { + call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor); + } + Linkage linkage(call_descriptor); + OptimizedCompilationInfo info(base::CStrVector(debug_name), &zone, + wrapper_info.code_kind); + ZoneStats zone_stats(wasm_engine->allocator()); + PipelineData data(&zone_stats, &info, nullptr, + wasm::GetWasmEngine()->allocator(), nullptr, nullptr, + nullptr, nullptr, nullptr, nullptr, options, nullptr); + std::unique_ptr pipeline_statistics; + if (v8_flags.turbo_stats || v8_flags.turbo_stats_nvp) { + pipeline_statistics.reset(new TurbofanPipelineStatistics( + &info, wasm_engine->GetOrCreateTurboStatistics(), &zone_stats)); + pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen"); + } + TraceWrapperCompilation("Turboshaft", &info, &data); + + PipelineImpl pipeline(&data); + + { + base::Optional turboshaft_scope( + pipeline.GetTurboshaftPipelineData( + turboshaft::TurboshaftPipelineKind::kWasm)); + auto& turboshaft_pipeline = turboshaft_scope.value(); + turboshaft_pipeline.Value().SetIsWasm(module, sig); + AccountingAllocator allocator; + BuildWasmWrapper(&allocator, turboshaft_pipeline.Value().graph(), sig, + wrapper_info, module); + CodeTracer* code_tracer = nullptr; + if (info.trace_turbo_graph()) { + // NOTE: We must not call `GetCodeTracer` if tracing is not enabled, + // because it may not yet be initialized then and doing so from the + // background thread is not threadsafe. + code_tracer = data.GetCodeTracer(); + } + Zone printing_zone(&allocator, ZONE_NAME); + turboshaft::PrintTurboshaftGraph(&printing_zone, code_tracer, + "Graph generation"); + + // Skip the LoopUnrolling, WasmGCOptimize and WasmLowering phases for + // wrappers. + // TODO(14108): Do we need value numbering if wasm_opt is turned off? 
+ if (v8_flags.wasm_opt) { + pipeline.Run(); + } + + if (!Is64()) { + pipeline.Run(); + } + + // This is more than an optimization currently: We need it to sort blocks to + // work around a bug in RecreateSchedulePhase. + pipeline.Run(); + + if (V8_UNLIKELY(v8_flags.turboshaft_enable_debug_features)) { + // This phase has to run very late to allow all previous phases to use + // debug features. + pipeline.Run(); + } + + data.BeginPhaseKind("V8.InstructionSelection"); + +#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \ + defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \ + defined(V8_TARGET_ARCH_PPC64) + bool use_turboshaft_instruction_selection = + v8_flags.turboshaft_wasm_instruction_selection_staged; +#else + bool use_turboshaft_instruction_selection = + v8_flags.turboshaft_wasm_instruction_selection_experimental; +#endif + + if (use_turboshaft_instruction_selection) { + // Run Turboshaft instruction selection. + CHECK(pipeline.SelectInstructionsTurboshaft(&linkage)); + turboshaft_scope.reset(); + data.DeleteGraphZone(); + pipeline.AllocateRegisters(linkage.GetIncomingDescriptor(), false); + } else { + auto [new_graph, new_schedule] = + pipeline.Run(&linkage); + data.set_graph(new_graph); + data.set_schedule(new_schedule); + TraceSchedule(data.info(), &data, data.schedule(), + turboshaft::RecreateSchedulePhase::phase_name()); + + turboshaft_scope.reset(); + CHECK(pipeline.SelectInstructions(&linkage)); + } + } + + pipeline.AssembleCode(&linkage); + auto result = WrapperCompilationResult(pipeline, call_descriptor, + wrapper_info.code_kind); + DCHECK(result.succeeded()); + TraceFinishWrapperCompilation(info, data, result, pipeline.code_generator()); return result; } @@ -3967,10 +3527,10 @@ bool Pipeline::GenerateWasmCodeFromTurboshaftGraph( AccountingAllocator allocator; if (!wasm::BuildTSGraph( - &allocator, env->enabled_features, env->module, detected, - turboshaft_pipeline.Value().graph(), compilation_data.func_body, - compilation_data.wire_bytes_storage, compilation_data.assumptions, - &inlining_positions, compilation_data.func_index)) { + &allocator, env, detected, turboshaft_pipeline.Value().graph(), + compilation_data.func_body, compilation_data.wire_bytes_storage, + compilation_data.assumptions, &inlining_positions, + compilation_data.func_index)) { return false; } CodeTracer* code_tracer = nullptr; @@ -4218,6 +3778,52 @@ MaybeHandle Pipeline::GenerateCodeForTesting( return {}; } +// static +MaybeHandle Pipeline::GenerateTurboshaftCodeForTesting( + OptimizedCompilationInfo* info, Isolate* isolate, + CallDescriptor* call_descriptor, PipelineData* data, + const AssemblerOptions& options) { + PipelineJobScope scope(data, isolate->counters()->runtime_call_stats()); + std::unique_ptr pipeline_statistics; + if (v8_flags.turbo_stats || v8_flags.turbo_stats_nvp) { + pipeline_statistics.reset(new TurbofanPipelineStatistics( + info, isolate->GetTurboStatistics(), data->zone_stats())); + pipeline_statistics->BeginPhaseKind("V8.TFTestCodegen"); + } + + PipelineImpl pipeline(data); + + if (info->trace_turbo_json()) { + { + TurboJsonFile json_of(info, std::ios_base::trunc); + json_of << "{\"function\":\"" << info->GetDebugName().get() + << "\", \"source\":\"\",\n\"phases\":["; + } + { + UnparkedScopeIfNeeded scope(data->broker()); + AllowHandleDereference allow_deref; + + TurboJsonFile json_of(data->info(), std::ios_base::app); + PrintTurboshaftGraphForTurbolizer( + json_of, + data->GetTurboshaftPipelineData( + turboshaft::TurboshaftPipelineKind::kJS) + 
.graph(), + "V8.TSMachineCode", data->node_origins(), data->graph_zone()); + } + } + + info->tick_counter().TickAndMaybeEnterSafepoint(); + + Handle code; + if (pipeline.GenerateCode(call_descriptor, /*turboshaft*/ true) + .ToHandle(&code) && + pipeline.CommitDependencies(code)) { + return code; + } + return {}; +} + // static std::unique_ptr Pipeline::NewCompilationJob( Isolate* isolate, Handle function, CodeKind code_kind, @@ -4598,19 +4204,25 @@ MaybeHandle PipelineImpl::FinalizeCode(bool retire_broker) { } bool PipelineImpl::SelectInstructionsAndAssemble( - CallDescriptor* call_descriptor) { + CallDescriptor* call_descriptor, bool turboshaft) { Linkage linkage(call_descriptor); // Perform instruction selection and register allocation. - if (!SelectInstructions(&linkage)) return false; + if (turboshaft) { + if (!SelectInstructionsTurboshaft(&linkage)) return false; + AllocateRegisters(linkage.GetIncomingDescriptor(), false); + } else { + if (!SelectInstructions(&linkage)) return false; + } // Generate the final machine code. AssembleCode(&linkage); return true; } -MaybeHandle PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) { - if (!SelectInstructionsAndAssemble(call_descriptor)) { +MaybeHandle PipelineImpl::GenerateCode(CallDescriptor* call_descriptor, + bool turboshaft) { + if (!SelectInstructionsAndAssemble(call_descriptor, turboshaft)) { return MaybeHandle(); } return FinalizeCode(); diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h index c472d7aa9ba3aa..681eae5d5e0081 100644 --- a/deps/v8/src/compiler/pipeline.h +++ b/deps/v8/src/compiler/pipeline.h @@ -14,6 +14,7 @@ #include "src/zone/zone-containers.h" #if V8_ENABLE_WEBASSEMBLY +#include "src/wasm/module-instantiate.h" #include "src/wasm/value-type.h" #endif @@ -51,6 +52,12 @@ class MachineGraph; class Schedule; class SourcePositionTable; struct WasmCompilationData; +class PipelineData; +class ZoneStats; + +namespace turboshaft { +class PipelineData; +} struct InstructionRangesAsJSON { const InstructionSequence* sequence; @@ -67,6 +74,7 @@ class Pipeline : public AllStatic { CodeKind code_kind, bool has_script, BytecodeOffset osr_offset = BytecodeOffset::None()); +#if V8_ENABLE_WEBASSEMBLY // Run the pipeline for the WebAssembly compilation info. // Note: We pass a pointer to {detected} as it might get mutated while // inlining. @@ -83,12 +91,18 @@ class Pipeline : public AllStatic { const char* debug_name, const AssemblerOptions& assembler_options, SourcePositionTable* source_positions = nullptr); + static wasm::WasmCompilationResult + GenerateCodeForWasmNativeStubFromTurboshaft( + const wasm::WasmModule* module, const wasm::FunctionSig* sig, + wasm::WrapperCompilationInfo wrapper_info, const char* debug_name, + const AssemblerOptions& assembler_options, + SourcePositionTable* source_positions); + static bool GenerateWasmCodeFromTurboshaftGraph( OptimizedCompilationInfo* info, wasm::CompilationEnv* env, WasmCompilationData& compilation_data, MachineGraph* mcgraph, wasm::WasmFeatures* detected, CallDescriptor* call_descriptor); -#if V8_ENABLE_WEBASSEMBLY // Returns a new compilation job for a wasm heap stub. 
static std::unique_ptr NewWasmHeapStubCompilationJob( Isolate* isolate, CallDescriptor* call_descriptor, @@ -97,8 +111,8 @@ class Pipeline : public AllStatic { static std::unique_ptr NewWasmTurboshaftWrapperCompilationJob( - Isolate* isolate, const wasm::FunctionSig* sig, bool is_import, - const wasm::WasmModule* module, CodeKind kind, + Isolate* isolate, const wasm::FunctionSig* sig, + wasm::WrapperCompilationInfo wrapper_info, const wasm::WasmModule* module, std::unique_ptr debug_name, const AssemblerOptions& options); #endif @@ -124,6 +138,12 @@ class Pipeline : public AllStatic { CallDescriptor* call_descriptor, Graph* graph, const AssemblerOptions& options, Schedule* schedule = nullptr); + // Run the instruction selector on a turboshaft graph and generate code. + V8_EXPORT_PRIVATE static MaybeHandle GenerateTurboshaftCodeForTesting( + OptimizedCompilationInfo* info, Isolate* isolate, + CallDescriptor* call_descriptor, PipelineData* data, + const AssemblerOptions& options); + // Run just the register allocator phases. V8_EXPORT_PRIVATE static void AllocateRegistersForTesting( const RegisterConfiguration* config, InstructionSequence* sequence, diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h index 5b932006fb8696..f9eda2d70f1f0f 100644 --- a/deps/v8/src/compiler/raw-machine-assembler.h +++ b/deps/v8/src/compiler/raw-machine-assembler.h @@ -174,6 +174,20 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { return load; } + Node* LoadProtectedPointerFromObject(Node* base, Node* offset) { +#if V8_ENABLE_SANDBOX + static_assert(COMPRESS_POINTERS_BOOL); + Node* tagged = LoadFromObject(MachineType::Int32(), base, offset); + Node* trusted_cage_base = + LoadImmutable(MachineType::Pointer(), LoadRootRegister(), + IntPtrConstant(IsolateData::trusted_cage_base_offset())); + return BitcastWordToTagged( + WordOr(trusted_cage_base, ChangeUint32ToUint64(tagged))); +#else + return LoadFromObject(MachineType::AnyTagged(), base, offset); +#endif // V8_ENABLE_SANDBOX + } + Node* Store(MachineRepresentation rep, Node* base, Node* value, WriteBarrierKind write_barrier) { return Store(rep, base, IntPtrConstant(0), value, write_barrier); @@ -782,7 +796,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { return AddNode(machine()->BitcastTaggedToWordForTagAndSmiBits(), a); } Node* BitcastMaybeObjectToWord(Node* a) { - return AddNode(machine()->BitcastMaybeObjectToWord(), a); + return AddNode(machine()->BitcastMaybeObjectToWord(), a); } Node* BitcastWordToTagged(Node* a) { return AddNode(machine()->BitcastWordToTagged(), a); diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc index 784665cb942777..ab8717176453f2 100644 --- a/deps/v8/src/compiler/redundancy-elimination.cc +++ b/deps/v8/src/compiler/redundancy-elimination.cc @@ -40,6 +40,7 @@ Reduction RedundancyElimination::Reduce(Node* node) { case IrOpcode::kCheckReceiverOrNullOrUndefined: case IrOpcode::kCheckSmi: case IrOpcode::kCheckString: + case IrOpcode::kCheckStringOrStringWrapper: case IrOpcode::kCheckSymbol: // These are not really check nodes, but behave the same in that they can be // folded together if repeated with identical inputs. 
@@ -179,6 +180,12 @@ Subsumption CheckSubsumes(Node const* a, Node const* b, if (a->opcode() == IrOpcode::kCheckInternalizedString && b->opcode() == IrOpcode::kCheckString) { // CheckInternalizedString(node) implies CheckString(node) + } else if (a->opcode() == IrOpcode::kCheckString && + b->opcode() == IrOpcode::kCheckStringOrStringWrapper) { + // CheckString(node) implies CheckStringOrStringWrapper(node) + } else if (a->opcode() == IrOpcode::kCheckInternalizedString && + b->opcode() == IrOpcode::kCheckStringOrStringWrapper) { + // CheckInternalizedString(node) implies CheckStringOrStringWrapper(node) } else if (a->opcode() == IrOpcode::kCheckSmi && b->opcode() == IrOpcode::kCheckNumber) { // CheckSmi(node) implies CheckNumber(node) @@ -212,6 +219,7 @@ Subsumption CheckSubsumes(Node const* a, Node const* b, case IrOpcode::kCheckBounds: case IrOpcode::kCheckSmi: case IrOpcode::kCheckString: + case IrOpcode::kCheckStringOrStringWrapper: case IrOpcode::kCheckNumber: case IrOpcode::kCheckBigInt: case IrOpcode::kCheckedBigIntToBigInt64: diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc index 834fec40c7228a..cc8221152a5d55 100644 --- a/deps/v8/src/compiler/simplified-lowering.cc +++ b/deps/v8/src/compiler/simplified-lowering.cc @@ -97,6 +97,8 @@ MachineRepresentation MachineRepresentationFromArrayType( case kExternalBigInt64Array: case kExternalBigUint64Array: return MachineRepresentation::kWord64; + case kExternalFloat16Array: + UNIMPLEMENTED(); } UNREACHABLE(); } @@ -760,7 +762,8 @@ class RepresentationSelector { TRACE("--{Verify Phase}--\n"); // Patch pending type overrides. - for (auto [constant, uses] : verifier_->machine_uses_of_constants()) { + for (const auto& [constant, uses] : + verifier_->machine_uses_of_constants()) { Node* typed_constant = InsertTypeOverrideForVerifier(Type::Machine(), constant); for (auto use : uses) { @@ -2501,7 +2504,7 @@ class RepresentationSelector { // on Oddballs, so make sure we don't accidentially sneak in a // hint with Oddball feedback here.
DCHECK_NE(IrOpcode::kSpeculativeNumberEqual, node->opcode()); - V8_FALLTHROUGH; + [[fallthrough]]; case NumberOperationHint::kNumberOrBoolean: case NumberOperationHint::kNumber: VisitBinop(node, @@ -3734,6 +3737,20 @@ class RepresentationSelector { } return; } + case IrOpcode::kCheckStringOrStringWrapper: { + const CheckParameters& params = CheckParametersOf(node->op()); + if (InputIs(node, Type::StringOrStringWrapper())) { + VisitUnop(node, UseInfo::AnyTagged(), + MachineRepresentation::kTaggedPointer); + if (lower()) DeferReplacement(node, node->InputAt(0)); + } else { + VisitUnop( + node, + UseInfo::CheckedHeapObjectAsTaggedPointer(params.feedback()), + MachineRepresentation::kTaggedPointer); + } + return; + } case IrOpcode::kCheckSymbol: { VisitCheck(node, Type::Symbol(), lowering); return; diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc index 9491361be1f9d8..b4c6019e58f574 100644 --- a/deps/v8/src/compiler/simplified-operator.cc +++ b/deps/v8/src/compiler/simplified-operator.cc @@ -934,6 +934,7 @@ bool operator==(AssertNotNullParameters const& lhs, V(CheckNumber, 1, 1) \ V(CheckSmi, 1, 1) \ V(CheckString, 1, 1) \ + V(CheckStringOrStringWrapper, 1, 1) \ V(CheckBigInt, 1, 1) \ V(CheckedBigIntToBigInt64, 1, 1) \ V(CheckedInt32ToTaggedSigned, 1, 1) \ diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h index a4f6da3789a2d3..bf8b43817988c1 100644 --- a/deps/v8/src/compiler/simplified-operator.h +++ b/deps/v8/src/compiler/simplified-operator.h @@ -1008,6 +1008,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* CheckReceiverOrNullOrUndefined(); const Operator* CheckSmi(const FeedbackSource& feedback); const Operator* CheckString(const FeedbackSource& feedback); + const Operator* CheckStringOrStringWrapper(const FeedbackSource& feedback); const Operator* CheckSymbol(); const Operator* CheckedFloat64ToInt32(CheckForMinusZeroMode, diff --git a/deps/v8/src/compiler/string-builder-optimizer.cc b/deps/v8/src/compiler/string-builder-optimizer.cc index 73ad2903b888bc..811885206aa797 100644 --- a/deps/v8/src/compiler/string-builder-optimizer.cc +++ b/deps/v8/src/compiler/string-builder-optimizer.cc @@ -497,6 +497,7 @@ bool OpcodeIsAllowed(IrOpcode::Value op) { case IrOpcode::kStringLessThan: case IrOpcode::kStringLessThanOrEqual: case IrOpcode::kCheckString: + case IrOpcode::kCheckStringOrStringWrapper: case IrOpcode::kTypedStateValues: return true; default: diff --git a/deps/v8/src/compiler/turboshaft/assembler.h b/deps/v8/src/compiler/turboshaft/assembler.h index aad6ed7cf5cc04..e324c82b08cf9e 100644 --- a/deps/v8/src/compiler/turboshaft/assembler.h +++ b/deps/v8/src/compiler/turboshaft/assembler.h @@ -26,6 +26,7 @@ #include "src/compiler/simplified-operator.h" #include "src/compiler/turboshaft/builtin-call-descriptors.h" #include "src/compiler/turboshaft/graph.h" +#include "src/compiler/turboshaft/index.h" #include "src/compiler/turboshaft/operation-matcher.h" #include "src/compiler/turboshaft/operations.h" #include "src/compiler/turboshaft/phase.h" @@ -36,6 +37,7 @@ #include "src/compiler/turboshaft/snapshot-table.h" #include "src/compiler/turboshaft/uniform-reducer-adapter.h" #include "src/compiler/turboshaft/utils.h" +#include "src/compiler/write-barrier-kind.h" #include "src/flags/flags.h" #include "src/logging/runtime-call-stats.h" #include "src/objects/elements-kind.h" @@ -161,6 +163,9 @@ class LabelBase { protected: static constexpr size_t size = sizeof...(Ts); + 
LabelBase(const LabelBase&) = delete; + LabelBase& operator=(const LabelBase&) = delete; + public: static constexpr bool is_loop = loop; using values_t = std::tuple...>; @@ -230,6 +235,10 @@ class LabelBase { DCHECK_NOT_NULL(data_.block); } + LabelBase(LabelBase&& other) V8_NOEXCEPT + : data_(std::move(other.data_)), + has_incoming_jump_(other.has_incoming_jump_) {} + static void RecordValues(Block* source, BlockData& data, const values_t& values) { DCHECK_NOT_NULL(source); @@ -297,9 +306,14 @@ template class Label : public LabelBase { using super = LabelBase; + Label(const Label&) = delete; + Label& operator=(const Label&) = delete; + public: template explicit Label(Reducer* reducer) : super(reducer->Asm().NewBlock()) {} + + Label(Label&& other) V8_NOEXCEPT : super(std::move(other)) {} }; template @@ -307,6 +321,9 @@ class LoopLabel : public LabelBase { using super = LabelBase; using BlockData = typename super::BlockData; + LoopLabel(const LoopLabel&) = delete; + LoopLabel& operator=(const LoopLabel&) = delete; + public: using values_t = typename super::values_t; template @@ -314,6 +331,11 @@ class LoopLabel : public LabelBase { : super(reducer->Asm().NewBlock()), loop_header_data_{reducer->Asm().NewLoopHeader()} {} + LoopLabel(LoopLabel&& other) V8_NOEXCEPT + : super(std::move(other)), + loop_header_data_(std::move(other.loop_header_data_)), + pending_loop_phis_(std::move(other.pending_loop_phis_)) {} + Block* loop_header() const { return loop_header_data_.block; } template @@ -610,18 +632,20 @@ class GenericReducerBase; template class ScopedVariable : Variable { + using value_type = maybe_const_or_v_t; + public: - explicit ScopedVariable(Assembler& assembler) - : Variable(assembler.NewVariable( + template + explicit ScopedVariable(Reducer* reducer) + : Variable(reducer->Asm().NewVariable( static_cast(V::rep))), - assembler_(assembler) {} - ScopedVariable(Assembler& assembler, V initial_value) - : ScopedVariable(assembler) { - assembler.SetVariable(*this, initial_value); + assembler_(reducer->Asm()) {} + template + ScopedVariable(Reducer* reducer, value_type initial_value) + : ScopedVariable(reducer) { + assembler_.SetVariable(*this, assembler_.resolve(initial_value)); } - void operator=(V new_value) { assembler_.SetVariable(*this, new_value); } - V operator*() const { return assembler_.GetVariable(*this); } ScopedVariable(const ScopedVariable&) = delete; ScopedVariable(ScopedVariable&&) = delete; ScopedVariable& operator=(const ScopedVariable) = delete; @@ -632,6 +656,32 @@ class ScopedVariable : Variable { assembler_.SetVariable(*this, OpIndex::Invalid()); } + void Set(value_type new_value) { + assembler_.SetVariable(*this, assembler_.resolve(new_value)); + } + V Get() const { return assembler_.GetVariable(*this); } + + void operator=(value_type new_value) { Set(new_value); } + template ::template implicitly_convertible_to::value>> + operator V() const { + return Get(); + } + template ::template implicitly_convertible_to::value>> + operator OptionalV() const { + return Get(); + } + template && + v_traits::template implicitly_convertible_to::value>> + operator ConstOrV() const { + return Get(); + } + operator OpIndex() const { return Get(); } + operator OptionalOpIndex() const { return Get(); } + private: Assembler& assembler_; }; @@ -988,16 +1038,12 @@ class GenericAssemblerOpInterface : public Next { public: TURBOSHAFT_REDUCER_BOILERPLATE(GenericAssemblerOpInterface) - ~GenericAssemblerOpInterface() { - // If the {if_scope_stack_} is not empty, it means that a END_IF is missing. 
- DCHECK(if_scope_stack_.empty()); - } - - // These methods are used by the assembler macros (IF, ELSE, ELSE_IF, END_IF). + // These methods are used by the assembler macros (BIND, BIND_LOOP, GOTO, + // GOTO_IF). template auto ControlFlowHelper_Bind(L& label) -> base::prepend_tuple_type { - // LoopLabels need to be bound with `LOOP` instead of `BIND`. + // LoopLabels need to be bound with `BIND_LOOP` instead of `BIND`. static_assert(!L::is_loop); return label.Bind(Asm()); } @@ -1005,7 +1051,7 @@ class GenericAssemblerOpInterface : public Next { template auto ControlFlowHelper_BindLoop(L& label) -> base::prepend_tuple_type { - // Only LoopLabels can be bound with `LOOP`. Otherwise use `BIND`. + // Only LoopLabels can be bound with `BIND_LOOP`. Otherwise use `BIND`. static_assert(L::is_loop); return label.BindLoop(Asm()); } @@ -1016,6 +1062,29 @@ class GenericAssemblerOpInterface : public Next { label.EndLoop(Asm()); } + std::tuple, Label<>> ControlFlowHelper_While( + std::function()> cond_builder) { + LoopLabel<> loop_header(this); + Label<> loop_exit(this); + + ControlFlowHelper_Goto(loop_header, {}); + + auto [bound] = loop_header.BindLoop(Asm()); + V cond = cond_builder(); + ControlFlowHelper_GotoIfNot(cond, loop_exit, {}); + + return std::make_tuple(bound, std::move(loop_header), std::move(loop_exit)); + } + + template + void ControlFlowHelper_EndWhileLoop(L1& header_label, L2& exit_label) { + static_assert(L1::is_loop); + static_assert(!L2::is_loop); + ControlFlowHelper_Goto(header_label, {}); + ControlFlowHelper_EndLoop(header_label); + ControlFlowHelper_Bind(exit_label); + } + template void ControlFlowHelper_Goto(L& label, const typename L::const_or_values_t& values) { @@ -1040,77 +1109,48 @@ class GenericAssemblerOpInterface : public Next { resolved_values); } - bool ControlFlowHelper_If(ConditionWithHint condition, bool negate) { + struct ControlFlowHelper_IfState { + block_t* else_block; + block_t* end_block; + }; + + bool ControlFlowHelper_BindIf(ConditionWithHint condition, + ControlFlowHelper_IfState* state) { block_t* then_block = Asm().NewBlock(); - block_t* else_block = Asm().NewBlock(); - block_t* end_block = Asm().NewBlock(); - if (negate) { - Asm().Branch(condition, else_block, then_block); - } else { - Asm().Branch(condition, then_block, else_block); - } - if_scope_stack_.emplace_back(else_block, end_block); + state->else_block = Asm().NewBlock(); + state->end_block = Asm().NewBlock(); + Asm().Branch(condition, then_block, state->else_block); return Asm().Bind(then_block); } - template - bool ControlFlowHelper_ElseIf(F&& condition_builder) { - DCHECK_LT(0, if_scope_stack_.size()); - auto& info = if_scope_stack_.back(); - block_t* else_block = info.else_block; - DCHECK_NOT_NULL(else_block); - if (!Asm().Bind(else_block)) return false; + bool ControlFlowHelper_BindIfNot(ConditionWithHint condition, + ControlFlowHelper_IfState* state) { block_t* then_block = Asm().NewBlock(); - info.else_block = Asm().NewBlock(); - Asm().Branch(ConditionWithHint{condition_builder()}, then_block, - info.else_block); + state->else_block = Asm().NewBlock(); + state->end_block = Asm().NewBlock(); + Asm().Branch(condition, state->else_block, then_block); return Asm().Bind(then_block); } - bool ControlFlowHelper_Else() { - DCHECK_LT(0, if_scope_stack_.size()); - auto& info = if_scope_stack_.back(); - block_t* else_block = info.else_block; - DCHECK_NOT_NULL(else_block); - info.else_block = nullptr; + bool ControlFlowHelper_BindElse(ControlFlowHelper_IfState* state) { + block_t* else_block = 
state->else_block; + state->else_block = nullptr; return Asm().Bind(else_block); } - void ControlFlowHelper_EndIf() { - DCHECK_LT(0, if_scope_stack_.size()); - auto& info = if_scope_stack_.back(); - // Do we still have to place an else block (aka we had if's without else). - if (info.else_block) { - if (Asm().Bind(info.else_block)) { - Asm().Goto(info.end_block); - } - } - Asm().Bind(info.end_block); - if_scope_stack_.pop_back(); + void ControlFlowHelper_FinishIfBlock(ControlFlowHelper_IfState* state) { + if (Asm().current_block() == nullptr) return; + Asm().Goto(state->end_block); } - void ControlFlowHelper_GotoEnd() { - DCHECK_LT(0, if_scope_stack_.size()); - auto& info = if_scope_stack_.back(); - - if (!Asm().current_block()) { - // We had an unconditional goto inside the block, so we don't need to add - // a jump to the end block. - return; + void ControlFlowHelper_EndIf(ControlFlowHelper_IfState* state) { + if (state->else_block) { + if (Asm().Bind(state->else_block)) { + Asm().Goto(state->end_block); + } } - // Generate a jump to the end block. - Asm().Goto(info.end_block); + Asm().Bind(state->end_block); } - - private: - struct IfScopeInfo { - block_t* else_block; - block_t* end_block; - - IfScopeInfo(block_t* else_block, block_t* end_block) - : else_block(else_block), end_block(end_block) {} - }; - base::SmallVector if_scope_stack_; }; template @@ -1121,7 +1161,8 @@ class TurboshaftAssemblerOpInterface template explicit TurboshaftAssemblerOpInterface(Args... args) - : matcher_(Asm().output_graph()) {} + : GenericAssemblerOpInterface(args...), + matcher_(Asm().output_graph()) {} const OperationMatcher& matcher() const { return matcher_; } @@ -1140,34 +1181,66 @@ class TurboshaftAssemblerOpInterface return Next::ReduceProjection(tuple, index, rep); } -// Methods to be used by the reducers to reducer operations with the whole -// reducer stack. + // Methods to be used by the reducers to reducer operations with the whole + // reducer stack. 
+ + V GenericBinop(V left, V right, OpIndex frame_state, + OpIndex context, GenericBinopOp::Kind kind) { + return ReduceIfReachableGenericBinop(left, right, frame_state, context, + kind); + } +#define DECL_GENERIC_BINOP(Name) \ + V Generic##Name(V left, V right, \ + OpIndex frame_state, OpIndex context) { \ + return GenericBinop(left, right, frame_state, context, \ + GenericBinopOp::Kind::k##Name); \ + } + GENERIC_BINOP_LIST(DECL_GENERIC_BINOP) +#undef DECL_GENERIC_BINOP + + V GenericUnop(V input, OpIndex frame_state, OpIndex context, + GenericUnopOp::Kind kind) { + return ReduceIfReachableGenericUnop(input, frame_state, context, kind); + } +#define DECL_GENERIC_UNOP(Name) \ + V Generic##Name(V input, OpIndex frame_state, \ + OpIndex context) { \ + return GenericUnop(input, frame_state, context, \ + GenericUnopOp::Kind::k##Name); \ + } + GENERIC_UNOP_LIST(DECL_GENERIC_UNOP) +#undef DECL_GENERIC_UNOP + + V ToNumberOrNumeric(V input, OpIndex frame_state, + OpIndex context, Object::Conversion kind) { + return ReduceIfReachableToNumberOrNumeric(input, frame_state, context, + kind); + } + V ToNumber(V input, OpIndex frame_state, OpIndex context) { + return ToNumberOrNumeric(input, frame_state, context, + Object::Conversion::kToNumber); + } + V ToNumeric(V input, OpIndex frame_state, OpIndex context) { + return ToNumberOrNumeric(input, frame_state, context, + Object::Conversion::kToNumeric); + } + #define DECL_MULTI_REP_BINOP(name, operation, rep_type, kind) \ OpIndex name(OpIndex left, OpIndex right, rep_type rep) { \ return ReduceIfReachable##operation(left, right, \ operation##Op::Kind::k##kind, rep); \ } -#define DECL_SINGLE_REP_BINOP(name, operation, kind, rep) \ - OpIndex name(OpIndex left, OpIndex right) { \ - return ReduceIfReachable##operation(left, right, \ - operation##Op::Kind::k##kind, rep); \ - } + #define DECL_SINGLE_REP_BINOP_V(name, operation, kind, tag) \ V name(ConstOrV left, ConstOrV right) { \ return ReduceIfReachable##operation(resolve(left), resolve(right), \ operation##Op::Kind::k##kind, \ V::rep); \ } -#define DECL_SINGLE_REP_BINOP_NO_KIND(name, operation, rep) \ - OpIndex name(OpIndex left, OpIndex right) { \ - return ReduceIfReachable##operation(left, right, rep); \ - } DECL_MULTI_REP_BINOP(WordAdd, WordBinop, WordRepresentation, Add) DECL_SINGLE_REP_BINOP_V(Word32Add, WordBinop, Add, Word32) DECL_SINGLE_REP_BINOP_V(Word64Add, WordBinop, Add, Word64) DECL_SINGLE_REP_BINOP_V(WordPtrAdd, WordBinop, Add, WordPtr) - DECL_SINGLE_REP_BINOP(PointerAdd, WordBinop, Add, - WordRepresentation::WordPtr()) DECL_MULTI_REP_BINOP(WordMul, WordBinop, WordRepresentation, Mul) DECL_SINGLE_REP_BINOP_V(Word32Mul, WordBinop, Mul, Word32) @@ -1194,8 +1267,6 @@ class TurboshaftAssemblerOpInterface DECL_SINGLE_REP_BINOP_V(Word32Sub, WordBinop, Sub, Word32) DECL_SINGLE_REP_BINOP_V(Word64Sub, WordBinop, Sub, Word64) DECL_SINGLE_REP_BINOP_V(WordPtrSub, WordBinop, Sub, WordPtr) - DECL_SINGLE_REP_BINOP(PointerSub, WordBinop, Sub, - WordRepresentation::WordPtr()) DECL_MULTI_REP_BINOP(IntDiv, WordBinop, WordRepresentation, SignedDiv) DECL_SINGLE_REP_BINOP_V(Int32Div, WordBinop, SignedDiv, Word32) @@ -1371,8 +1442,7 @@ class TurboshaftAssemblerOpInterface UnsignedLessThan) DECL_SINGLE_REP_COMPARISON_V(Uint32LessThan, UnsignedLessThan, Word32) DECL_SINGLE_REP_COMPARISON_V(Uint64LessThan, UnsignedLessThan, Word64) - DECL_SINGLE_REP_BINOP(UintPtrLessThan, Comparison, UnsignedLessThan, - WordRepresentation::WordPtr()) + DECL_SINGLE_REP_COMPARISON_V(UintPtrLessThan, UnsignedLessThan, WordPtr) 
DECL_MULTI_REP_BINOP(FloatLessThan, Comparison, RegisterRepresentation, SignedLessThan) DECL_SINGLE_REP_COMPARISON_V(Float32LessThan, SignedLessThan, Float32) @@ -1390,8 +1460,8 @@ class TurboshaftAssemblerOpInterface Word32) DECL_SINGLE_REP_COMPARISON_V(Uint64LessThanOrEqual, UnsignedLessThanOrEqual, Word64) - DECL_SINGLE_REP_BINOP(UintPtrLessThanOrEqual, Comparison, - UnsignedLessThanOrEqual, WordRepresentation::WordPtr()) + DECL_SINGLE_REP_COMPARISON_V(UintPtrLessThanOrEqual, UnsignedLessThanOrEqual, + WordPtr) DECL_MULTI_REP_BINOP(FloatLessThanOrEqual, Comparison, RegisterRepresentation, SignedLessThanOrEqual) DECL_SINGLE_REP_COMPARISON_V(Float32LessThanOrEqual, SignedLessThanOrEqual, @@ -1405,10 +1475,17 @@ class TurboshaftAssemblerOpInterface return ReduceIfReachableComparison(left, right, kind, rep); } -#undef DECL_SINGLE_REP_BINOP #undef DECL_SINGLE_REP_BINOP_V #undef DECL_MULTI_REP_BINOP -#undef DECL_SINGLE_REP_BINOP_NO_KIND + + OpIndex FloatUnary(OpIndex input, FloatUnaryOp::Kind kind, + FloatRepresentation rep) { + return ReduceIfReachableFloatUnary(input, kind, rep); + } + V Float64Unary(V input, FloatUnaryOp::Kind kind) { + return ReduceIfReachableFloatUnary(input, kind, + FloatRepresentation::Float64()); + } #define DECL_MULTI_REP_UNARY(name, operation, rep_type, kind) \ OpIndex name(OpIndex input, rep_type rep) { \ @@ -1555,10 +1632,18 @@ class TurboshaftAssemblerOpInterface DECL_TAGGED_BITCAST(WordPtr, Smi, kSmi) DECL_TAGGED_BITCAST(WordPtr, HeapObject, kHeapObject) DECL_TAGGED_BITCAST(HeapObject, WordPtr, kHeapObject) - DECL_TAGGED_BITCAST(WordPtr, Tagged, kAny) - DECL_TAGGED_BITCAST(Tagged, WordPtr, kAny) #undef DECL_TAGGED_BITCAST - V BitcastTaggedToWordPtrForTagAndSmiBits(V input) { + V BitcastWordPtrToTagged(V input) { + return TaggedBitcast(input, V::rep, V::rep, + TaggedBitcastOp::Kind::kAny); + } + + V BitcastTaggedToWordPtr(V input) { + return TaggedBitcast(input, V::rep, V::rep, + TaggedBitcastOp::Kind::kAny); + } + + V BitcastTaggedToWordPtrForTagAndSmiBits(V input) { return TaggedBitcast(input, RegisterRepresentation::Tagged(), RegisterRepresentation::WordPtr(), TaggedBitcastOp::Kind::kTagAndSmiBits); @@ -1580,6 +1665,14 @@ class TurboshaftAssemblerOpInterface V Float64IsNaN(V input) { return FloatIs(input, NumericKind::kNaN, FloatRepresentation::Float64()); } + V Float64IsHole(V input) { + return FloatIs(input, NumericKind::kFloat64Hole, + FloatRepresentation::Float64()); + } + // Float64IsSmi returns true if {input} is an integer in smi range. 
+ V Float64IsSmi(V input) { + return FloatIs(input, NumericKind::kSmi, FloatRepresentation::Float64()); + } OpIndex ObjectIsNumericValue(OpIndex input, NumericKind kind, FloatRepresentation input_rep) { @@ -1627,6 +1720,7 @@ class TurboshaftAssemblerOpInterface CONVERT_PRIMITIVE_TO_OBJECT(ConvertInt32ToNumber, Number, Word32, Signed) CONVERT_PRIMITIVE_TO_OBJECT(ConvertUint32ToNumber, Number, Word32, Unsigned) CONVERT_PRIMITIVE_TO_OBJECT(ConvertWord32ToBoolean, Boolean, Word32, Signed) + CONVERT_PRIMITIVE_TO_OBJECT(ConvertCharCodeToString, String, Word32, CharCode) #undef CONVERT_PRIMITIVE_TO_OBJECT V ConvertFloat64ToNumber(V input, CheckForMinusZeroMode minus_zero_mode) { @@ -1691,11 +1785,17 @@ class TurboshaftAssemblerOpInterface V ConvertJSPrimitiveToObject(V value, V native_context, - V global_proxy, + OptionalV global_proxy, ConvertReceiverMode mode) { return ReduceIfReachableConvertJSPrimitiveToObject(value, native_context, global_proxy, mode); } + V ConvertJSPrimitiveToObject(V value, + V native_context, + ConvertReceiverMode mode) { + return ConvertJSPrimitiveToObject(value, native_context, OpIndex::Invalid(), + mode); + } V Word32Constant(uint32_t value) { return ReduceIfReachableConstant(ConstantOp::Kind::kWord32, @@ -1710,6 +1810,9 @@ class TurboshaftAssemblerOpInterface V Word64Constant(int64_t value) { return Word64Constant(static_cast(value)); } + V WordPtrConstant(uintptr_t value) { + return WordConstant(value, WordRepresentation::WordPtr()); + } OpIndex WordConstant(uint64_t value, WordRepresentation rep) { switch (rep.value()) { case WordRepresentation::Word32(): @@ -1787,7 +1890,7 @@ class TurboshaftAssemblerOpInterface } // TODO(nicohartmann@): Might want to get rid of the isolate when supporting // Wasm. - V CEntryStubConstant(Isolate* isolate, int result_size, + V CEntryStubConstant(Isolate* isolate, int result_size, ArgvMode argv_mode = ArgvMode::kStack, bool builtin_exit_frame = false) { if (argv_mode != ArgvMode::kStack) { @@ -1850,7 +1953,7 @@ class TurboshaftAssemblerOpInterface Word64, Float64) DECL_CHANGE_V(ChangeUint32ToFloat64, kUnsignedToFloat, kNoAssumption, Word32, Float64) - DECL_CHANGE_V(ChangeFloat64ToFloat32, kFloatConversion, kNoAssumption, + DECL_CHANGE_V(TruncateFloat64ToFloat32, kFloatConversion, kNoAssumption, Float64, Float32) DECL_CHANGE_V(ChangeFloat32ToFloat64, kFloatConversion, kNoAssumption, Float32, Float64) @@ -1906,7 +2009,7 @@ class TurboshaftAssemblerOpInterface } } - V IsSmi(V object) { + V IsSmi(V object) { if constexpr (COMPRESS_POINTERS_BOOL) { return Word32Equal(Word32BitwiseAnd(V::Cast(object), kSmiTagMask), kSmiTag); @@ -2111,11 +2214,16 @@ class TurboshaftAssemblerOpInterface rep.SizeInBytesLog2()); } - OpIndex LoadProtectedPointerField(OpIndex base, int32_t offset) { + // Load a protected (trusted -> trusted) pointer field. The read value is + // either a Smi or a TrustedObject. + V LoadProtectedPointerField( + V base, OptionalV index, + LoadOp::Kind kind = LoadOp::Kind::TaggedBase(), int offset = 0, + int element_size_log2 = kTaggedSizeLog2) { #if V8_ENABLE_SANDBOX static_assert(COMPRESS_POINTERS_BOOL); - OpIndex tagged = Load(base, LoadOp::Kind::TaggedBase(), - MemoryRepresentation::Uint32(), offset); + V tagged = Load(base, index, kind, MemoryRepresentation::Uint32(), + offset, index.valid() ? 
element_size_log2 : 0); OpIndex trusted_cage_base = Load(LoadRootRegister(), LoadOp::Kind::RawAligned().Immutable(), MemoryRepresentation::UintPtr(), @@ -2123,20 +2231,69 @@ class TurboshaftAssemblerOpInterface // The bit cast is needed to change the type of the node to Tagged. This is // necessary so that if this value gets spilled on the stack, then the GC // will process it. + // TODO(clemensb): Can an addition instead of bitwise-or generate better + // code? return BitcastWordPtrToTagged( WordPtrBitwiseOr(ChangeUint32ToUintPtr(tagged), trusted_cage_base)); #else - return Load(base, LoadOp::Kind::TaggedBase(), - MemoryRepresentation::TaggedPointer(), offset); + return Load(base, index, LoadOp::Kind::TaggedBase(), + MemoryRepresentation::TaggedPointer(), offset, + index.valid() ? element_size_log2 : 0); +#endif // V8_ENABLE_SANDBOX + } + + // Load a protected (trusted -> trusted) pointer field. The read value is + // either a Smi or a TrustedObject. + V LoadProtectedPointerField(V base, LoadOp::Kind kind, + int32_t offset) { + return LoadProtectedPointerField(base, OpIndex::Invalid(), kind, offset); + } + + // Load a trusted (indirect) pointer. Returns Smi or ExposedTrustedObject. + V LoadTrustedPointerField(V base, OptionalV index, + LoadOp::Kind kind, IndirectPointerTag tag, + int offset = 0) { +#if V8_ENABLE_SANDBOX + static_assert(COMPRESS_POINTERS_BOOL); + V handle = + Load(base, index, kind, MemoryRepresentation::Uint32(), offset); + V table_index = + Word32ShiftRightLogical(handle, kTrustedPointerHandleShift); + V table_offset = __ ChangeUint32ToUint64( + Word32ShiftLeft(table_index, kTrustedPointerTableEntrySizeLog2)); + V table = + Load(LoadRootRegister(), LoadOp::Kind::RawAligned().Immutable(), + MemoryRepresentation::UintPtr(), + IsolateData::trusted_pointer_table_offset() + + Internals::kTrustedPointerTableBasePointerOffset); + V decoded_ptr = + Load(table, table_offset, LoadOp::Kind::RawAligned(), + MemoryRepresentation::UintPtr()); + + // Untag the pointer and remove the marking bit in one operation. + decoded_ptr = + __ Word64BitwiseAnd(decoded_ptr, ~(tag | kTrustedPointerTableMarkBit)); + + // Bitcast to tagged so that this gets scanned by the GC properly. + return BitcastWordPtrToTagged(decoded_ptr); +#else + return Load(base, index, kind, MemoryRepresentation::TaggedPointer(), + offset); #endif // V8_ENABLE_SANDBOX } - V LoadFixedArrayElement(V array, int index) { + // Load a trusted (indirect) pointer. Returns Smi or ExposedTrustedObject.
+ V LoadTrustedPointerField(V base, LoadOp::Kind kind, + IndirectPointerTag tag, int offset = 0) { + return LoadTrustedPointerField(base, OpIndex::Invalid(), kind, tag, offset); + } + + V LoadFixedArrayElement(V array, int index) { return Load(array, LoadOp::Kind::TaggedBase(), MemoryRepresentation::AnyTagged(), FixedArray::OffsetOfElementAt(index)); } - V LoadFixedArrayElement(V array, V index) { + V LoadFixedArrayElement(V array, V index) { return Load(array, index, LoadOp::Kind::TaggedBase(), MemoryRepresentation::AnyTagged(), FixedArray::OffsetOfElementAt(0), kTaggedSizeLog2); @@ -2149,14 +2306,27 @@ class TurboshaftAssemblerOpInterface } V LoadFixedDoubleArrayElement(V array, V index) { - DCHECK_EQ(ElementsKindToShiftSize(PACKED_DOUBLE_ELEMENTS), - ElementsKindToShiftSize(HOLEY_DOUBLE_ELEMENTS)); + static_assert(ElementsKindToShiftSize(PACKED_DOUBLE_ELEMENTS) == + ElementsKindToShiftSize(HOLEY_DOUBLE_ELEMENTS)); return Load(array, index, LoadOp::Kind::TaggedBase(), MemoryRepresentation::Float64(), FixedDoubleArray::OffsetOfElementAt(0), ElementsKindToShiftSize(PACKED_DOUBLE_ELEMENTS)); } + V LoadProtectedFixedArrayElement(V array, + V index) { + return LoadProtectedPointerField(array, index, LoadOp::Kind::TaggedBase(), + ProtectedFixedArray::OffsetOfElementAt(0)); + } + + V LoadProtectedFixedArrayElement(V array, + int index) { + return LoadProtectedPointerField( + array, LoadOp::Kind::TaggedBase(), + ProtectedFixedArray::OffsetOfElementAt(index)); + } + void Store( OpIndex base, OptionalOpIndex index, OpIndex value, StoreOp::Kind kind, MemoryRepresentation stored_rep, WriteBarrierKind write_barrier, @@ -2198,14 +2368,20 @@ class TurboshaftAssemblerOpInterface WriteBarrierKind::kNoWriteBarrier, offset, rep.SizeInBytesLog2()); } - template - V LoadField(V object, const FieldAccess& access) { - if constexpr (is_taggable_v) { - DCHECK_EQ(access.base_is_tagged, BaseTaggedness::kTaggedBase); - } else { - static_assert(std::is_same_v); - DCHECK_EQ(access.base_is_tagged, BaseTaggedness::kUntaggedBase); - } + template + V LoadField(V object, const FieldAccess& access) { + DCHECK_EQ(access.base_is_tagged, BaseTaggedness::kTaggedBase); + return LoadFieldImpl(object, access); + } + + template + V LoadField(V raw_base, const FieldAccess& access) { + DCHECK_EQ(access.base_is_tagged, BaseTaggedness::kUntaggedBase); + return LoadFieldImpl(raw_base, access); + } + + template + V LoadFieldImpl(OpIndex object, const FieldAccess& access) { MachineType machine_type = access.machine_type; if (machine_type.IsMapWord()) { machine_type = MachineType::TaggedPointer(); @@ -2244,19 +2420,26 @@ class TurboshaftAssemblerOpInterface // Helpers to read the most common fields. // TODO(nicohartmann@): Strengthen this to `V`. 
- V LoadMapField(V object) { - return LoadField(object, AccessBuilder::ForMap()); + V LoadMapField(V object) { + return LoadField(object, AccessBuilder::ForMap()); } - V LoadInstanceTypeField(V map) { + V LoadInstanceTypeField(V map) { return LoadField(map, AccessBuilder::ForMapInstanceType()); } - V HasInstanceType(V object, InstanceType instance_type) { + V HasInstanceType(V object, InstanceType instance_type) { return Word32Equal(LoadInstanceTypeField(LoadMapField(object)), Word32Constant(instance_type)); } + template >> + V LoadTaggedField(V object, int field_offset) { + return Load(object, LoadOp::Kind::TaggedBase(), + MemoryRepresentation::AnyTagged(), field_offset); + } + template void StoreField(V object, const FieldAccess& access, V value) { StoreFieldImpl(object, access, value, @@ -2305,6 +2488,30 @@ class TurboshaftAssemblerOpInterface maybe_initializing_or_transitioning); } + void StoreFixedArrayElement(V array, int index, V value, + compiler::WriteBarrierKind write_barrier) { + Store(array, value, LoadOp::Kind::TaggedBase(), + MemoryRepresentation::AnyTagged(), write_barrier, + FixedArray::kHeaderSize + index * kTaggedSize); + } + + void StoreFixedArrayElement(V array, V index, + V value, + compiler::WriteBarrierKind write_barrier) { + Store(array, index, value, LoadOp::Kind::TaggedBase(), + MemoryRepresentation::AnyTagged(), write_barrier, + FixedArray::kHeaderSize, kTaggedSizeLog2); + } + void StoreFixedDoubleArrayElement(V array, V index, + V value) { + static_assert(ElementsKindToShiftSize(PACKED_DOUBLE_ELEMENTS) == + ElementsKindToShiftSize(HOLEY_DOUBLE_ELEMENTS)); + Store(array, index, value, LoadOp::Kind::TaggedBase(), + MemoryRepresentation::Float64(), WriteBarrierKind::kNoWriteBarrier, + FixedDoubleArray::kHeaderSize, + ElementsKindToShiftSize(PACKED_DOUBLE_ELEMENTS)); + } + template V LoadArrayBufferElement(V object, const ElementAccess& access, V index) { @@ -2365,25 +2572,25 @@ class TurboshaftAssemblerOpInterface return ReduceIfReachableDecodeExternalPointer(handle, tag); } - OpIndex StackCheck(StackCheckOp::CheckOrigin origin, - StackCheckOp::CheckKind kind) { - return ReduceIfReachableStackCheck(origin, kind); + void StackCheck(StackCheckOp::CheckOrigin origin, + StackCheckOp::CheckKind kind) { + ReduceIfReachableStackCheck(origin, kind); } void Retain(OpIndex value) { ReduceIfReachableRetain(value); } - OpIndex StackPointerGreaterThan(OpIndex limit, StackCheckKind kind) { + V StackPointerGreaterThan(V limit, StackCheckKind kind) { return ReduceIfReachableStackPointerGreaterThan(limit, kind); } - OpIndex StackCheckOffset() { + V StackCheckOffset() { return ReduceIfReachableFrameConstant( FrameConstantOp::Kind::kStackCheckOffset); } - OpIndex FramePointer() { + V FramePointer() { return ReduceIfReachableFrameConstant(FrameConstantOp::Kind::kFramePointer); } - OpIndex ParentFramePointer() { + V ParentFramePointer() { return ReduceIfReachableFrameConstant( FrameConstantOp::Kind::kParentFramePointer); } @@ -2392,6 +2599,21 @@ class TurboshaftAssemblerOpInterface return ReduceIfReachableStackSlot(size, alignment, is_tagged); } + V AdaptLocalArgument(V argument) { +#ifdef V8_ENABLE_DIRECT_LOCAL + // With direct locals, the argument can be passed directly. + return BitcastTaggedToWordPtr(argument); +#else + // With indirect locals, the argument has to be stored on the stack and the + // slot address is passed. 
+ V stack_slot = + StackSlot(sizeof(uintptr_t), alignof(uintptr_t), true); + StoreOffHeap(stack_slot, __ BitcastTaggedToWordPtr(argument), + MemoryRepresentation::UintPtr()); + return stack_slot; +#endif + } + OpIndex LoadRootRegister() { return ReduceIfReachableLoadRootRegister(); } OpIndex Select(OpIndex cond, OpIndex vtrue, OpIndex vfalse, @@ -2399,14 +2621,28 @@ class TurboshaftAssemblerOpInterface SelectOp::Implementation implem) { return ReduceIfReachableSelect(cond, vtrue, vfalse, rep, hint, implem); } +#define DEF_SELECT(Rep) \ + V Rep##Select(ConstOrV cond, ConstOrV vtrue, \ + ConstOrV vfalse) { \ + return Select(resolve(cond), resolve(vtrue), resolve(vfalse), \ + RegisterRepresentation::Rep(), BranchHint::kNone, \ + SelectOp::Implementation::kCMove); \ + } + DEF_SELECT(Word32) + DEF_SELECT(Word64) + DEF_SELECT(Float32) + DEF_SELECT(Float64) +#undef DEF_SELECT + template - V> Conditional(V cond, V vtrue, + V> Conditional(ConstOrV cond, V vtrue, V vfalse, BranchHint hint = BranchHint::kNone) { - return Select(cond, vtrue, vfalse, V>::rep, hint, + return Select(resolve(cond), vtrue, vfalse, + V>::rep, hint, SelectOp::Implementation::kBranch); } - void Switch(OpIndex input, base::Vector cases, + void Switch(V input, base::Vector cases, Block* default_case, BranchHint default_hint = BranchHint::kNone) { ReduceIfReachableSwitch(input, cases, default_case, default_hint); @@ -2433,7 +2669,7 @@ class TurboshaftAssemblerOpInterface return cached_param; } OpIndex OsrValue(int index) { return ReduceIfReachableOsrValue(index); } - void Return(OpIndex pop_count, base::Vector return_values) { + void Return(V pop_count, base::Vector return_values) { ReduceIfReachableReturn(pop_count, return_values); } void Return(OpIndex result) { @@ -2466,8 +2702,8 @@ class TurboshaftAssemblerOpInterface DCHECK(context.valid()); auto arguments = std::apply( [context](auto&&... 
as) { - return base::SmallVector + 1>{ + return base::SmallVector< + OpIndex, std::tuple_size_v + 1>{ std::forward(as)..., context}; }, args); @@ -2606,6 +2842,36 @@ class TurboshaftAssemblerOpInterface arguments, desc, effects); } +#define DECL_GENERIC_BINOP_BUILTIN_CALL(Name) \ + V CallBuiltin_##Name(Isolate* isolate, OpIndex frame_state, \ + V context, V lhs, \ + V rhs) { \ + return CallBuiltin( \ + isolate, frame_state, context, {lhs, rhs}); \ + } + GENERIC_BINOP_LIST(DECL_GENERIC_BINOP_BUILTIN_CALL) +#undef DECL_GENERIC_BINOP_BUILTIN_CALL + +#define DECL_GENERIC_UNOP_BUILTIN_CALL(Name) \ + V CallBuiltin_##Name(Isolate* isolate, OpIndex frame_state, \ + V context, V input) { \ + return CallBuiltin( \ + isolate, frame_state, context, {input}); \ + } + GENERIC_UNOP_LIST(DECL_GENERIC_UNOP_BUILTIN_CALL) +#undef DECL_GENERIC_UNOP_BUILTIN_CALL + + V CallBuiltin_ToNumber(Isolate* isolate, OpIndex frame_state, + V context, V input) { + return CallBuiltin( + isolate, frame_state, context, {input}); + } + V CallBuiltin_ToNumeric(Isolate* isolate, OpIndex frame_state, + V context, V input) { + return CallBuiltin( + isolate, frame_state, context, {input}); + } + void CallBuiltin_CheckTurbofanType(Isolate* isolate, V context, V object, V allocated_type, @@ -2826,7 +3092,7 @@ class TurboshaftAssemblerOpInterface return CallRuntime( isolate, context, {gap}); } - V CallRuntime_StringCharCodeAt(Isolate* isolate, V context, + V CallRuntime_StringCharCodeAt(Isolate* isolate, V context, V string, V index) { return CallRuntime( isolate, context, {string, index}); @@ -2839,7 +3105,7 @@ class TurboshaftAssemblerOpInterface isolate, context, {string}); } #endif // V8_INTL_SUPPORT - V CallRuntime_TerminateExecution(Isolate* isolate, + V CallRuntime_TerminateExecution(Isolate* isolate, OpIndex frame_state, V context) { return CallRuntime( @@ -2848,7 +3114,7 @@ class TurboshaftAssemblerOpInterface V CallRuntime_TransitionElementsKind(Isolate* isolate, V context, V object, - V target_map) { + V target_map) { return CallRuntime( isolate, context, {object, target_map}); } @@ -2867,15 +3133,15 @@ class TurboshaftAssemblerOpInterface const FrameStateData* data) { return ReduceIfReachableFrameState(inputs, inlined, data); } - void DeoptimizeIf(OpIndex condition, OpIndex frame_state, + void DeoptimizeIf(V condition, OpIndex frame_state, const DeoptimizeParameters* parameters) { ReduceIfReachableDeoptimizeIf(condition, frame_state, false, parameters); } - void DeoptimizeIfNot(OpIndex condition, OpIndex frame_state, + void DeoptimizeIfNot(V condition, OpIndex frame_state, const DeoptimizeParameters* parameters) { ReduceIfReachableDeoptimizeIf(condition, frame_state, true, parameters); } - void DeoptimizeIf(OpIndex condition, OpIndex frame_state, + void DeoptimizeIf(V condition, OpIndex frame_state, DeoptimizeReason reason, const FeedbackSource& feedback) { if (V8_UNLIKELY(Asm().generating_unreachable_operations())) { return; @@ -2885,7 +3151,7 @@ class TurboshaftAssemblerOpInterface zone->New(reason, feedback); DeoptimizeIf(condition, frame_state, params); } - void DeoptimizeIfNot(OpIndex condition, OpIndex frame_state, + void DeoptimizeIfNot(V condition, OpIndex frame_state, DeoptimizeReason reason, const FeedbackSource& feedback) { if (V8_UNLIKELY(Asm().generating_unreachable_operations())) { @@ -2921,7 +3187,7 @@ class TurboshaftAssemblerOpInterface } #endif // V8_ENABLE_WEBASSEMBLY - void StaticAssert(OpIndex condition, const char* source) { + void StaticAssert(V condition, const char* source) { 
ReduceIfReachableStaticAssert(condition, source); } @@ -3043,28 +3309,28 @@ class TurboshaftAssemblerOpInterface return Asm().Call(callee, frame_state, arguments, ts_call_descriptor); } - V NewConsString(V length, V first, V second) { + V NewConsString(V length, V first, V second) { return ReduceIfReachableNewConsString(length, first, second); } - V NewArray(V length, NewArrayOp::Kind kind, + V NewArray(V length, NewArrayOp::Kind kind, AllocationType allocation_type) { return ReduceIfReachableNewArray(length, kind, allocation_type); } - V NewDoubleArray(V length, AllocationType allocation_type) { + V NewDoubleArray(V length, AllocationType allocation_type) { return NewArray(length, NewArrayOp::Kind::kDouble, allocation_type); } - V DoubleArrayMinMax(V array, DoubleArrayMinMaxOp::Kind kind) { + V DoubleArrayMinMax(V array, DoubleArrayMinMaxOp::Kind kind) { return ReduceIfReachableDoubleArrayMinMax(array, kind); } - V DoubleArrayMin(V array) { + V DoubleArrayMin(V array) { return DoubleArrayMinMax(array, DoubleArrayMinMaxOp::Kind::kMin); } - V DoubleArrayMax(V array) { + V DoubleArrayMax(V array) { return DoubleArrayMinMax(array, DoubleArrayMinMaxOp::Kind::kMax); } - V LoadFieldByIndex(V object, V index) { + V LoadFieldByIndex(V object, V index) { return ReduceIfReachableLoadFieldByIndex(object, index); } @@ -3088,7 +3354,7 @@ class TurboshaftAssemblerOpInterface Comment("ASSERT FAILED"); DebugBreak(); } - END_IF + #endif } @@ -3105,12 +3371,12 @@ class TurboshaftAssemblerOpInterface void Comment(const char* message) { ReduceIfReachableComment(message); } - V BigIntBinop(V left, V right, OpIndex frame_state, + V BigIntBinop(V left, V right, OpIndex frame_state, BigIntBinopOp::Kind kind) { return ReduceIfReachableBigIntBinop(left, right, frame_state, kind); } #define BIGINT_BINOP(kind) \ - V BigInt##kind(V left, V right, \ + V BigInt##kind(V left, V right, \ OpIndex frame_state) { \ return BigIntBinop(left, right, frame_state, \ BigIntBinopOp::Kind::k##kind); \ @@ -3127,25 +3393,25 @@ class TurboshaftAssemblerOpInterface BIGINT_BINOP(ShiftRightArithmetic) #undef BIGINT_BINOP - V BigIntComparison(V left, V right, + V BigIntComparison(V left, V right, BigIntComparisonOp::Kind kind) { return ReduceIfReachableBigIntComparison(left, right, kind); } - V BigIntEqual(V left, V right) { + V BigIntEqual(V left, V right) { return BigIntComparison(left, right, BigIntComparisonOp::Kind::kEqual); } - V BigIntLessThan(V left, V right) { + V BigIntLessThan(V left, V right) { return BigIntComparison(left, right, BigIntComparisonOp::Kind::kLessThan); } - V BigIntLessThanOrEqual(V left, V right) { + V BigIntLessThanOrEqual(V left, V right) { return BigIntComparison(left, right, BigIntComparisonOp::Kind::kLessThanOrEqual); } - V BigIntUnary(V input, BigIntUnaryOp::Kind kind) { + V BigIntUnary(V input, BigIntUnaryOp::Kind kind) { return ReduceIfReachableBigIntUnary(input, kind); } - V BigIntNegate(V input) { + V BigIntNegate(V input) { return BigIntUnary(input, BigIntUnaryOp::Kind::kNegate); } @@ -3266,8 +3532,8 @@ class TurboshaftAssemblerOpInterface void TransitionAndStoreArrayElement( V array, V index, OpIndex value, - TransitionAndStoreArrayElementOp::Kind kind, MaybeHandle fast_map, - MaybeHandle double_map) { + TransitionAndStoreArrayElementOp::Kind kind, MaybeHandle fast_map, + MaybeHandle double_map) { ReduceIfReachableTransitionAndStoreArrayElement(array, index, value, kind, fast_map, double_map); } @@ -3280,17 +3546,17 @@ class TurboshaftAssemblerOpInterface } V CompareMaps(V heap_object, - const 
ZoneRefSet& maps) { + const ZoneRefSet& maps) { return ReduceIfReachableCompareMaps(heap_object, maps); } void CheckMaps(V heap_object, OpIndex frame_state, - const ZoneRefSet& maps, CheckMapsFlags flags, + const ZoneRefSet& maps, CheckMapsFlags flags, const FeedbackSource& feedback) { ReduceIfReachableCheckMaps(heap_object, frame_state, maps, flags, feedback); } - void AssumeMap(V heap_object, const ZoneRefSet& maps) { + void AssumeMap(V heap_object, const ZoneRefSet& maps) { ReduceIfReachableAssumeMap(heap_object, maps); } @@ -3390,34 +3656,34 @@ class TurboshaftAssemblerOpInterface return ReduceIfReachableNull(type); } - V IsNull(V input, wasm::ValueType type) { + V IsNull(V input, wasm::ValueType type) { return ReduceIfReachableIsNull(input, type); } - V AssertNotNull(V object, wasm::ValueType type, + V AssertNotNull(V object, wasm::ValueType type, TrapId trap_id) { return ReduceIfReachableAssertNotNull(object, type, trap_id); } - V RttCanon(V rtts, uint32_t type_index) { + V RttCanon(V rtts, uint32_t type_index) { return ReduceIfReachableRttCanon(rtts, type_index); } - V WasmTypeCheck(V object, OptionalV rtt, + V WasmTypeCheck(V object, OptionalV rtt, WasmTypeCheckConfig config) { return ReduceIfReachableWasmTypeCheck(object, rtt, config); } - V WasmTypeCast(V object, OptionalV rtt, + V WasmTypeCast(V object, OptionalV rtt, WasmTypeCheckConfig config) { return ReduceIfReachableWasmTypeCast(object, rtt, config); } - V AnyConvertExtern(V input) { + V AnyConvertExtern(V input) { return ReduceIfReachableAnyConvertExtern(input); } - V ExternConvertAny(V input) { + V ExternConvertAny(V input) { return ReduceIfReachableExternConvertAny(input); } @@ -3453,25 +3719,25 @@ class TurboshaftAssemblerOpInterface return ReduceIfReachableArrayLength(array, null_check); } - V WasmAllocateArray(V rtt, ConstOrV length, + V WasmAllocateArray(V rtt, ConstOrV length, const wasm::ArrayType* array_type) { return ReduceIfReachableWasmAllocateArray(rtt, resolve(length), array_type); } - V WasmAllocateStruct(V rtt, + V WasmAllocateStruct(V rtt, const wasm::StructType* struct_type) { return ReduceIfReachableWasmAllocateStruct(rtt, struct_type); } - V WasmRefFunc(V wasm_instance, uint32_t function_index) { + V WasmRefFunc(V wasm_instance, uint32_t function_index) { return ReduceIfReachableWasmRefFunc(wasm_instance, function_index); } - V StringAsWtf16(V string) { + V StringAsWtf16(V string) { return ReduceIfReachableStringAsWtf16(string); } - V StringPrepareForGetCodeUnit(V string) { + V StringPrepareForGetCodeUnit(V string) { return ReduceIfReachableStringPrepareForGetCodeUnit(string); } @@ -3488,6 +3754,10 @@ class TurboshaftAssemblerOpInterface return ReduceIfReachableSimd128Unary(input, kind); } + V Simd128ReverseBytes(V input) { + return Simd128Unary(input, Simd128UnaryOp::Kind::kSimd128ReverseBytes); + } + V Simd128Shift(V input, V shift, Simd128ShiftOp::Kind kind) { return ReduceIfReachableSimd128Shift(input, shift, kind); @@ -3538,30 +3808,43 @@ class TurboshaftAssemblerOpInterface return ReduceIfReachableSimd128Shuffle(left, right, shuffle); } - V WasmInstanceParameter() { - return Parameter(wasm::kWasmInstanceParameterIndex, - RegisterRepresentation::Tagged()); + // SIMD256 +#if V8_ENABLE_WASM_SIMD256_REVEC + OpIndex Simd256Extract128Lane(V source, uint8_t lane) { + return ReduceIfReachableSimd256Extract128Lane(source, lane); } - V LoadProtectedFixedArrayElement(V array, - int index) { - return LoadProtectedPointerField( - array, ProtectedFixedArray::OffsetOfElementAt(index)); + V Simd256LoadTransform( 
+ V base, V index, + Simd256LoadTransformOp::LoadKind load_kind, + Simd256LoadTransformOp::TransformKind transform_kind, int offset) { + return ReduceIfReachableSimd256LoadTransform(base, index, load_kind, + transform_kind, offset); } - void StoreFixedArrayElement(V array, int index, V value, - compiler::WriteBarrierKind write_barrier) { - Store(array, value, LoadOp::Kind::TaggedBase(), - MemoryRepresentation::AnyTagged(), write_barrier, - FixedArray::kHeaderSize + index * kTaggedSize); + V Simd256Unary(V input, Simd256UnaryOp::Kind kind) { + return ReduceIfReachableSimd256Unary(input, kind); } - void StoreFixedArrayElement(V array, V index, - V value, - compiler::WriteBarrierKind write_barrier) { - Store(array, index, value, LoadOp::Kind::TaggedBase(), - MemoryRepresentation::AnyTagged(), write_barrier, - FixedArray::kHeaderSize, kTaggedSizeLog2); + V Simd256Binop(V left, V right, + Simd256BinopOp::Kind kind) { + return ReduceIfReachableSimd256Binop(left, right, kind); + } + + V Simd256Shift(V input, V shift, + Simd256ShiftOp::Kind kind) { + return ReduceIfReachableSimd256Shift(input, shift, kind); + } + + V Simd256Ternary(V first, V second, + V third, Simd256TernaryOp::Kind kind) { + return ReduceIfReachableSimd256Ternary(first, second, third, kind); + } +#endif // V8_ENABLE_WASM_SIMD256_REVEC + + V WasmInstanceParameter() { + return Parameter(wasm::kWasmInstanceParameterIndex, + RegisterRepresentation::Tagged()); } OpIndex LoadStackPointer() { return ReduceIfReachableLoadStackPointer(); } @@ -3770,7 +4053,6 @@ class Assembler : public AssemblerData, #endif Block* current_block() const { return current_block_; } - Block* current_catch_block() const { return current_catch_block_; } bool generating_unreachable_operations() const { return current_block() == nullptr; } @@ -3780,6 +4062,11 @@ class Assembler : public AssemblerData, return this->output_graph().Get(op_idx); } + Block* current_catch_block() const { return current_catch_block_; } + // CatchScope should be used in most cases to set the current catch block, but + // this is sometimes impractical. + void set_current_catch_block(Block* block) { current_catch_block_ = block; } + #ifdef DEBUG int& intermediate_tracing_depth() { return intermediate_tracing_depth_; } #endif @@ -4043,14 +4330,8 @@ class TSAssembler : public Assembler> { public: -#ifdef _WIN32 - explicit TSAssembler(Graph& input_graph, Graph& output_graph, - Zone* phase_zone) - : Assembler(input_graph, output_graph, phase_zone) {} -#else - using Assembler>::Assembler; -#endif + using Assembler>::Assembler; }; #include "src/compiler/turboshaft/undef-assembler-macros.inc" diff --git a/deps/v8/src/compiler/turboshaft/assert-types-reducer.h b/deps/v8/src/compiler/turboshaft/assert-types-reducer.h index f28b5b0c0c9aa2..191e196967dbb2 100644 --- a/deps/v8/src/compiler/turboshaft/assert-types-reducer.h +++ b/deps/v8/src/compiler/turboshaft/assert-types-reducer.h @@ -136,6 +136,7 @@ class AssertTypesReducer case RegisterRepresentation::Tagged(): case RegisterRepresentation::Compressed(): case RegisterRepresentation::Simd128(): + case RegisterRepresentation::Simd256(): // TODO(nicohartmann@): Handle remaining cases. 
break; } diff --git a/deps/v8/src/compiler/turboshaft/builtin-call-descriptors.h b/deps/v8/src/compiler/turboshaft/builtin-call-descriptors.h index 3d6f8d61c76266..5dbe9a3088c210 100644 --- a/deps/v8/src/compiler/turboshaft/builtin-call-descriptors.h +++ b/deps/v8/src/compiler/turboshaft/builtin-call-descriptors.h @@ -109,6 +109,58 @@ struct BuiltinCallDescriptor { base_effects.CanReadMemory().RequiredWhenUnused(); }; +#define DECL_GENERIC_BINOP(Name) \ + struct Name : public Descriptor { \ + static constexpr auto kFunction = Builtin::k##Name; \ + using arguments_t = std::tuple, V>; \ + using results_t = std::tuple>; \ + \ + static constexpr bool kNeedsFrameState = true; \ + static constexpr bool kNeedsContext = true; \ + static constexpr Operator::Properties kProperties = \ + Operator::kNoProperties; \ + static constexpr OpEffects kEffects = base_effects.CanCallAnything(); \ + }; + GENERIC_BINOP_LIST(DECL_GENERIC_BINOP) +#undef DECL_GENERIC_BINOP + +#define DECL_GENERIC_UNOP(Name) \ + struct Name : public Descriptor { \ + static constexpr auto kFunction = Builtin::k##Name; \ + using arguments_t = std::tuple>; \ + using results_t = std::tuple>; \ + \ + static constexpr bool kNeedsFrameState = true; \ + static constexpr bool kNeedsContext = true; \ + static constexpr Operator::Properties kProperties = \ + Operator::kNoProperties; \ + static constexpr OpEffects kEffects = base_effects.CanCallAnything(); \ + }; + GENERIC_UNOP_LIST(DECL_GENERIC_UNOP) +#undef DECL_GENERIC_UNOP + + struct ToNumber : public Descriptor { + static constexpr auto kFunction = Builtin::kToNumber; + using arguments_t = std::tuple>; + using results_t = std::tuple>; + + static constexpr bool kNeedsFrameState = true; + static constexpr bool kNeedsContext = true; + static constexpr Operator::Properties kProperties = Operator::kNoProperties; + static constexpr OpEffects kEffects = base_effects.CanCallAnything(); + }; + + struct ToNumeric : public Descriptor { + static constexpr auto kFunction = Builtin::kToNumeric; + using arguments_t = std::tuple>; + using results_t = std::tuple>; + + static constexpr bool kNeedsFrameState = true; + static constexpr bool kNeedsContext = true; + static constexpr Operator::Properties kProperties = Operator::kNoProperties; + static constexpr OpEffects kEffects = base_effects.CanCallAnything(); + }; + struct CopyFastSmiOrObjectElements : public Descriptor { static constexpr auto kFunction = Builtin::kCopyFastSmiOrObjectElements; @@ -484,7 +536,7 @@ struct BuiltinCallDescriptor { struct WasmRefFunc : public Descriptor { static constexpr auto kFunction = Builtin::kWasmRefFunc; using arguments_t = std::tuple>; - using results_t = std::tuple>; + using results_t = std::tuple>; static constexpr bool kNeedsFrameState = false; static constexpr bool kNeedsContext = false; @@ -849,7 +901,7 @@ struct BuiltinCallDescriptor { : public Descriptor { static constexpr auto kFunction = Builtin::kWasmStringFromDataSegment; using arguments_t = - std::tuple, V, V, V, V>; + std::tuple, V, V, V, V, V>; using results_t = std::tuple>; static constexpr bool kNeedsFrameState = false; @@ -1142,6 +1194,20 @@ struct BuiltinCallDescriptor { static constexpr OpEffects kEffects = base_effects.CanChangeControlFlow(); }; + struct WasmFastApiCallTypeCheckAndUpdateIC + : public Descriptor { + static constexpr auto kFunction = + Builtin::kWasmFastApiCallTypeCheckAndUpdateIC; + using arguments_t = std::tuple, V>; + using results_t = std::tuple>; + + static constexpr bool kNeedsFrameState = false; + static constexpr bool 
kNeedsContext = true; + static constexpr Operator::Properties kProperties = Operator::kNoWrite; + static constexpr OpEffects kEffects = + base_effects.CanLeaveCurrentFunction(); + }; + #endif // V8_ENABLE_WEBASSEMBLY }; diff --git a/deps/v8/src/compiler/turboshaft/dataview-lowering-reducer.h b/deps/v8/src/compiler/turboshaft/dataview-lowering-reducer.h index 3747533c816dbc..5b569dfda30dac 100644 --- a/deps/v8/src/compiler/turboshaft/dataview-lowering-reducer.h +++ b/deps/v8/src/compiler/turboshaft/dataview-lowering-reducer.h @@ -50,6 +50,8 @@ class DataViewLoweringReducer : public Next { case kExternalBigInt64Array: case kExternalBigUint64Array: return __ Word64ReverseBytes(value); + case kExternalFloat16Array: + UNIMPLEMENTED(); } } @@ -75,15 +77,13 @@ class DataViewLoweringReducer : public Next { #else Asm().SetVariable(result, BuildReverseBytes(element_type, value)); #endif // V8_TARGET_LITTLE_ENDIAN - } - ELSE { + } ELSE { #if V8_TARGET_LITTLE_ENDIAN Asm().SetVariable(result, BuildReverseBytes(element_type, value)); #else Asm().SetVariable(result, value); #endif // V8_TARGET_LITTLE_ENDIAN } - END_IF // We need to keep the {object} (either the JSArrayBuffer or the JSDataView) // alive so that the GC will not release the JSArrayBuffer (if there's any) @@ -107,15 +107,14 @@ class DataViewLoweringReducer : public Next { #else Asm().SetVariable(value_to_store, BuildReverseBytes(element_type, value)); #endif // V8_TARGET_LITTLE_ENDIAN - } - ELSE { + } ELSE { #if V8_TARGET_LITTLE_ENDIAN Asm().SetVariable(value_to_store, BuildReverseBytes(element_type, value)); #else Asm().SetVariable(value_to_store, value); #endif // V8_TARGET_LITTLE_ENDIAN } - END_IF + const MemoryRepresentation memory_rep = MemoryRepresentation::FromMachineType(machine_type); __ Store(storage, index, Asm().GetVariable(value_to_store), diff --git a/deps/v8/src/compiler/turboshaft/decompression-optimization.cc b/deps/v8/src/compiler/turboshaft/decompression-optimization.cc index 0dacfb5486d190..b938f8df200485 100644 --- a/deps/v8/src/compiler/turboshaft/decompression-optimization.cc +++ b/deps/v8/src/compiler/turboshaft/decompression-optimization.cc @@ -4,7 +4,6 @@ #include "src/compiler/turboshaft/decompression-optimization.h" -#include "src/base/v8-fallthrough.h" #include "src/codegen/machine-type.h" #include "src/compiler/turboshaft/copying-phase.h" #include "src/compiler/turboshaft/operations.h" diff --git a/deps/v8/src/compiler/turboshaft/define-assembler-macros.inc b/deps/v8/src/compiler/turboshaft/define-assembler-macros.inc index baf129a5aeb6a6..0628282d3d82b8 100644 --- a/deps/v8/src/compiler/turboshaft/define-assembler-macros.inc +++ b/deps/v8/src/compiler/turboshaft/define-assembler-macros.inc @@ -23,11 +23,24 @@ Asm().ControlFlowHelper_Bind(label); \ (::v8::internal::compiler::turboshaft::detail::SuppressUnusedWarning( \ CONCAT(is_bound_, __LINE__))) -#define LOOP(loop_label, ...) \ +#define BIND_LOOP(loop_label, ...) \ for(auto [CONCAT(run_loop_, __LINE__), ##__VA_ARGS__] = \ Asm().ControlFlowHelper_BindLoop(loop_label); CONCAT(run_loop_, __LINE__); \ Asm().ControlFlowHelper_EndLoop(loop_label), \ CONCAT(run_loop_, __LINE__) = false) + +#define WHILE(...) 
\ + for (auto [CONCAT(run_loop_, __LINE__), loop_header_xx, loop_exit_xx] \ + = Asm().ControlFlowHelper_While([&]() { \ + return __VA_ARGS__; \ + }); \ + CONCAT(run_loop_, __LINE__); \ + Asm().ControlFlowHelper_EndWhileLoop(loop_header_xx, loop_exit_xx), \ + CONCAT(run_loop_, __LINE__) = false) + +#define BREAK Asm().ControlFlowHelper_Goto(loop_exit_xx, {}) +#define CONTINUE Asm().ControlFlowHelper_Goto(loop_header_xx, {}) + #define GOTO(label, ...) \ Asm().ControlFlowHelper_Goto(label, {__VA_ARGS__}) #define GOTO_IF(cond, label, ...) \ @@ -35,20 +48,64 @@ #define GOTO_IF_NOT(cond, label, ...) \ Asm().ControlFlowHelper_GotoIfNot(cond, label, {__VA_ARGS__}) -#define IF(...) \ - for (bool bound = Asm().ControlFlowHelper_If(__VA_ARGS__, false); bound; \ - (bound = false), Asm().ControlFlowHelper_GotoEnd()) -#define IF_NOT(...) \ - for (bool bound = Asm().ControlFlowHelper_If(__VA_ARGS__, true); bound; \ - (bound = false), Asm().ControlFlowHelper_GotoEnd()) -#define ELSE_IF(...) \ - for (bool bound = Asm().ControlFlowHelper_ElseIf( \ - [&]() { return __VA_ARGS__; }); \ - bound; (bound = false), Asm().ControlFlowHelper_GotoEnd()) -#define ELSE \ - for (bool bound = Asm().ControlFlowHelper_Else(); bound; \ - (bound = false), Asm().ControlFlowHelper_GotoEnd()) -#define END_IF Asm().ControlFlowHelper_EndIf(); +// Clang/GCC helpfully warn us about dangling else in nested if statements. This +// dangling is intentional for the way these macros work, so suppress the +// warning with Pragmas. Clang and GCC helpfully disagree on where the warning +// is (on the if or the else), so they need separate macros. +#if defined(__clang__) +#define FORCE_UNROLL_LOOP _Pragma("clang loop unroll(full)") +#define SUPPRESSED_DANGLING_ELSE_WARNING_IF(...) if (__VA_ARGS__) +#define SUPPRESSED_DANGLING_ELSE_WARNING_ELSE \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdangling-else\"") else _Pragma( \ + "GCC diagnostic pop") +#elif defined(__GNUC__) +#define FORCE_UNROLL_LOOP +#define SUPPRESSED_DANGLING_ELSE_WARNING_IF(...) \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdangling-else\"") if (__VA_ARGS__) \ + _Pragma("GCC diagnostic pop") +#define SUPPRESSED_DANGLING_ELSE_WARNING_ELSE else +#else +#define FORCE_UNROLL_LOOP +#define SUPPRESSED_DANGLING_ELSE_WARNING_IF(...) if (__VA_ARGS__) +#define SUPPRESSED_DANGLING_ELSE_WARNING_ELSE else +#endif + +// IF/ELSE macros. These expand to a real C++ if-else, so that we can get +// similar block syntax behaviour (with an optional `ELSE`). Since C++ will only +// evaluate one side of the if-else, wrap it in a for loop that executes the +// if-else twice, once for each side of the branch. Each iteration also emits a +// goto-end if the corresponding branch target was bound. An if around the for +// loop encapsulates the state -- this is outside the for loop to make it easier +// for the compiler to unroll the two loop iterations. +#define IF(...) \ + SUPPRESSED_DANGLING_ELSE_WARNING_IF( \ + typename std::decay_t::ControlFlowHelper_IfState state; \ + true) \ + FORCE_UNROLL_LOOP \ + for (int iteration = 0, bound = false; \ + iteration < 2 || (Asm().ControlFlowHelper_EndIf(&state), false); \ + (bound ? Asm().ControlFlowHelper_FinishIfBlock(&state) : (void)0), \ + bound = false, iteration++) \ + if (iteration == 0 && \ + (bound = Asm().ControlFlowHelper_BindIf(__VA_ARGS__, &state))) + +#define IF_NOT(...) 
\ + SUPPRESSED_DANGLING_ELSE_WARNING_IF( \ + typename std::decay_t::ControlFlowHelper_IfState state; \ + true) \ + FORCE_UNROLL_LOOP \ + for (int iteration = 0, bound = false; \ + iteration < 2 || (Asm().ControlFlowHelper_EndIf(&state), false); \ + (bound ? Asm().ControlFlowHelper_FinishIfBlock(&state) : (void)0), \ + bound = false, iteration++) \ + if (iteration == 0 && \ + (bound = Asm().ControlFlowHelper_BindIfNot(__VA_ARGS__, &state))) + +#define ELSE \ + SUPPRESSED_DANGLING_ELSE_WARNING_ELSE if ( \ + iteration == 1 && (bound = Asm().ControlFlowHelper_BindElse(&state))) #define Assert(condition) AssertImpl(condition, #condition, __FILE__, __LINE__) diff --git a/deps/v8/src/compiler/turboshaft/fast-api-call-lowering-reducer.h b/deps/v8/src/compiler/turboshaft/fast-api-call-lowering-reducer.h index c7121bffb957bd..4b5d9f8df89284 100644 --- a/deps/v8/src/compiler/turboshaft/fast-api-call-lowering-reducer.h +++ b/deps/v8/src/compiler/turboshaft/fast-api-call-lowering-reducer.h @@ -87,7 +87,7 @@ class FastApiCallLoweringReducer : public Next { MemoryRepresentation::Int32(), offsetof(v8::FastApiCallbackOptions, fallback)); // data = data_argument - OpIndex data_argument_to_pass = AdaptLocalArgument(data_argument); + OpIndex data_argument_to_pass = __ AdaptLocalArgument(data_argument); __ StoreOffHeap(stack_slot, data_argument_to_pass, MemoryRepresentation::UintPtr(), offsetof(v8::FastApiCallbackOptions, data)); @@ -129,21 +129,6 @@ class FastApiCallLoweringReducer : public Next { } private: - OpIndex AdaptLocalArgument(OpIndex argument) { -#ifdef V8_ENABLE_DIRECT_LOCAL - // With direct locals, the argument can be passed directly. - return __ BitcastTaggedToWordPtr(argument); -#else - // With indirect locals, the argument has to be stored on the stack and the - // slot address is passed. - OpIndex stack_slot = - __ StackSlot(sizeof(uintptr_t), alignof(uintptr_t), true); - __ StoreOffHeap(stack_slot, __ BitcastTaggedToWordPtr(argument), - MemoryRepresentation::UintPtr()); - return stack_slot; -#endif - } - std::pair AdaptOverloadedFastCallArgument( OpIndex argument, const FastApiCallFunctionVector& c_functions, const fast_api_call::OverloadsResolutionResult& resolution_result, @@ -169,7 +154,7 @@ class FastApiCallLoweringReducer : public Next { V instance_type = __ LoadInstanceTypeField(map); GOTO_IF_NOT(__ Word32Equal(instance_type, JS_ARRAY_TYPE), next); - OpIndex argument_to_pass = AdaptLocalArgument(argument); + OpIndex argument_to_pass = __ AdaptLocalArgument(argument); OpIndex target_address = __ ExternalConstant( ExternalReference::Create(c_functions[func_index].address, ExternalReference::FAST_C_CALL)); @@ -246,10 +231,10 @@ class FastApiCallLoweringReducer : public Next { } else { switch (arg_type.GetType()) { case CTypeInfo::Type::kV8Value: { - return AdaptLocalArgument(argument); + return __ AdaptLocalArgument(argument); } case CTypeInfo::Type::kFloat32: { - return __ ChangeFloat64ToFloat32(argument); + return __ TruncateFloat64ToFloat32(argument); } case CTypeInfo::Type::kPointer: { // Check that the value is a HeapObject. @@ -325,7 +310,7 @@ class FastApiCallLoweringReducer : public Next { V instance_type = __ LoadInstanceTypeField(map); GOTO_IF_NOT(__ Word32Equal(instance_type, JS_ARRAY_TYPE), handle_error); - return AdaptLocalArgument(argument); + return __ AdaptLocalArgument(argument); } case CTypeInfo::SequenceType::kIsTypedArray: { // Check that the value is a HeapObject. 
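
Editor's note: as context for the define-assembler-macros.inc rework above, the new IF/ELSE macros expand to a real C++ if-else wrapped in a two-iteration for loop, so both branch bodies are visited (and thus emitted into the graph) even though plain C++ would evaluate only one side. Below is a very reduced, hypothetical sketch of that trick with simplified state handling; it is not the real V8 macros.

#include <cstdio>

// An if-with-initializer holds the per-IF state; the for loop runs twice,
// visiting the then-side on iteration 0 and the else-side on iteration 1.
// (Compilers may emit -Wdangling-else here, which is exactly what the
// SUPPRESSED_DANGLING_ELSE_WARNING_* wrappers above take care of.)
struct IfState { bool condition; };

#define SKETCH_IF(cond)                                    \
  if (IfState state{cond}; true)                           \
    for (int iteration = 0; iteration < 2; ++iteration)    \
      if (iteration == 0 && state.condition)

#define SKETCH_ELSE else if (iteration == 1)

int main() {
  int x = 3;
  SKETCH_IF(x > 2) {
    std::printf("then body visited\n");   // "emitted" on iteration 0
  } SKETCH_ELSE {
    std::printf("else body visited\n");   // "emitted" on iteration 1
  }
}

In the real macros, ControlFlowHelper_BindIf/BindElse decide whether each side is actually bound and ControlFlowHelper_EndIf closes the region afterwards, which is why END_IF disappears throughout this diff.
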
@@ -617,7 +602,6 @@ class FastApiCallLoweringReducer : public Next { // We expect that JS execution is enabled, otherwise assert. __ Unreachable(); } - END_IF } __ StoreOffHeap(js_execution_assert, __ Word32Constant(0), MemoryRepresentation::Int8()); diff --git a/deps/v8/src/compiler/turboshaft/graph-builder.cc b/deps/v8/src/compiler/turboshaft/graph-builder.cc index e98254babf8a10..4703885b5d6649 100644 --- a/deps/v8/src/compiler/turboshaft/graph-builder.cc +++ b/deps/v8/src/compiler/turboshaft/graph-builder.cc @@ -597,7 +597,7 @@ OpIndex GraphBuilder::Process( UNARY_CASE(RoundUint32ToFloat32, ChangeUint32ToFloat32) UNARY_CASE(RoundUint64ToFloat32, ChangeUint64ToFloat32) UNARY_CASE(RoundUint64ToFloat64, ChangeUint64ToFloat64) - UNARY_CASE(TruncateFloat64ToFloat32, ChangeFloat64ToFloat32) + UNARY_CASE(TruncateFloat64ToFloat32, TruncateFloat64ToFloat32) UNARY_CASE(TruncateFloat64ToUint32, TruncateFloat64ToUint32OverflowUndefined) UNARY_CASE(TruncateFloat64ToWord32, JSTruncateFloat64ToWord32) @@ -726,7 +726,7 @@ OpIndex GraphBuilder::Process( #define CHECK_OBJECT_IS_CASE(code, kind, input_assumptions, reason, feedback) \ case IrOpcode::k##code: { \ DCHECK(dominating_frame_state.valid()); \ - V input = Map(node->InputAt(0)); \ + V input = Map(node->InputAt(0)); \ V check = \ __ ObjectIs(input, ObjectIsOp::Kind::k##kind, \ ObjectIsOp::InputAssumptions::k##input_assumptions); \ @@ -745,6 +745,9 @@ OpIndex GraphBuilder::Process( NotAJavaScriptObjectOrNullOrUndefined, {}) CHECK_OBJECT_IS_CASE(CheckString, String, HeapObject, NotAString, CheckParametersOf(op).feedback()) + CHECK_OBJECT_IS_CASE(CheckStringOrStringWrapper, StringOrStringWrapper, + HeapObject, NotAStringOrStringWrapper, + CheckParametersOf(op).feedback()) CHECK_OBJECT_IS_CASE(CheckSymbol, Symbol, HeapObject, NotASymbol, {}) CHECK_OBJECT_IS_CASE(CheckBigInt, BigInt, None, NotABigInt, CheckParametersOf(op).feedback()) @@ -1786,7 +1789,7 @@ OpIndex GraphBuilder::Process( V check = __ UintLessThan(index, limit, rep); if ((params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds) != 0) { IF_NOT(LIKELY(check)) { __ Unreachable(); } - END_IF + } else { DCHECK(dominating_frame_state.valid()); __ DeoptimizeIfNot(check, dominating_frame_state, @@ -1911,10 +1914,9 @@ OpIndex GraphBuilder::Process( V result_state = __ template Projection(fast_call_result, 0); - IF(LIKELY(__ Word32Equal(result_state, FastApiCallOp::kSuccessValue))) { + IF (LIKELY(__ Word32Equal(result_state, FastApiCallOp::kSuccessValue))) { GOTO(done, __ template Projection(fast_call_result, 1)); - } - ELSE { + } ELSE { // We need to generate a fallback (both fast and slow call) in case: // 1) the generated code might fail, in case e.g. 
a Smi was passed where // a JSObject was expected and an error must be thrown or @@ -1928,7 +1930,6 @@ OpIndex GraphBuilder::Process( CanThrow::kYes, __ graph_zone())); GOTO(done, slow_call_result); } - END_IF BIND(done, result); return result; @@ -2257,8 +2258,9 @@ OpIndex GraphBuilder::Process( case IrOpcode::kJSStackCheck: { DCHECK_EQ(OpParameter(node->op()), StackCheckKind::kJSFunctionEntry); - return __ StackCheck(StackCheckOp::CheckOrigin::kFromJS, - StackCheckOp::CheckKind::kFunctionHeaderCheck); + __ StackCheck(StackCheckOp::CheckOrigin::kFromJS, + StackCheckOp::CheckKind::kFunctionHeaderCheck); + return OpIndex::Invalid(); } default: diff --git a/deps/v8/src/compiler/turboshaft/index.h b/deps/v8/src/compiler/turboshaft/index.h index 2b6102e74d3c0d..86fdab72b769c4 100644 --- a/deps/v8/src/compiler/turboshaft/index.h +++ b/deps/v8/src/compiler/turboshaft/index.h @@ -36,10 +36,19 @@ class ConstOrV; // Compared to `Operation*`, it is more memory efficient (32bit) and stable when // the operations buffer is re-allocated. class OpIndex { - public: + protected: + // We make this constructor protected so that integers are not easily + // convertible to OpIndex. FromOffset should be used instead to create an + // OpIndex from an offset. explicit constexpr OpIndex(uint32_t offset) : offset_(offset) { DCHECK(CheckInvariants()); } + friend class OperationBuffer; + + public: + static constexpr OpIndex FromOffset(uint32_t offset) { + return OpIndex(offset); + } constexpr OpIndex() : offset_(std::numeric_limits::max()) {} template OpIndex(const ConstOrV&) { // NOLINT(runtime/explicit) @@ -192,7 +201,7 @@ struct Any {}; template struct WordWithBits : public Any { static constexpr int bits = Bits; - static_assert(Bits == 32 || Bits == 64 || Bits == 128); + static_assert(Bits == 32 || Bits == 64 || Bits == 128 || Bits == 256); }; using Word32 = WordWithBits<32>; @@ -209,9 +218,7 @@ using Float32 = FloatWithBits<32>; using Float64 = FloatWithBits<64>; using Simd128 = WordWithBits<128>; - -// TODO(nicohartmann@): Replace all uses of `V` by `V`. -using Tagged = Object; +using Simd256 = WordWithBits<256>; struct Compressed : public Any {}; @@ -318,6 +325,21 @@ struct v_traits { : std::bool_constant> {}; }; +template <> +struct v_traits { + static constexpr bool is_abstract_tag = true; + static constexpr RegisterRepresentation rep = + RegisterRepresentation::Simd256(); + using constexpr_type = uint8_t[kSimd256Size]; + static constexpr bool allows_representation(RegisterRepresentation rep) { + return rep == RegisterRepresentation::Simd256(); + } + + template + struct implicitly_convertible_to + : std::bool_constant> {}; +}; + template struct v_traits>> { static constexpr bool is_abstract_tag = false; @@ -408,8 +430,8 @@ class OptionalV : public OptionalOpIndex { // different conversion rules in the corresponding `v_traits` when necessary. 
template ::template implicitly_convertible_to::value>> - OptionalV(OptionalV index) - : OptionalOpIndex(index) {} // NOLINT(runtime/explicit) + OptionalV(OptionalV index) // NOLINT(runtime/explicit) + : OptionalOpIndex(index) {} template ::template implicitly_convertible_to::value>> OptionalV(V index) : OptionalOpIndex(index) {} // NOLINT(runtime/explicit) @@ -485,6 +507,24 @@ V8_INLINE size_t hash_value(OptionalOpIndex op) { return base::hash_value(op.hash()); } +namespace detail { +template +struct ConstOrVTypeHelper { + static constexpr bool exists = false; + using type = V; +}; +template +struct ConstOrVTypeHelper>> { + static constexpr bool exists = true; + using type = ConstOrV; +}; +} // namespace detail + +template +using maybe_const_or_v_t = typename detail::ConstOrVTypeHelper::type; +template +constexpr bool const_or_v_exists_v = detail::ConstOrVTypeHelper::exists; + // `BlockIndex` is the index of a bound block. // A dominating block always has a smaller index. // It corresponds to the ordering of basic blocks in the operations buffer. diff --git a/deps/v8/src/compiler/turboshaft/instruction-selection-phase.cc b/deps/v8/src/compiler/turboshaft/instruction-selection-phase.cc index abc59812cfd25f..7d74db5de7dabc 100644 --- a/deps/v8/src/compiler/turboshaft/instruction-selection-phase.cc +++ b/deps/v8/src/compiler/turboshaft/instruction-selection-phase.cc @@ -46,310 +46,212 @@ void TraceSequence(OptimizedCompilationInfo* info, } // namespace -// Compute the special reverse-post-order block ordering, which is essentially -// a RPO of the graph where loop bodies are contiguous. Properties: -// 1. If block A is a predecessor of B, then A appears before B in the order, -// unless B is a loop header and A is in the loop headed at B -// (i.e. A -> B is a backedge). -// => If block A dominates block B, then A appears before B in the order. -// => If block A is a loop header, A appears before all blocks in the loop -// headed at A. -// 2. All loops are contiguous in the order (i.e. no intervening blocks that -// do not belong to the loop.) -// Note a simple RPO traversal satisfies (1) but not (2). -// TODO(nicohartmann@): Investigate faster and simpler alternatives. -class TurboshaftSpecialRPONumberer { - public: - // Numbering for BasicBlock::rpo_number for this block traversal: - static const int kBlockOnStack = -2; - static const int kBlockVisited1 = -3; - static const int kBlockVisited2 = -4; - static const int kBlockUnvisited = -1; - - using Backedge = std::pair; - - struct SpecialRPOStackFrame { - const Block* block = nullptr; - size_t index = 0; - base::SmallVector successors; - - SpecialRPOStackFrame(const Block* block, size_t index, - base::SmallVector successors) - : block(block), index(index), successors(std::move(successors)) {} +ZoneVector TurboshaftSpecialRPONumberer::ComputeSpecialRPO() { + ZoneVector stack(zone()); + ZoneVector backedges(zone()); + // Determined empirically on a large Wasm module. Since they are allocated + // only once per function compilation, the memory usage is not critical. 
+ stack.reserve(64); + backedges.reserve(32); + size_t num_loops = 0; + + auto Push = [&](const Block* block) { + auto succs = SuccessorBlocks(*block, *graph_); + stack.emplace_back(block, 0, std::move(succs)); + set_rpo_number(block, kBlockOnStack); }; - struct LoopInfo { - const Block* header; - base::SmallVector outgoing; - BitVector* members; - LoopInfo* prev; - const Block* end; - const Block* start; + const Block* entry = &graph_->StartBlock(); - void AddOutgoing(Zone* zone, const Block* block) { - outgoing.push_back(block); - } - }; - - struct BlockData { - static constexpr size_t kNoLoopNumber = std::numeric_limits::max(); - int32_t rpo_number = kBlockUnvisited; - size_t loop_number = kNoLoopNumber; - const Block* rpo_next = nullptr; - }; - - TurboshaftSpecialRPONumberer(const Graph& graph, Zone* zone) - : graph_(&graph), block_data_(graph.block_count(), zone), loops_(zone) {} + // Find correct insertion point within existing order. + const Block* order = nullptr; - ZoneVector ComputeSpecialRPO() { - ZoneVector stack(zone()); - ZoneVector backedges(zone()); - // Determined empirically on a large Wasm module. Since they are allocated - // only once per function compilation, the memory usage is not critical. - stack.reserve(64); - backedges.reserve(32); - size_t num_loops = 0; + Push(&graph_->StartBlock()); - auto Push = [&](const Block* block) { - auto succs = SuccessorBlocks(*block, *graph_); - stack.emplace_back(block, 0, std::move(succs)); - set_rpo_number(block, kBlockOnStack); - }; + while (!stack.empty()) { + SpecialRPOStackFrame& frame = stack.back(); - const Block* entry = &graph_->StartBlock(); - - // Find correct insertion point within existing order. - const Block* order = nullptr; - - Push(&graph_->StartBlock()); - - while (!stack.empty()) { - SpecialRPOStackFrame& frame = stack.back(); - - if (frame.index < frame.successors.size()) { - // Process the next successor. - const Block* succ = frame.successors[frame.index++]; - if (rpo_number(succ) == kBlockVisited1) continue; - if (rpo_number(succ) == kBlockOnStack) { - // The successor is on the stack, so this is a backedge (cycle). - DCHECK_EQ(frame.index - 1, 0); - backedges.emplace_back(frame.block, frame.index - 1); - // Assign a new loop number to the header. - DCHECK(!has_loop_number(succ)); - set_loop_number(succ, num_loops++); - } else { - // Push the successor onto the stack. - DCHECK_EQ(rpo_number(succ), kBlockUnvisited); - Push(succ); - } + if (frame.index < frame.successors.size()) { + // Process the next successor. + const Block* succ = frame.successors[frame.index++]; + if (rpo_number(succ) == kBlockVisited1) continue; + if (rpo_number(succ) == kBlockOnStack) { + // The successor is on the stack, so this is a backedge (cycle). + DCHECK_EQ(frame.index - 1, 0); + backedges.emplace_back(frame.block, frame.index - 1); + // Assign a new loop number to the header. + DCHECK(!has_loop_number(succ)); + set_loop_number(succ, num_loops++); } else { - // Finished with all successors; pop the stack and add the block. - order = PushFront(order, frame.block); - set_rpo_number(frame.block, kBlockVisited1); - stack.pop_back(); + // Push the successor onto the stack. + DCHECK_EQ(rpo_number(succ), kBlockUnvisited); + Push(succ); } + } else { + // Finished with all successors; pop the stack and add the block. + order = PushFront(order, frame.block); + set_rpo_number(frame.block, kBlockVisited1); + stack.pop_back(); } + } - // If no loops were encountered, then the order we computed was correct. 
- if (num_loops == 0) return ComputeBlockPermutation(entry); - - // Otherwise, compute the loop information from the backedges in order - // to perform a traversal that groups loop bodies together. - ComputeLoopInfo(num_loops, backedges); - - // Initialize the "loop stack". We assume that the entry cannot be a loop - // header. - CHECK(!has_loop_number(entry)); - LoopInfo* loop = nullptr; - order = nullptr; - - // Perform an iterative post-order traversal, visiting loop bodies before - // edges that lead out of loops. Visits each block once, but linking loop - // sections together is linear in the loop size, so overall is - // O(|B| + max(loop_depth) * max(|loop|)) - DCHECK(stack.empty()); - Push(&graph_->StartBlock()); - while (!stack.empty()) { - SpecialRPOStackFrame& frame = stack.back(); - const Block* block = frame.block; - const Block* succ = nullptr; - - if (frame.index < frame.successors.size()) { - // Process the next normal successor. - succ = frame.successors[frame.index++]; - } else if (has_loop_number(block)) { - // Process additional outgoing edges from the loop header. - if (rpo_number(block) == kBlockOnStack) { - // Finish the loop body the first time the header is left on the - // stack. - DCHECK_NOT_NULL(loop); - DCHECK_EQ(loop->header, block); - loop->start = PushFront(order, block); - order = loop->end; - set_rpo_number(block, kBlockVisited2); - // Pop the loop stack and continue visiting outgoing edges within - // the context of the outer loop, if any. - loop = loop->prev; - // We leave the loop header on the stack; the rest of this iteration - // and later iterations will go through its outgoing edges list. - } + // If no loops were encountered, then the order we computed was correct. + if (num_loops == 0) return ComputeBlockPermutation(entry); + + // Otherwise, compute the loop information from the backedges in order + // to perform a traversal that groups loop bodies together. + ComputeLoopInfo(num_loops, backedges); + + // Initialize the "loop stack". We assume that the entry cannot be a loop + // header. + CHECK(!has_loop_number(entry)); + LoopInfo* loop = nullptr; + order = nullptr; + + // Perform an iterative post-order traversal, visiting loop bodies before + // edges that lead out of loops. Visits each block once, but linking loop + // sections together is linear in the loop size, so overall is + // O(|B| + max(loop_depth) * max(|loop|)) + DCHECK(stack.empty()); + Push(&graph_->StartBlock()); + while (!stack.empty()) { + SpecialRPOStackFrame& frame = stack.back(); + const Block* block = frame.block; + const Block* succ = nullptr; + + if (frame.index < frame.successors.size()) { + // Process the next normal successor. + succ = frame.successors[frame.index++]; + } else if (has_loop_number(block)) { + // Process additional outgoing edges from the loop header. + if (rpo_number(block) == kBlockOnStack) { + // Finish the loop body the first time the header is left on the + // stack. + DCHECK_NOT_NULL(loop); + DCHECK_EQ(loop->header, block); + loop->start = PushFront(order, block); + order = loop->end; + set_rpo_number(block, kBlockVisited2); + // Pop the loop stack and continue visiting outgoing edges within + // the context of the outer loop, if any. + loop = loop->prev; + // We leave the loop header on the stack; the rest of this iteration + // and later iterations will go through its outgoing edges list. + } - // Use the next outgoing edge if there are any. 
- size_t outgoing_index = frame.index - frame.successors.size(); - LoopInfo* info = &loops_[loop_number(block)]; - DCHECK_NE(loop, info); - if (block != entry && outgoing_index < info->outgoing.size()) { - succ = info->outgoing[outgoing_index]; - ++frame.index; - } + // Use the next outgoing edge if there are any. + size_t outgoing_index = frame.index - frame.successors.size(); + LoopInfo* info = &loops_[loop_number(block)]; + DCHECK_NE(loop, info); + if (block != entry && outgoing_index < info->outgoing.size()) { + succ = info->outgoing[outgoing_index]; + ++frame.index; } + } - if (succ != nullptr) { - // Process the next successor. - if (rpo_number(succ) == kBlockOnStack) continue; - if (rpo_number(succ) == kBlockVisited2) continue; - DCHECK_EQ(kBlockVisited1, rpo_number(succ)); - if (loop != nullptr && !loop->members->Contains(succ->index().id())) { - // The successor is not in the current loop or any nested loop. - // Add it to the outgoing edges of this loop and visit it later. - loop->AddOutgoing(zone(), succ); - } else { - // Push the successor onto the stack. - Push(succ); - if (has_loop_number(succ)) { - // Push the inner loop onto the loop stack. - DCHECK_LT(loop_number(succ), num_loops); - LoopInfo* next = &loops_[loop_number(succ)]; - next->end = order; - next->prev = loop; - loop = next; - } - } + if (succ != nullptr) { + // Process the next successor. + if (rpo_number(succ) == kBlockOnStack) continue; + if (rpo_number(succ) == kBlockVisited2) continue; + DCHECK_EQ(kBlockVisited1, rpo_number(succ)); + if (loop != nullptr && !loop->members->Contains(succ->index().id())) { + // The successor is not in the current loop or any nested loop. + // Add it to the outgoing edges of this loop and visit it later. + loop->AddOutgoing(zone(), succ); } else { - // Finish with all successors of the current block. - if (has_loop_number(block)) { - // If we are going to pop a loop header, then add its entire body. - LoopInfo* info = &loops_[loop_number(block)]; - for (const Block* b = info->start; true; - b = block_data_[b->index()].rpo_next) { - if (block_data_[b->index()].rpo_next == info->end) { - PushFront(order, b); - info->end = order; - break; - } + // Push the successor onto the stack. + Push(succ); + if (has_loop_number(succ)) { + // Push the inner loop onto the loop stack. + DCHECK_LT(loop_number(succ), num_loops); + LoopInfo* next = &loops_[loop_number(succ)]; + next->end = order; + next->prev = loop; + loop = next; + } + } + } else { + // Finish with all successors of the current block. + if (has_loop_number(block)) { + // If we are going to pop a loop header, then add its entire body. + LoopInfo* info = &loops_[loop_number(block)]; + for (const Block* b = info->start; true; + b = block_data_[b->index()].rpo_next) { + if (block_data_[b->index()].rpo_next == info->end) { + PushFront(order, b); + info->end = order; + break; } - order = info->start; - } else { - // Pop a single node off the stack and add it to the order. - order = PushFront(order, block); - set_rpo_number(block, kBlockVisited2); } - stack.pop_back(); + order = info->start; + } else { + // Pop a single node off the stack and add it to the order. + order = PushFront(order, block); + set_rpo_number(block, kBlockVisited2); } + stack.pop_back(); } - - return ComputeBlockPermutation(entry); } - private: - // Computes loop membership from the backedges of the control flow graph. - void ComputeLoopInfo(size_t num_loops, ZoneVector& backedges) { - ZoneVector stack(zone()); - - // Extend loop information vector. 
- loops_.resize(num_loops, LoopInfo{}); - - // Compute loop membership starting from backedges. - // O(max(loop_depth) * |loop|) - for (auto [backedge, header_index] : backedges) { - const Block* header = SuccessorBlocks(*backedge, *graph_)[header_index]; - DCHECK(header->IsLoop()); - size_t loop_num = loop_number(header); - DCHECK_NULL(loops_[loop_num].header); - loops_[loop_num].header = header; - loops_[loop_num].members = - zone()->New(graph_->block_count(), zone()); - - if (backedge != header) { - // As long as the header doesn't have a backedge to itself, - // Push the member onto the queue and process its predecessors. - DCHECK(!loops_[loop_num].members->Contains(backedge->index().id())); - loops_[loop_num].members->Add(backedge->index().id()); - stack.push_back(backedge); - } + return ComputeBlockPermutation(entry); +} - // Propagate loop membership backwards. All predecessors of M up to the - // loop header H are members of the loop too. O(|blocks between M and H|). - while (!stack.empty()) { - const Block* block = stack.back(); - stack.pop_back(); - for (const Block* pred : block->PredecessorsIterable()) { - if (pred != header) { - if (!loops_[loop_num].members->Contains(pred->index().id())) { - loops_[loop_num].members->Add(pred->index().id()); - stack.push_back(pred); - } +// Computes loop membership from the backedges of the control flow graph. +void TurboshaftSpecialRPONumberer::ComputeLoopInfo( + size_t num_loops, ZoneVector& backedges) { + ZoneVector stack(zone()); + + // Extend loop information vector. + loops_.resize(num_loops, LoopInfo{}); + + // Compute loop membership starting from backedges. + // O(max(loop_depth) * |loop|) + for (auto [backedge, header_index] : backedges) { + const Block* header = SuccessorBlocks(*backedge, *graph_)[header_index]; + DCHECK(header->IsLoop()); + size_t loop_num = loop_number(header); + DCHECK_NULL(loops_[loop_num].header); + loops_[loop_num].header = header; + loops_[loop_num].members = + zone()->New(graph_->block_count(), zone()); + + if (backedge != header) { + // As long as the header doesn't have a backedge to itself, + // Push the member onto the queue and process its predecessors. + DCHECK(!loops_[loop_num].members->Contains(backedge->index().id())); + loops_[loop_num].members->Add(backedge->index().id()); + stack.push_back(backedge); + } + + // Propagate loop membership backwards. All predecessors of M up to the + // loop header H are members of the loop too. O(|blocks between M and H|). 
+ while (!stack.empty()) { + const Block* block = stack.back(); + stack.pop_back(); + for (const Block* pred : block->PredecessorsIterable()) { + if (pred != header) { + if (!loops_[loop_num].members->Contains(pred->index().id())) { + loops_[loop_num].members->Add(pred->index().id()); + stack.push_back(pred); } } } } } +} - ZoneVector ComputeBlockPermutation(const Block* entry) { - ZoneVector result(graph_->block_count(), zone()); - size_t i = 0; - for (const Block* b = entry; b; b = block_data_[b->index()].rpo_next) { - result[i++] = b->index().id(); - } - DCHECK_EQ(i, graph_->block_count()); - return result; - } - - int32_t rpo_number(const Block* block) const { - return block_data_[block->index()].rpo_number; - } - - void set_rpo_number(const Block* block, int32_t rpo_number) { - block_data_[block->index()].rpo_number = rpo_number; - } - - bool has_loop_number(const Block* block) const { - return block_data_[block->index()].loop_number != BlockData::kNoLoopNumber; - } - - size_t loop_number(const Block* block) const { - DCHECK(has_loop_number(block)); - return block_data_[block->index()].loop_number; - } - - void set_loop_number(const Block* block, size_t loop_number) { - block_data_[block->index()].loop_number = loop_number; - } - - const Block* PushFront(const Block* head, const Block* block) { - block_data_[block->index()].rpo_next = head; - return block; +ZoneVector TurboshaftSpecialRPONumberer::ComputeBlockPermutation( + const Block* entry) { + ZoneVector result(graph_->block_count(), zone()); + size_t i = 0; + for (const Block* b = entry; b; b = block_data_[b->index()].rpo_next) { + result[i++] = b->index().id(); } + DCHECK_EQ(i, graph_->block_count()); + return result; +} - Zone* zone() const { return loops_.zone(); } - - const Graph* graph_; - FixedBlockSidetable block_data_; - ZoneVector loops_; -}; - -base::Optional InstructionSelectionPhase::Run( - Zone* temp_zone, const CallDescriptor* call_descriptor, Linkage* linkage, - CodeTracer* code_tracer) { - PipelineData* data = &PipelineData::Get(); - Graph& graph = PipelineData::Get().graph(); - - // Compute special RPO order.... - TurboshaftSpecialRPONumberer numberer(graph, temp_zone); - auto schedule = numberer.ComputeSpecialRPO(); - graph.ReorderBlocks(base::VectorOf(schedule)); - - // Determine deferred blocks. +void PropagateDeferred(Graph& graph) { graph.StartBlock().set_custom_data( 0, Block::CustomDataKind::kDeferredInSchedule); for (Block& block : graph.blocks()) { @@ -388,6 +290,21 @@ base::Optional InstructionSelectionPhase::Run( } } } +} + +base::Optional InstructionSelectionPhase::Run( + Zone* temp_zone, const CallDescriptor* call_descriptor, Linkage* linkage, + CodeTracer* code_tracer) { + PipelineData* data = &PipelineData::Get(); + Graph& graph = PipelineData::Get().graph(); + + // Compute special RPO order.... + TurboshaftSpecialRPONumberer numberer(graph, temp_zone); + auto schedule = numberer.ComputeSpecialRPO(); + graph.ReorderBlocks(base::VectorOf(schedule)); + + // Determine deferred blocks. + PropagateDeferred(graph); // Print graph once before instruction selection. 
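
Editor's note: the ComputeLoopInfo code above propagates loop membership backwards from each backedge source until it reaches the loop header. The following self-contained worklist sketch shows that walk on a tiny hypothetical control-flow graph; it uses plain vectors rather than V8's Block/BitVector types.

#include <cstdio>
#include <vector>

int main() {
  // Hypothetical CFG: 0 -> 1 -> 2 -> 3 -> 1 (backedge), 2 -> 4 (loop exit).
  // preds[b] lists the predecessors of block b.
  std::vector<std::vector<int>> preds = {{}, {0, 3}, {1}, {2}, {2}};
  const int header = 1;
  const int backedge_src = 3;

  // Walk backwards from the backedge source; every block reached before the
  // header belongs to the loop (the header itself is tracked separately).
  std::vector<bool> member(preds.size(), false);
  std::vector<int> worklist;
  if (backedge_src != header) {
    member[backedge_src] = true;
    worklist.push_back(backedge_src);
  }
  while (!worklist.empty()) {
    int block = worklist.back();
    worklist.pop_back();
    for (int pred : preds[block]) {
      if (pred != header && !member[pred]) {
        member[pred] = true;
        worklist.push_back(pred);
      }
    }
  }
  for (int b = 0; b < static_cast<int>(member.size()); ++b) {
    std::printf("block %d: %s\n", b, member[b] ? "in loop" : "not in loop");
  }
  // Prints: blocks 2 and 3 are in the loop, 0 and 4 are not; block 1 is the
  // header, which the real code records separately in LoopInfo::header.
}
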
turboshaft::PrintTurboshaftGraph(temp_zone, code_tracer, diff --git a/deps/v8/src/compiler/turboshaft/instruction-selection-phase.h b/deps/v8/src/compiler/turboshaft/instruction-selection-phase.h index a01957a5a5daa6..7189bd8e27baf8 100644 --- a/deps/v8/src/compiler/turboshaft/instruction-selection-phase.h +++ b/deps/v8/src/compiler/turboshaft/instruction-selection-phase.h @@ -9,6 +9,102 @@ namespace v8::internal::compiler::turboshaft { +// Compute the special reverse-post-order block ordering, which is essentially +// a RPO of the graph where loop bodies are contiguous. Properties: +// 1. If block A is a predecessor of B, then A appears before B in the order, +// unless B is a loop header and A is in the loop headed at B +// (i.e. A -> B is a backedge). +// => If block A dominates block B, then A appears before B in the order. +// => If block A is a loop header, A appears before all blocks in the loop +// headed at A. +// 2. All loops are contiguous in the order (i.e. no intervening blocks that +// do not belong to the loop.) +// Note a simple RPO traversal satisfies (1) but not (2). +// TODO(nicohartmann@): Investigate faster and simpler alternatives. +class V8_EXPORT_PRIVATE TurboshaftSpecialRPONumberer { + public: + // Numbering for BasicBlock::rpo_number for this block traversal: + static const int kBlockOnStack = -2; + static const int kBlockVisited1 = -3; + static const int kBlockVisited2 = -4; + static const int kBlockUnvisited = -1; + + using Backedge = std::pair; + + struct SpecialRPOStackFrame { + const Block* block = nullptr; + size_t index = 0; + base::SmallVector successors; + + SpecialRPOStackFrame(const Block* block, size_t index, + base::SmallVector successors) + : block(block), index(index), successors(std::move(successors)) {} + }; + + struct LoopInfo { + const Block* header; + base::SmallVector outgoing; + BitVector* members; + LoopInfo* prev; + const Block* end; + const Block* start; + + void AddOutgoing(Zone* zone, const Block* block) { + outgoing.push_back(block); + } + }; + + struct BlockData { + static constexpr size_t kNoLoopNumber = std::numeric_limits::max(); + int32_t rpo_number = kBlockUnvisited; + size_t loop_number = kNoLoopNumber; + const Block* rpo_next = nullptr; + }; + + TurboshaftSpecialRPONumberer(const Graph& graph, Zone* zone) + : graph_(&graph), block_data_(graph.block_count(), zone), loops_(zone) {} + + ZoneVector ComputeSpecialRPO(); + + private: + void ComputeLoopInfo(size_t num_loops, ZoneVector& backedges); + ZoneVector ComputeBlockPermutation(const Block* entry); + + int32_t rpo_number(const Block* block) const { + return block_data_[block->index()].rpo_number; + } + + void set_rpo_number(const Block* block, int32_t rpo_number) { + block_data_[block->index()].rpo_number = rpo_number; + } + + bool has_loop_number(const Block* block) const { + return block_data_[block->index()].loop_number != BlockData::kNoLoopNumber; + } + + size_t loop_number(const Block* block) const { + DCHECK(has_loop_number(block)); + return block_data_[block->index()].loop_number; + } + + void set_loop_number(const Block* block, size_t loop_number) { + block_data_[block->index()].loop_number = loop_number; + } + + const Block* PushFront(const Block* head, const Block* block) { + block_data_[block->index()].rpo_next = head; + return block; + } + + Zone* zone() const { return loops_.zone(); } + + const Graph* graph_; + FixedBlockSidetable block_data_; + ZoneVector loops_; +}; + +V8_EXPORT_PRIVATE void PropagateDeferred(Graph& graph); + struct InstructionSelectionPhase { 
DECL_TURBOSHAFT_PHASE_CONSTANTS(InstructionSelection) diff --git a/deps/v8/src/compiler/turboshaft/int64-lowering-reducer.h b/deps/v8/src/compiler/turboshaft/int64-lowering-reducer.h index 7128da58e61ca7..a236c7b8906ebf 100644 --- a/deps/v8/src/compiler/turboshaft/int64-lowering-reducer.h +++ b/deps/v8/src/compiler/turboshaft/int64-lowering-reducer.h @@ -500,29 +500,27 @@ class Int64LoweringReducer : public Next { OpIndex LowerClz(V input) { auto [low, high] = Unpack(input); - ScopedVar result(Asm()); + ScopedVar result(this); IF (__ Word32Equal(high, 0)) { result = __ Word32Add(32, __ Word32CountLeadingZeros(low)); - } - ELSE { + } ELSE { result = __ Word32CountLeadingZeros(high); } - END_IF - return __ Tuple(*result, __ Word32Constant(0)); + + return __ Tuple(result, __ Word32Constant(0)); } OpIndex LowerCtz(V input) { DCHECK(SupportedOperations::word32_ctz()); auto [low, high] = Unpack(input); - ScopedVar result(Asm()); + ScopedVar result(this); IF (__ Word32Equal(low, 0)) { result = __ Word32Add(32, __ Word32CountTrailingZeros(high)); - } - ELSE { + } ELSE { result = __ Word32CountTrailingZeros(low); } - END_IF - return __ Tuple(*result, __ Word32Constant(0)); + + return __ Tuple(result, __ Word32Constant(0)); } OpIndex LowerPopCount(V input) { @@ -625,16 +623,15 @@ class Int64LoweringReducer : public Next { // The low word and the high word can be swapped either at the input or // at the output. We swap the inputs so that shift does not have to be // kept for so long in a register. - ScopedVar var_low(Asm(), left_high); - ScopedVar var_high(Asm(), left_low); + ScopedVar var_low(this, left_high); + ScopedVar var_high(this, left_low); IF (less_than_32) { var_low = left_low; var_high = left_high; } - END_IF - V rotate_low = __ Word32RotateRight(*var_low, safe_shift); - V rotate_high = __ Word32RotateRight(*var_high, safe_shift); + V rotate_low = __ Word32RotateRight(var_low, safe_shift); + V rotate_high = __ Word32RotateRight(var_high, safe_shift); V low_node = __ Word32BitwiseOr(__ Word32BitwiseAnd(rotate_low, bit_mask), diff --git a/deps/v8/src/compiler/turboshaft/js-generic-lowering-reducer.h b/deps/v8/src/compiler/turboshaft/js-generic-lowering-reducer.h new file mode 100644 index 00000000000000..8a0bddb4d0e755 --- /dev/null +++ b/deps/v8/src/compiler/turboshaft/js-generic-lowering-reducer.h @@ -0,0 +1,88 @@ +// Copyright 2024 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_TURBOSHAFT_JS_GENERIC_LOWERING_REDUCER_H_ +#define V8_COMPILER_TURBOSHAFT_JS_GENERIC_LOWERING_REDUCER_H_ + +#include "src/compiler/turboshaft/assembler.h" +#include "src/compiler/turboshaft/index.h" +#include "src/compiler/turboshaft/operations.h" + +namespace v8::internal::compiler::turboshaft { + +#include "src/compiler/turboshaft/define-assembler-macros.inc" + +// JSGenericLowering lowers JS operators to generic builtin calls (possibly with +// some small inlined fast paths). +// +// It should run after SimplifiedLowering, which should have already replaced +// most of the JS operations with lower levels (Simplified or Machine) more +// specialized operations. However, SimplifiedLowering won't be able to remove +// all JS operators; the remaining JS operations will thus be replaced by +// builtin calls here in JSGenericLowering. 
+ +template +class JSGenericLoweringReducer : public Next { + public: + TURBOSHAFT_REDUCER_BOILERPLATE(JSGenericLowering) + + OpIndex REDUCE(GenericBinop)(V left, V right, + OpIndex frame_state, V context, + GenericBinopOp::Kind kind) { + // Note that we're **not** calling the __WithFeedback variants of the + // generic builtins, on purpose. There have been several experiments with + // this in the past, and we always concluded that it wasn't worth it. The + // latest experiment was ended with this commit: + // https://crrev.com/c/4110858. + switch (kind) { +#define CASE(Name) \ + case GenericBinopOp::Kind::k##Name: \ + return __ CallBuiltin_##Name(isolate_, frame_state, context, left, right); + GENERIC_BINOP_LIST(CASE) +#undef CASE + } + } + + OpIndex REDUCE(GenericUnop)(V input, OpIndex frame_state, + V context, GenericUnopOp::Kind kind) { + switch (kind) { +#define CASE(Name) \ + case GenericUnopOp::Kind::k##Name: \ + return __ CallBuiltin_##Name(isolate_, frame_state, context, input); + GENERIC_UNOP_LIST(CASE) +#undef CASE + } + } + + OpIndex REDUCE(ToNumberOrNumeric)(V input, OpIndex frame_state, + V context, + Object::Conversion kind) { + Label done(this); + // Avoid builtin call for Smis and HeapNumbers. + GOTO_IF(__ ObjectIs(input, ObjectIsOp::Kind::kNumber, + ObjectIsOp::InputAssumptions::kNone), + done, input); + switch (kind) { + case Object::Conversion::kToNumber: + GOTO(done, + __ CallBuiltin_ToNumber(isolate_, frame_state, context, input)); + break; + case Object::Conversion::kToNumeric: + GOTO(done, + __ CallBuiltin_ToNumeric(isolate_, frame_state, context, input)); + break; + } + BIND(done, result); + return result; + } + + private: + Isolate* isolate_ = PipelineData::Get().isolate(); +}; + +#include "src/compiler/turboshaft/undef-assembler-macros.inc" + +} // namespace v8::internal::compiler::turboshaft + +#endif // V8_COMPILER_TURBOSHAFT_JS_GENERIC_LOWERING_REDUCER_H_ diff --git a/deps/v8/src/compiler/turboshaft/load-store-simplification-reducer.h b/deps/v8/src/compiler/turboshaft/load-store-simplification-reducer.h index 2268f9cc40d261..ead732702ecd4c 100644 --- a/deps/v8/src/compiler/turboshaft/load-store-simplification-reducer.h +++ b/deps/v8/src/compiler/turboshaft/load-store-simplification-reducer.h @@ -30,6 +30,13 @@ struct LoadStoreSimplificationConfiguration { // *(base + index * element_size_log2 + displacement), but architectures // typically support only a limited `element_size_log2`. static constexpr int kMaxElementSizeLog2 = 0; +#elif V8_TARGET_ARCH_S390X + static constexpr bool kNeedsUntaggedBase = false; + // s390x supports *(base + index + displacement), element_size isn't + // supported. 
+ static constexpr int32_t kMinOffset = std::numeric_limits::min() + 1; + static constexpr int32_t kMaxOffset = std::numeric_limits::max(); + static constexpr int kMaxElementSizeLog2 = 0; #else static constexpr bool kNeedsUntaggedBase = false; // We don't want to encode INT32_MIN in the offset becauce instruction diff --git a/deps/v8/src/compiler/turboshaft/machine-lowering-phase.cc b/deps/v8/src/compiler/turboshaft/machine-lowering-phase.cc index 049b521b20a52d..777c0da838a6f0 100644 --- a/deps/v8/src/compiler/turboshaft/machine-lowering-phase.cc +++ b/deps/v8/src/compiler/turboshaft/machine-lowering-phase.cc @@ -7,6 +7,7 @@ #include "src/compiler/turboshaft/copying-phase.h" #include "src/compiler/turboshaft/dataview-lowering-reducer.h" #include "src/compiler/turboshaft/fast-api-call-lowering-reducer.h" +#include "src/compiler/turboshaft/js-generic-lowering-reducer.h" #include "src/compiler/turboshaft/machine-lowering-reducer-inl.h" #include "src/compiler/turboshaft/machine-optimization-reducer.h" #include "src/compiler/turboshaft/required-optimization-reducer.h" @@ -16,8 +17,14 @@ namespace v8::internal::compiler::turboshaft { void MachineLoweringPhase::Run(Zone* temp_zone) { - CopyingPhase::Run(temp_zone); } diff --git a/deps/v8/src/compiler/turboshaft/machine-lowering-reducer-inl.h b/deps/v8/src/compiler/turboshaft/machine-lowering-reducer-inl.h index e9d74121972524..8f37ef00f7edc1 100644 --- a/deps/v8/src/compiler/turboshaft/machine-lowering-reducer-inl.h +++ b/deps/v8/src/compiler/turboshaft/machine-lowering-reducer-inl.h @@ -7,7 +7,6 @@ #include "src/base/logging.h" #include "src/base/optional.h" -#include "src/base/v8-fallthrough.h" #include "src/codegen/external-reference.h" #include "src/codegen/machine-type.h" #include "src/common/globals.h" @@ -97,7 +96,7 @@ class MachineLoweringReducer : public Next { if (minus_zero_mode == CheckForMinusZeroMode::kCheckForMinusZero) { // Check if {value} is -0. - IF(UNLIKELY(__ Word32Equal(i32, 0))) { + IF (UNLIKELY(__ Word32Equal(i32, 0))) { // In case of 0, we need to check the high bits for the IEEE -0 // pattern. V check_negative = @@ -105,7 +104,6 @@ class MachineLoweringReducer : public Next { __ DeoptimizeIf(check_negative, frame_state, DeoptimizeReason::kMinusZero, feedback); } - END_IF } return i32; @@ -118,7 +116,7 @@ class MachineLoweringReducer : public Next { if (minus_zero_mode == CheckForMinusZeroMode::kCheckForMinusZero) { // Check if {value} is -0. - IF(UNLIKELY(__ Word64Equal(i64, 0))) { + IF (UNLIKELY(__ Word64Equal(i64, 0))) { // In case of 0, we need to check the high bits for the IEEE -0 // pattern. 
V check_negative = @@ -126,7 +124,6 @@ class MachineLoweringReducer : public Next { __ DeoptimizeIf(check_negative, frame_state, DeoptimizeReason::kMinusZero, feedback); } - END_IF } return i64; @@ -140,7 +137,7 @@ class MachineLoweringReducer : public Next { kHoleNanUpper32), frame_state, DeoptimizeReason::kHole, feedback); } - END_IF + return input; } } @@ -165,19 +162,18 @@ class MachineLoweringReducer : public Next { IF_NOT (LIKELY(condition)) { __ Deoptimize(frame_state, parameters); } - END_IF + } else { IF (UNLIKELY(condition)) { __ Deoptimize(frame_state, parameters); } - END_IF } return OpIndex::Invalid(); } goto no_change; } - V REDUCE(ObjectIs)(V input, ObjectIsOp::Kind kind, + V REDUCE(ObjectIs)(V input, ObjectIsOp::Kind kind, ObjectIsOp::InputAssumptions input_assumptions) { switch (kind) { case ObjectIsOp::Kind::kBigInt: @@ -247,7 +243,7 @@ class MachineLoweringReducer : public Next { __ TaggedEqual(input, __ HeapConstant(factory_->null_value())); return __ Word32BitwiseOr(is_undefined, is_null); } - V8_FALLTHROUGH; + [[fallthrough]]; case ObjectIsOp::Kind::kCallable: case ObjectIsOp::Kind::kConstructor: case ObjectIsOp::Kind::kDetectableCallable: @@ -293,7 +289,7 @@ class MachineLoweringReducer : public Next { Map::Bits1::IsCallableBit::kMask)); GOTO_IF_NOT(check, done, 0); // Fallthrough into receiver check. - V8_FALLTHROUGH; + [[fallthrough]]; case ObjectIsOp::Kind::kReceiver: check = JSAnyIsNotPrimitiveHeapObject(input, map); break; @@ -445,12 +441,48 @@ class MachineLoweringReducer : public Next { (kIsNotStringMask | kIsNotInternalizedMask)), kInternalizedTag); } - } + case ObjectIsOp::Kind::kStringOrStringWrapper: { + Label done(this); + + // Check for Smi if necessary. + if (NeedsHeapObjectCheck(input_assumptions)) { + GOTO_IF(__ IsSmi(input), done, 0); + } + + // Load instance type from map. + V map = __ LoadMapField(input); + V instance_type = __ LoadInstanceTypeField(map); + + GOTO_IF(__ Uint32LessThan(instance_type, FIRST_NONSTRING_TYPE), done, + 1); + GOTO_IF_NOT(__ Word32Equal(instance_type, JS_PRIMITIVE_WRAPPER_TYPE), + done, 0); + V bitfield2 = __ template LoadField( + map, AccessBuilder::ForMapBitField2()); + + V elements_kind = + __ Word32BitwiseAnd(bitfield2, Map::Bits2::ElementsKindBits::kMask); + + GOTO_IF(__ Word32Equal(FAST_STRING_WRAPPER_ELEMENTS + << Map::Bits2::ElementsKindBits::kShift, + elements_kind), + done, 1); + + V check = + __ Word32Equal(SLOW_STRING_WRAPPER_ELEMENTS + << Map::Bits2::ElementsKindBits::kShift, + elements_kind); + GOTO(done, check); + + BIND(done, result); + return result; + } + } UNREACHABLE(); } - V REDUCE(FloatIs)(OpIndex value, NumericKind kind, + V REDUCE(FloatIs)(V value, NumericKind kind, FloatRepresentation input_rep) { DCHECK_EQ(input_rep, FloatRepresentation::Float64()); switch (kind) { @@ -486,6 +518,29 @@ class MachineLoweringReducer : public Next { BIND(done, result); return result; } + case NumericKind::kSmi: { + Label done(this); + V v32 = __ TruncateFloat64ToInt32OverflowUndefined(value); + GOTO_IF_NOT(__ Float64Equal(value, __ ChangeInt32ToFloat64(v32)), done, + 0); + IF (__ Word32Equal(v32, 0)) { + // Checking -0. 
+ GOTO_IF(__ Int32LessThan(__ Float64ExtractHighWord32(value), 0), done, + 0); + } + + if constexpr (SmiValuesAre32Bits()) { + GOTO(done, 1); + } else { + OpIndex add = __ Int32AddCheckOverflow(v32, v32); + V overflow = __ template Projection(add, 1); + GOTO_IF(overflow, done, 0); + GOTO(done, 1); + } + + BIND(done, result); + return result; + } case NumericKind::kMinusZero: { if (Is64()) { V value64 = __ BitcastFloat64ToWord64(value); @@ -519,6 +574,7 @@ class MachineLoweringReducer : public Next { case NumericKind::kFinite: case NumericKind::kInteger: case NumericKind::kSafeInteger: + case NumericKind::kSmi: GOTO_IF(__ IsSmi(input), done, 1); break; case NumericKind::kMinusZero: @@ -590,7 +646,7 @@ class MachineLoweringReducer : public Next { case ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind::kBigInt: { DCHECK(Is64()); DCHECK_EQ(input_rep, RegisterRepresentation::Word64()); - Label done(this); + Label done(this); // BigInts with value 0 must be of size 0 (canonical form). GOTO_IF(__ Word64Equal(input, int64_t{0}), done, @@ -634,7 +690,7 @@ class MachineLoweringReducer : public Next { } DCHECK(SmiValuesAre31Bits()); - Label done(this); + Label done(this); Label<> overflow(this); TagSmiOrOverflow(input, &overflow, &done); @@ -649,7 +705,7 @@ class MachineLoweringReducer : public Next { } case ConvertUntaggedToJSPrimitiveOp::InputInterpretation:: kUnsigned: { - Label done(this); + Label done(this); GOTO_IF(__ Uint32LessThanOrEqual(input, Smi::kMaxValue), done, __ TagSmi(input)); @@ -667,7 +723,7 @@ class MachineLoweringReducer : public Next { } else if (input_rep == RegisterRepresentation::Word64()) { switch (input_interpretation) { case ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kSigned: { - Label done(this); + Label done(this); Label<> outside_smi_range(this); V v32 = __ TruncateWord64ToWord32(input); @@ -690,7 +746,7 @@ class MachineLoweringReducer : public Next { } case ConvertUntaggedToJSPrimitiveOp::InputInterpretation:: kUnsigned: { - Label done(this); + Label done(this); GOTO_IF(__ Uint64LessThanOrEqual(input, Smi::kMaxValue), done, __ TagSmi(__ TruncateWord64ToWord32(input))); @@ -707,7 +763,7 @@ class MachineLoweringReducer : public Next { } } else { DCHECK_EQ(input_rep, RegisterRepresentation::Float64()); - Label done(this); + Label done(this); Label<> outside_smi_range(this); V v32 = __ TruncateFloat64ToInt32OverflowUndefined(input); @@ -717,11 +773,10 @@ class MachineLoweringReducer : public Next { if (minus_zero_mode == CheckForMinusZeroMode::kCheckForMinusZero) { // In case of 0, we need to check the high bits for the IEEE -0 // pattern. - IF(__ Word32Equal(v32, 0)) { + IF (__ Word32Equal(v32, 0)) { GOTO_IF(__ Int32LessThan(__ Float64ExtractHighWord32(input), 0), outside_smi_range); } - END_IF } if constexpr (SmiValuesAre32Bits()) { @@ -751,7 +806,7 @@ class MachineLoweringReducer : public Next { DCHECK_EQ(input_rep, RegisterRepresentation::Float64()); DCHECK_EQ(input_interpretation, ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kSigned); - Label done(this); + Label done(this); Label<> allocate_heap_number(this); // First check whether {input} is a NaN at all... 
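Editorial sketch (not part of this patch): the new NumericKind::kSmi case above checks whether a float64 is exactly representable as a Smi. A standalone model of that predicate, assuming 31-bit Smis (the non-SmiValuesAre32Bits configuration); the helper name IsSmiValue is made up for illustration:

```cpp
#include <cmath>
#include <cstdint>

// Returns true iff `value` round-trips through int32 without a fractional
// part, is not -0.0, and fits in the 31-bit Smi range.
bool IsSmiValue(double value) {
  if (!(value >= INT32_MIN && value <= INT32_MAX)) return false;  // also rejects NaN
  int32_t v32 = static_cast<int32_t>(value);
  if (static_cast<double>(v32) != value) return false;  // fractional part
  if (v32 == 0 && std::signbit(value)) return false;    // -0.0 needs a HeapNumber
  // 31-bit Smi range check, mirroring the Int32AddCheckOverflow(v32, v32) trick.
  return v32 >= -(1 << 30) && v32 < (1 << 30);
}
```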
@@ -762,11 +817,9 @@ class MachineLoweringReducer : public Next { kHoleNanUpper32), allocate_heap_number); GOTO(done, __ HeapConstant(factory_->undefined_value())); - } - ELSE { + } ELSE { GOTO(allocate_heap_number); } - END_IF if (BIND(allocate_heap_number)) { GOTO(done, AllocateHeapNumberWithValue(input)); @@ -785,18 +838,20 @@ class MachineLoweringReducer : public Next { DCHECK_EQ(input_rep, RegisterRepresentation::Word32()); DCHECK_EQ(input_interpretation, ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kSigned); - Label done(this); + Label done(this); - IF(input) { GOTO(done, __ HeapConstant(factory_->true_value())); } - ELSE { GOTO(done, __ HeapConstant(factory_->false_value())); } - END_IF + IF (input) { + GOTO(done, __ HeapConstant(factory_->true_value())); + } ELSE { + GOTO(done, __ HeapConstant(factory_->false_value())); + } BIND(done, result); return result; } case ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind::kString: { Label single_code(this); - Label done(this); + Label done(this); if (input_interpretation == ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kCharCode) { @@ -859,10 +914,10 @@ class MachineLoweringReducer : public Next { if (BIND(single_code, code)) { // Check if the {code} is a one byte character. - IF(LIKELY( - __ Uint32LessThanOrEqual(code, String::kMaxOneByteCharCode))) { + IF (LIKELY(__ Uint32LessThanOrEqual(code, + String::kMaxOneByteCharCode))) { // Load the isolate wide single character string table. - V table = + V table = __ HeapConstant(factory_->single_character_string_table()); // Compute the {table} index for {code}. @@ -875,8 +930,7 @@ class MachineLoweringReducer : public Next { // Use the {entry} from the {table}. GOTO(done, entry); - } - ELSE { + } ELSE { // Allocate a new SeqTwoBytesString for {code}. 
auto string = __ template Allocate( __ IntPtrConstant(SeqTwoByteString::SizeFor(1)), @@ -899,7 +953,6 @@ class MachineLoweringReducer : public Next { __ IntPtrConstant(0), code); GOTO(done, __ FinishInitialization(std::move(string))); } - END_IF } BIND(done, result); @@ -983,13 +1036,11 @@ class MachineLoweringReducer : public Next { IF (LIKELY(__ ObjectIsSmi(object))) { GOTO(done, __ UntagSmi(V::Cast(object))); - } - ELSE { + } ELSE { V value = __ template LoadField( object, AccessBuilder::ForHeapNumberOrOddballOrHoleValue()); GOTO(done, __ ReversibleFloat64ToInt32(value)); } - END_IF BIND(done, result); return result; @@ -1021,13 +1072,11 @@ class MachineLoweringReducer : public Next { IF (LIKELY(__ ObjectIsSmi(object))) { GOTO(done, __ ChangeInt32ToInt64(__ UntagSmi(V::Cast(object)))); - } - ELSE { + } ELSE { V value = __ template LoadField( object, AccessBuilder::ForHeapNumberOrOddballOrHoleValue()); GOTO(done, __ ReversibleFloat64ToInt64(value)); } - END_IF BIND(done, result); return result; @@ -1041,13 +1090,11 @@ class MachineLoweringReducer : public Next { IF (LIKELY(__ ObjectIsSmi(object))) { GOTO(done, __ UntagSmi(V::Cast(object))); - } - ELSE { + } ELSE { V value = __ template LoadField( object, AccessBuilder::ForHeapNumberOrOddballOrHoleValue()); GOTO(done, __ ReversibleFloat64ToUint32(value)); } - END_IF BIND(done, result); return result; @@ -1064,13 +1111,11 @@ class MachineLoweringReducer : public Next { IF (LIKELY(__ ObjectIsSmi(object))) { GOTO(done, __ ChangeInt32ToFloat64(__ UntagSmi(V::Cast(object)))); - } - ELSE { + } ELSE { V value = __ template LoadField( object, AccessBuilder::ForHeapNumberOrOddballOrHoleValue()); GOTO(done, value); } - END_IF BIND(done, result); return result; @@ -1096,7 +1141,7 @@ class MachineLoweringReducer : public Next { } OpIndex REDUCE(ConvertJSPrimitiveToUntaggedOrDeopt)( - V object, OpIndex frame_state, + V object, OpIndex frame_state, ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind from_kind, ConvertJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind to_kind, CheckForMinusZeroMode minus_zero_mode, const FeedbackSource& feedback) { @@ -1113,10 +1158,9 @@ class MachineLoweringReducer : public Next { ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind::kNumber); Label done(this); - IF(LIKELY(__ ObjectIsSmi(object))) { + IF (LIKELY(__ ObjectIsSmi(object))) { GOTO(done, __ UntagSmi(V::Cast(object))); - } - ELSE { + } ELSE { V map = __ LoadMapField(object); __ DeoptimizeIfNot( __ TaggedEqual(map, @@ -1129,7 +1173,6 @@ class MachineLoweringReducer : public Next { __ ChangeFloat64ToInt32OrDeopt(heap_number_value, frame_state, minus_zero_mode, feedback)); } - END_IF BIND(done, result); return result; @@ -1141,10 +1184,9 @@ class MachineLoweringReducer : public Next { ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind::kNumber); Label done(this); - IF(LIKELY(__ ObjectIsSmi(object))) { + IF (LIKELY(__ ObjectIsSmi(object))) { GOTO(done, __ ChangeInt32ToInt64(__ UntagSmi(V::Cast(object)))); - } - ELSE { + } ELSE { V map = __ LoadMapField(object); __ DeoptimizeIfNot( __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map())), @@ -1155,7 +1197,6 @@ class MachineLoweringReducer : public Next { __ ChangeFloat64ToInt64OrDeopt(heap_number_value, frame_state, minus_zero_mode, feedback)); } - END_IF BIND(done, result); return result; @@ -1165,15 +1206,13 @@ class MachineLoweringReducer : public Next { // In the Smi case, just convert to int32 and then float64. // Otherwise, check heap numberness and load the number. 
- IF(__ ObjectIsSmi(object)) { + IF (__ ObjectIsSmi(object)) { GOTO(done, __ ChangeInt32ToFloat64(__ UntagSmi(V::Cast(object)))); - } - ELSE { + } ELSE { GOTO(done, ConvertHeapObjectToFloat64OrDeopt(object, frame_state, from_kind, feedback)); } - END_IF BIND(done, result); return result; @@ -1183,14 +1222,13 @@ class MachineLoweringReducer : public Next { JSPrimitiveKind::kNumberOrString); Label done(this); - IF(LIKELY(__ ObjectIsSmi(object))) { + IF (LIKELY(__ ObjectIsSmi(object))) { // In the Smi case, just convert to intptr_t. GOTO(done, __ ChangeInt32ToIntPtr(__ UntagSmi(V::Cast(object)))); - } - ELSE { + } ELSE { V map = __ LoadMapField(object); - IF(LIKELY(__ TaggedEqual( - map, __ HeapConstant(factory_->heap_number_map())))) { + IF (LIKELY(__ TaggedEqual( + map, __ HeapConstant(factory_->heap_number_map())))) { V heap_number_value = __ template LoadField( object, AccessBuilder::ForHeapNumberValue()); // Perform Turbofan's "CheckedFloat64ToIndex" @@ -1226,8 +1264,7 @@ class MachineLoweringReducer : public Next { GOTO(done, i32); } } - } - ELSE { + } ELSE { #if V8_STATIC_ROOTS_BOOL V is_string_map = __ Uint32LessThanOrEqual( __ TruncateWordPtrToWord32(__ BitcastHeapObjectToWordPtr(map)), @@ -1258,9 +1295,7 @@ class MachineLoweringReducer : public Next { DeoptimizeReason::kNotAnArrayIndex, feedback); GOTO(done, index); } - END_IF } - END_IF BIND(done, result); return result; @@ -1280,13 +1315,11 @@ class MachineLoweringReducer : public Next { IF (LIKELY(__ ObjectIsSmi(object))) { GOTO(done, __ UntagSmi(V::Cast(object))); - } - ELSE { + } ELSE { V number_value = __ template LoadField( object, AccessBuilder::ForHeapNumberOrOddballOrHoleValue()); GOTO(done, __ JSTruncateFloat64ToWord32(number_value)); } - END_IF BIND(done, result); return result; @@ -1299,17 +1332,19 @@ class MachineLoweringReducer : public Next { V bitfield = __ template LoadField( object, AccessBuilder::ForBigIntBitfield()); - IF(__ Word32Equal(bitfield, 0)) { GOTO(done, 0); } - ELSE { + IF (__ Word32Equal(bitfield, 0)) { + GOTO(done, 0); + } ELSE { V lsd = __ template LoadField( object, AccessBuilder::ForBigIntLeastSignificantDigit64()); V sign = __ Word32BitwiseAnd(bitfield, BigInt::SignBits::kMask); - IF(__ Word32Equal(sign, 1)) { GOTO(done, __ Word64Sub(0, lsd)); } - END_IF + IF (__ Word32Equal(sign, 1)) { + GOTO(done, __ Word64Sub(0, lsd)); + } + GOTO(done, lsd); } - END_IF BIND(done, result); return result; @@ -1323,7 +1358,7 @@ class MachineLoweringReducer : public Next { IF (UNLIKELY(__ ObjectIsSmi(object))) { GOTO(done, __ Word32Equal(__ TaggedEqual(object, __ TagSmi(0)), 0)); } - END_IF + // Otherwise fall through into HeapObject case. } else { DCHECK_EQ( @@ -1394,25 +1429,23 @@ class MachineLoweringReducer : public Next { } // Check if {object} is a HeapNumber. - IF(UNLIKELY(__ TaggedEqual( - map, __ HeapConstant(factory_->heap_number_map())))) { + IF (UNLIKELY(__ TaggedEqual( + map, __ HeapConstant(factory_->heap_number_map())))) { // For HeapNumber {object}, just check that its value is not 0.0, -0.0 // or NaN. V number_value = __ template LoadField( object, AccessBuilder::ForHeapNumberValue()); GOTO(done, __ Float64LessThan(0.0, __ Float64Abs(number_value))); } - END_IF // Check if {object} is a BigInt. 
- IF(UNLIKELY( - __ TaggedEqual(map, __ HeapConstant(factory_->bigint_map())))) { + IF (UNLIKELY( + __ TaggedEqual(map, __ HeapConstant(factory_->bigint_map())))) { V bitfield = __ template LoadField( object, AccessBuilder::ForBigIntBitfield()); GOTO(done, IsNonZero(__ Word32BitwiseAnd(bitfield, BigInt::LengthBits::kMask))); } - END_IF // All other values that reach here are true. GOTO(done, 1); @@ -1470,11 +1503,12 @@ class MachineLoweringReducer : public Next { OpIndex REDUCE(ConvertJSPrimitiveToObject)(V value, V native_context, - V global_proxy, + OptionalV global_proxy, ConvertReceiverMode mode) { switch (mode) { case ConvertReceiverMode::kNullOrUndefined: - return global_proxy; + DCHECK(global_proxy.valid()); + return global_proxy.value(); case ConvertReceiverMode::kNotNullOrUndefined: case ConvertReceiverMode::kAny: { Label done(this); @@ -1489,13 +1523,14 @@ class MachineLoweringReducer : public Next { // Wrap the primitive {value} into a JSPrimitiveWrapper. if (BIND(convert_to_object)) { if (mode != ConvertReceiverMode::kNotNullOrUndefined) { + DCHECK(global_proxy.valid()); // Replace the {value} with the {global_proxy}. GOTO_IF(UNLIKELY(__ TaggedEqual( value, __ HeapConstant(factory_->undefined_value()))), - done, global_proxy); + done, global_proxy.value()); GOTO_IF(UNLIKELY(__ TaggedEqual( value, __ HeapConstant(factory_->null_value()))), - done, global_proxy); + done, global_proxy.value()); } GOTO(done, __ CallBuiltin_ToObject( isolate_, V::Cast(native_context), value)); @@ -1515,7 +1550,7 @@ class MachineLoweringReducer : public Next { V second_map = __ LoadMapField(second); V second_type = __ LoadInstanceTypeField(second_map); - Label allocate_string(this); + Label allocate_string(this); // Determine the proper map for the resulting ConsString. // If both {first} and {second} are one-byte strings, we // create a new ConsOneByteString, otherwise we create a @@ -1525,15 +1560,13 @@ class MachineLoweringReducer : public Next { V instance_type = __ Word32BitwiseAnd(first_type, second_type); V encoding = __ Word32BitwiseAnd(instance_type, kStringEncodingMask); - IF(__ Word32Equal(encoding, kTwoByteStringTag)) { + IF (__ Word32Equal(encoding, kTwoByteStringTag)) { GOTO(allocate_string, __ HeapConstant(factory_->cons_two_byte_string_map())); - } - ELSE { + } ELSE { GOTO(allocate_string, __ HeapConstant(factory_->cons_one_byte_string_map())); } - END_IF // Allocate the resulting ConsString. BIND(allocate_string, map); @@ -1550,7 +1583,7 @@ class MachineLoweringReducer : public Next { OpIndex REDUCE(NewArray)(V length, NewArrayOp::Kind kind, AllocationType allocation_type) { - Label done(this); + Label done(this); GOTO_IF(__ WordPtrEqual(length, 0), done, __ HeapConstant(factory_->empty_fixed_array())); @@ -1598,62 +1631,51 @@ class MachineLoweringReducer : public Next { // have been initialized. auto array = __ FinishInitialization(std::move(uninitialized_array)); - // Initialize the backing store with holes. - LoopLabel loop(this); - GOTO(loop, intptr_t{0}); - - LOOP(loop, index) { - GOTO_IF_NOT(LIKELY(__ UintPtrLessThan(index, length)), done, array); + ScopedVar index(this, 0); + WHILE(__ UintPtrLessThan(index, length)) { __ StoreNonArrayBufferElement(array, access, index, the_hole_value); - // Advance the {index}. 
- GOTO(loop, __ WordPtrAdd(index, 1)); + index = __ WordPtrAdd(index, 1); } + GOTO(done, array); + BIND(done, result); return result; } - OpIndex REDUCE(DoubleArrayMinMax)(V array, + OpIndex REDUCE(DoubleArrayMinMax)(V array, DoubleArrayMinMaxOp::Kind kind) { DCHECK(kind == DoubleArrayMinMaxOp::Kind::kMin || kind == DoubleArrayMinMaxOp::Kind::kMax); const bool is_max = kind == DoubleArrayMinMaxOp::Kind::kMax; // Iterate the elements and find the result. - V empty_value = - __ Float64Constant(is_max ? -V8_INFINITY : V8_INFINITY); V array_length = __ ChangeInt32ToIntPtr(__ UntagSmi(__ template LoadField( array, AccessBuilder::ForJSArrayLength( ElementsKind::PACKED_DOUBLE_ELEMENTS)))); - V elements = __ template LoadField( + V elements = __ template LoadField( array, AccessBuilder::ForJSObjectElements()); - Label<> done(this); - LoopLabel loop(this); - ScopedVar result(Asm(), empty_value); - - GOTO(loop, intptr_t{0}); - - LOOP(loop, index) { - GOTO_IF_NOT(LIKELY(__ UintPtrLessThan(index, array_length)), done); + ScopedVar result(this, is_max ? -V8_INFINITY : V8_INFINITY); + ScopedVar index(this, 0); + WHILE(__ UintPtrLessThan(index, array_length)) { V element = __ template LoadNonArrayBufferElement( elements, AccessBuilder::ForFixedDoubleArrayElement(), index); - result = is_max ? __ Float64Max(*result, element) - : __ Float64Min(*result, element); - GOTO(loop, __ WordPtrAdd(index, 1)); + result = is_max ? __ Float64Max(result, element) + : __ Float64Min(result, element); + index = __ WordPtrAdd(index, 1); } - BIND(done); - return __ ConvertFloat64ToNumber(*result, + return __ ConvertFloat64ToNumber(result, CheckForMinusZeroMode::kCheckForMinusZero); } - OpIndex REDUCE(LoadFieldByIndex)(V object, V field_index) { + OpIndex REDUCE(LoadFieldByIndex)(V object, V field_index) { // Index encoding (see `src/objects/field-index-inl.h`): // For efficiency, the LoadByFieldIndex instruction takes an index that is // optimized for quick access. If the property is inline, the index is @@ -1664,7 +1686,7 @@ class MachineLoweringReducer : public Next { V index = __ ChangeInt32ToIntPtr(field_index); Label<> double_field(this); - Label done(this); + Label done(this); // Check if field is a mutable double field. GOTO_IF( @@ -1677,60 +1699,56 @@ class MachineLoweringReducer : public Next { // shifted to the left by one in the code below. // Check if field is in-object or out-of-object. - IF(__ IntPtrLessThan(index, 0)) { + IF (__ IntPtrLessThan(index, 0)) { // The field is located in the properties backing store of {object}. // The {index} is equal to the negated out of property index plus 1. - V properties = __ template LoadField( + V properties = __ template LoadField( object, AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer()); V out_of_object_index = __ WordPtrSub(0, index); - V result = + V result = __ Load(properties, out_of_object_index, LoadOp::Kind::Aligned(BaseTaggedness::kTaggedBase), MemoryRepresentation::AnyTagged(), FixedArray::kHeaderSize - kTaggedSize, kTaggedSizeLog2 - 1); GOTO(done, result); - } - ELSE { + } ELSE { // This field is located in the {object} itself. - V result = __ Load( + V result = __ Load( object, index, LoadOp::Kind::Aligned(BaseTaggedness::kTaggedBase), MemoryRepresentation::AnyTagged(), JSObject::kHeaderSize, kTaggedSizeLog2 - 1); GOTO(done, result); } - END_IF } if (BIND(double_field)) { // If field is a Double field, either unboxed in the object on 64 bit // architectures, or a mutable HeapNumber. 
V double_index = __ WordPtrShiftRightArithmetic(index, 1); - Label loaded_field(this); + Label loaded_field(this); // Check if field is in-object or out-of-object. - IF(__ IntPtrLessThan(double_index, 0)) { - V properties = __ template LoadField( + IF (__ IntPtrLessThan(double_index, 0)) { + V properties = __ template LoadField( object, AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer()); V out_of_object_index = __ WordPtrSub(0, double_index); - V result = + V result = __ Load(properties, out_of_object_index, LoadOp::Kind::Aligned(BaseTaggedness::kTaggedBase), MemoryRepresentation::AnyTagged(), FixedArray::kHeaderSize - kTaggedSize, kTaggedSizeLog2); GOTO(loaded_field, result); - } - ELSE { + } ELSE { // The field is located in the {object} itself. - V result = + V result = __ Load(object, double_index, LoadOp::Kind::Aligned(BaseTaggedness::kTaggedBase), MemoryRepresentation::AnyTagged(), JSObject::kHeaderSize, kTaggedSizeLog2); GOTO(loaded_field, result); } - END_IF if (BIND(loaded_field, field)) { // We may have transitioned in-place away from double, so check that @@ -1793,7 +1811,6 @@ class MachineLoweringReducer : public Next { __ Int32LessThan(__ Word32BitwiseOr(left, right), 0), frame_state, DeoptimizeReason::kMinusZero, feedback); } - END_IF } return value; @@ -1830,8 +1847,7 @@ class MachineLoweringReducer : public Next { // Check if {rhs} is positive (and not zero). IF (__ Int32LessThan(0, right)) { GOTO(done, __ Int32Div(left, right)); - } - ELSE { + } ELSE { // Check if {rhs} is zero. __ DeoptimizeIf(__ Word32Equal(right, 0), frame_state, DeoptimizeReason::kDivisionByZero, feedback); @@ -1846,11 +1862,9 @@ class MachineLoweringReducer : public Next { __ DeoptimizeIf(__ Word32Equal(right, -1), frame_state, DeoptimizeReason::kOverflow, feedback); } - END_IF GOTO(done, __ Int32Div(left, right)); } - END_IF BIND(done, value); V lossless = @@ -1872,7 +1886,6 @@ class MachineLoweringReducer : public Next { __ DeoptimizeIf(__ Word64Equal(right, int64_t{-1}), frame_state, DeoptimizeReason::kOverflow, feedback); } - END_IF return __ Int64Div(left, right); } @@ -1909,11 +1922,9 @@ class MachineLoweringReducer : public Next { __ DeoptimizeIfNot(temp, frame_state, DeoptimizeReason::kDivisionByZero, feedback); GOTO(rhs_checked, temp); - } - ELSE { + } ELSE { GOTO(rhs_checked, right); } - END_IF BIND(rhs_checked, rhs_value); @@ -1928,12 +1939,10 @@ class MachineLoweringReducer : public Next { __ DeoptimizeIf(__ Word32Equal(temp, 0), frame_state, DeoptimizeReason::kMinusZero, feedback); GOTO(done, __ Word32Sub(0, temp)); - } - ELSE { + } ELSE { // The {lhs} is a non-negative integer. 
GOTO(done, BuildUint32Mod(left, rhs_value)); } - END_IF BIND(done, result); return result; @@ -1951,7 +1960,6 @@ class MachineLoweringReducer : public Next { __ DeoptimizeIf(__ Word64Equal(right, int64_t{-1}), frame_state, DeoptimizeReason::kOverflow, feedback); } - END_IF return __ Int64Mod(left, right); } @@ -1998,7 +2006,7 @@ class MachineLoweringReducer : public Next { } } - OpIndex REDUCE(BigIntBinop)(V left, V right, + OpIndex REDUCE(BigIntBinop)(V left, V right, OpIndex frame_state, BigIntBinopOp::Kind kind) { const Builtin builtin = GetBuiltinForBigIntBinop(kind); switch (kind) { @@ -2008,7 +2016,7 @@ class MachineLoweringReducer : public Next { case BigIntBinopOp::Kind::kBitwiseXor: case BigIntBinopOp::Kind::kShiftLeft: case BigIntBinopOp::Kind::kShiftRightArithmetic: { - V result = CallBuiltinForBigIntOp(builtin, {left, right}); + V result = CallBuiltinForBigIntOp(builtin, {left, right}); // Check for exception sentinel: Smi 0 is returned to signal // BigIntTooBig. @@ -2019,7 +2027,7 @@ class MachineLoweringReducer : public Next { case BigIntBinopOp::Kind::kMul: case BigIntBinopOp::Kind::kDiv: case BigIntBinopOp::Kind::kMod: { - V result = CallBuiltinForBigIntOp(builtin, {left, right}); + V result = CallBuiltinForBigIntOp(builtin, {left, right}); // Check for exception sentinel: Smi 1 is returned to signal // TerminationRequested. @@ -2027,7 +2035,6 @@ class MachineLoweringReducer : public Next { __ CallRuntime_TerminateExecution(isolate_, frame_state, __ NoContextConstant()); } - END_IF // Check for exception sentinel: Smi 0 is returned to signal // BigIntTooBig or DivisionByZero. @@ -2047,7 +2054,7 @@ class MachineLoweringReducer : public Next { UNREACHABLE(); } - V REDUCE(BigIntComparison)(V left, V right, + V REDUCE(BigIntComparison)(V left, V right, BigIntComparisonOp::Kind kind) { switch (kind) { case BigIntComparisonOp::Kind::kEqual: @@ -2060,7 +2067,7 @@ class MachineLoweringReducer : public Next { } } - V REDUCE(BigIntUnary)(V input, BigIntUnaryOp::Kind kind) { + V REDUCE(BigIntUnary)(V input, BigIntUnaryOp::Kind kind) { DCHECK_EQ(kind, BigIntUnaryOp::Kind::kNegate); return CallBuiltinForBigIntOp(Builtin::kBigIntUnaryMinus, {input}); } @@ -2090,7 +2097,6 @@ class MachineLoweringReducer : public Next { GOTO(done, LoadFromSeqString(string, pos, __ Word32Constant(one_byte))); } - END_IF } } } @@ -2105,12 +2111,12 @@ class MachineLoweringReducer : public Next { // We need a loop here to properly deal with indirect strings // (SlicedString, ConsString and ThinString). 
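Editorial sketch (not part of this patch): the StringCharCodeAt loop rewritten with BIND_LOOP below chases string indirections. A simplified standalone model of that control flow, with made-up types; ConsString and external strings are omitted here (in the patch they either bail out to the runtime or load directly):

```cpp
#include <cassert>
#include <cstdint>
#include <string>

struct Str {
  enum Kind { kSeq, kThin, kSliced } kind;
  std::string payload;          // only meaningful for kSeq
  const Str* target = nullptr;  // kThin: actual string, kSliced: parent
  size_t offset = 0;            // only meaningful for kSliced
};

// Walk thin/sliced indirections until a flat (sequential) string is reached,
// adjusting the character position for sliced strings along the way.
uint16_t CharCodeAt(const Str* receiver, size_t position) {
  for (;;) {
    switch (receiver->kind) {
      case Str::kSeq:
        return static_cast<uint16_t>(receiver->payload[position]);
      case Str::kThin:
        receiver = receiver->target;
        break;
      case Str::kSliced:
        position += receiver->offset;
        receiver = receiver->target;
        break;
    }
  }
}

int main() {
  Str seq{Str::kSeq, "hello"};
  Str sliced{Str::kSliced, "", &seq, 1};  // a view of "ello"
  Str thin{Str::kThin, "", &sliced};
  assert(CharCodeAt(&thin, 2) == 'l');    // index 2 of the slice -> index 3 of "hello"
  return 0;
}
```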
LoopLabel<> loop(this); - ScopedVar receiver(Asm(), string); - ScopedVar position(Asm(), pos); + ScopedVar receiver(this, string); + ScopedVar position(this, pos); GOTO(loop); - LOOP(loop) { - V map = __ LoadMapField(*receiver); + BIND_LOOP(loop) { + V map = __ LoadMapField(receiver); V instance_type = __ LoadInstanceTypeField(map); V representation = __ Word32BitwiseAnd(instance_type, kStringRepresentationMask); @@ -2121,35 +2127,31 @@ class MachineLoweringReducer : public Next { IF (__ Word32Equal(representation, kConsStringTag)) { // if_consstring V second = __ template LoadField( - *receiver, AccessBuilder::ForConsStringSecond()); + receiver, AccessBuilder::ForConsStringSecond()); GOTO_IF_NOT( LIKELY(__ TaggedEqual( second, __ HeapConstant(factory_->empty_string()))), runtime); receiver = __ template LoadField( - *receiver, AccessBuilder::ForConsStringFirst()); + receiver, AccessBuilder::ForConsStringFirst()); GOTO(loop); - } - ELSE { + } ELSE { // if_seqstring V onebyte = __ Word32Equal( __ Word32BitwiseAnd(instance_type, kStringEncodingMask), kOneByteStringTag); - GOTO(done, LoadFromSeqString(*receiver, *position, onebyte)); + GOTO(done, LoadFromSeqString(receiver, position, onebyte)); } - END_IF } - } - ELSE { + } ELSE { // if_greaterthan_cons { IF (__ Word32Equal(representation, kThinStringTag)) { // if_thinstring receiver = __ template LoadField( - *receiver, AccessBuilder::ForThinStringActual()); + receiver, AccessBuilder::ForThinStringActual()); GOTO(loop); - } - ELSE_IF (__ Word32Equal(representation, kExternalStringTag)) { + } ELSE IF (__ Word32Equal(representation, kExternalStringTag)) { // if_externalstring // We need to bailout to the runtime for uncached external // strings. @@ -2160,52 +2162,47 @@ class MachineLoweringReducer : public Next { runtime); OpIndex data = __ LoadField( - *receiver, AccessBuilder::ForExternalStringResourceData()); + receiver, AccessBuilder::ForExternalStringResourceData()); IF (__ Word32Equal( __ Word32BitwiseAnd(instance_type, kStringEncodingMask), kTwoByteStringTag)) { // if_twobyte constexpr uint8_t twobyte_size_log2 = 1; V value = __ Load( - data, *position, + data, position, LoadOp::Kind::Aligned(BaseTaggedness::kUntaggedBase), MemoryRepresentation::Uint16(), 0, twobyte_size_log2); GOTO(done, value); - } - ELSE { + } ELSE { // if_onebyte constexpr uint8_t onebyte_size_log2 = 0; V value = __ Load( - data, *position, + data, position, LoadOp::Kind::Aligned(BaseTaggedness::kUntaggedBase), MemoryRepresentation::Uint8(), 0, onebyte_size_log2); GOTO(done, value); } - END_IF - } - ELSE_IF (LIKELY(__ Word32Equal(representation, kSlicedStringTag))) { + } ELSE IF (LIKELY( + __ Word32Equal(representation, kSlicedStringTag))) { // if_slicedstring V offset = __ template LoadField( - *receiver, AccessBuilder::ForSlicedStringOffset()); + receiver, AccessBuilder::ForSlicedStringOffset()); receiver = __ template LoadField( - *receiver, AccessBuilder::ForSlicedStringParent()); + receiver, AccessBuilder::ForSlicedStringParent()); position = __ WordPtrAdd( - *position, __ ChangeInt32ToIntPtr(__ UntagSmi(offset))); + position, __ ChangeInt32ToIntPtr(__ UntagSmi(offset))); GOTO(loop); - } - ELSE { + } ELSE { GOTO(runtime); } - END_IF } } - END_IF if (BIND(runtime)) { V value = __ UntagSmi(V::Cast(__ CallRuntime_StringCharCodeAt( - isolate_, __ NoContextConstant(), *receiver, - __ TagSmi(__ TruncateWordPtrToWord32(*position))))); + isolate_, __ NoContextConstant(), receiver, + __ TagSmi(__ TruncateWordPtrToWord32(position))))); GOTO(done, value); } } @@ -2302,11 
+2299,9 @@ class MachineLoweringReducer : public Next { GOTO(done, __ CallBuiltin_StringEqual(isolate_, left, right, __ ChangeInt32ToIntPtr(left_length))); - } - ELSE { + } ELSE { GOTO(done, __ HeapConstant(factory_->false_value())); } - END_IF BIND(done, result); return result; @@ -2332,9 +2327,11 @@ class MachineLoweringReducer : public Next { V rest_length = __ WordPtrSub(arguments_length, formal_parameter_count); Label done(this); - IF(__ IntPtrLessThan(rest_length, 0)) { GOTO(done, 0); } - ELSE { GOTO(done, rest_length); } - END_IF + IF (__ IntPtrLessThan(rest_length, 0)) { + GOTO(done, 0); + } ELSE { + GOTO(done, rest_length); + } BIND(done, value); return __ TagSmi(__ TruncateWordPtrToWord32(value)); @@ -2462,15 +2459,12 @@ class MachineLoweringReducer : public Next { HOLEY_DOUBLE_ELEMENTS, double_map.ToHandleChecked()); GOTO(do_store, HOLEY_DOUBLE_ELEMENTS); - } - ELSE { + } ELSE { TransitionElementsTo(array, HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS, fast_map.ToHandleChecked()); GOTO(do_store, HOLEY_ELEMENTS); } - END_IF } - END_IF GOTO_IF_NOT(LIKELY(__ Int32LessThan(HOLEY_ELEMENTS, elements_kind)), do_store, elements_kind); @@ -2484,7 +2478,6 @@ class MachineLoweringReducer : public Next { fast_map.ToHandleChecked()); GOTO(do_store, HOLEY_ELEMENTS); } - END_IF GOTO(do_store, elements_kind); @@ -2499,8 +2492,7 @@ class MachineLoweringReducer : public Next { __ StoreNonArrayBufferElement( elements, AccessBuilder::ForFixedDoubleArrayElement(), index, float_value); - } - ELSE { + } ELSE { V float_value = __ template LoadField( V::Cast(value), AccessBuilder::ForHeapNumberValue()); @@ -2508,15 +2500,13 @@ class MachineLoweringReducer : public Next { elements, AccessBuilder::ForFixedDoubleArrayElement(), index, __ Float64SilenceNaN(float_value)); } - END_IF - } - ELSE { + } ELSE { // Our ElementsKind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS. __ StoreNonArrayBufferElement( elements, AccessBuilder::ForFixedArrayElement(HOLEY_ELEMENTS), index, value); } - END_IF + break; } case TransitionAndStoreArrayElementOp::Kind::kNumberElement: { @@ -2544,8 +2534,7 @@ class MachineLoweringReducer : public Next { // HOLEY_DOUBLE_ELEMENTS. TransitionElementsTo(array, HOLEY_SMI_ELEMENTS, HOLEY_DOUBLE_ELEMENTS, double_map.ToHandleChecked()); - } - ELSE { + } ELSE { // We expect that our input array started at HOLEY_SMI_ELEMENTS, and // climbs the lattice up to HOLEY_DOUBLE_ELEMENTS. However, loop // peeling can break this assumption, because in the peeled iteration, @@ -2562,12 +2551,10 @@ class MachineLoweringReducer : public Next { index, AllocateHeapNumberWithValue(value)); GOTO(done); } - END_IF + __ Unreachable(); } - END_IF } - END_IF V elements = __ template LoadField( array, AccessBuilder::ForJSObjectElements()); @@ -2599,12 +2586,10 @@ class MachineLoweringReducer : public Next { // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_ELEMENTS. 
TransitionElementsTo(array, HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS, fast_map.ToHandleChecked()); - } - ELSE_IF (UNLIKELY(__ Int32LessThan(HOLEY_ELEMENTS, elements_kind))) { + } ELSE IF (UNLIKELY(__ Int32LessThan(HOLEY_ELEMENTS, elements_kind))) { TransitionElementsTo(array, HOLEY_DOUBLE_ELEMENTS, HOLEY_ELEMENTS, fast_map.ToHandleChecked()); } - END_IF V elements = __ template LoadField( array, AccessBuilder::ForJSObjectElements()); @@ -2640,8 +2625,7 @@ class MachineLoweringReducer : public Next { __ StoreNonArrayBufferElement( elements, AccessBuilder::ForFixedDoubleArrayElement(), index, f64); - } - ELSE { + } ELSE { // Our ElementsKind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS. // In this case, we know our value is a signed small, and we can // optimize the ElementAccess information. @@ -2652,7 +2636,7 @@ class MachineLoweringReducer : public Next { __ StoreNonArrayBufferElement(elements, access, index, __ TagSmi(value)); } - END_IF + break; } } @@ -2683,7 +2667,6 @@ class MachineLoweringReducer : public Next { __ DeoptimizeIfNot(__ CompareMaps(heap_object, maps), frame_state, DeoptimizeReason::kWrongMap, feedback); } - END_IF } else { __ DeoptimizeIfNot(__ CompareMaps(heap_object, maps), frame_state, DeoptimizeReason::kWrongMap, feedback); @@ -2740,19 +2723,18 @@ class MachineLoweringReducer : public Next { Label done(this); - IF(LIKELY(__ Float64LessThan(0.0, input))) { + IF (LIKELY(__ Float64LessThan(0.0, input))) { GOTO_IF(UNLIKELY(__ Float64LessThanOrEqual(two_52, input)), done, input); V temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52); GOTO_IF_NOT(__ Float64LessThan(temp1, input), done, temp1); GOTO(done, __ Float64Add(temp1, 1.0)); - } - ELSE_IF(UNLIKELY(__ Float64Equal(input, 0.0))) { GOTO(done, input); } - ELSE_IF(UNLIKELY(__ Float64LessThanOrEqual(input, minus_two_52))) { + } ELSE IF (UNLIKELY(__ Float64Equal(input, 0.0))) { GOTO(done, input); - } - ELSE { + } ELSE IF (UNLIKELY(__ Float64LessThanOrEqual(input, minus_two_52))) { + GOTO(done, input); + } ELSE { V temp1 = __ Float64Sub(-0.0, input); V temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52); @@ -2760,7 +2742,6 @@ class MachineLoweringReducer : public Next { __ Float64Sub(-0.0, temp2)); GOTO(done, __ Float64Sub(-0.0, __ Float64Sub(temp2, 1.0))); } - END_IF BIND(done, result); return result; @@ -2792,19 +2773,18 @@ class MachineLoweringReducer : public Next { Label done(this); - IF(LIKELY(__ Float64LessThan(0.0, input))) { + IF (LIKELY(__ Float64LessThan(0.0, input))) { GOTO_IF(UNLIKELY(__ Float64LessThanOrEqual(two_52, input)), done, input); V temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52); GOTO_IF_NOT(__ Float64LessThan(input, temp1), done, temp1); GOTO(done, __ Float64Sub(temp1, 1.0)); - } - ELSE_IF(UNLIKELY(__ Float64Equal(input, 0.0))) { GOTO(done, input); } - ELSE_IF(UNLIKELY(__ Float64LessThanOrEqual(input, minus_two_52))) { + } ELSE IF (UNLIKELY(__ Float64Equal(input, 0.0))) { GOTO(done, input); - } - ELSE { + } ELSE IF (UNLIKELY(__ Float64LessThanOrEqual(input, minus_two_52))) { + GOTO(done, input); + } ELSE { V temp1 = __ Float64Sub(-0.0, input); V temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52); @@ -2812,7 +2792,6 @@ class MachineLoweringReducer : public Next { __ Float64Sub(-0.0, temp2)); GOTO(done, __ Float64Sub(-1.0, temp2)); } - END_IF BIND(done, result); return result; @@ -2873,7 +2852,7 @@ class MachineLoweringReducer : public Next { Label done(this); - IF(__ Float64LessThan(0.0, input)) { + IF (__ Float64LessThan(0.0, input)) { GOTO_IF(UNLIKELY(__ 
Float64LessThanOrEqual(two_52, input)), done, input); @@ -2882,8 +2861,7 @@ class MachineLoweringReducer : public Next { GOTO_IF(__ Float64LessThan(input, temp1), done, __ Float64Sub(temp1, 1.0)); GOTO(done, temp1); - } - ELSE { + } ELSE { GOTO_IF(UNLIKELY(__ Float64Equal(input, 0.0)), done, input); GOTO_IF(UNLIKELY(__ Float64LessThanOrEqual(input, minus_two_52)), done, input); @@ -2892,13 +2870,12 @@ class MachineLoweringReducer : public Next { V temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52); - IF(__ Float64LessThan(temp1, temp2)) { + IF (__ Float64LessThan(temp1, temp2)) { GOTO(done, __ Float64Sub(-0.0, __ Float64Sub(temp2, 1.0))); + } ELSE { + GOTO(done, __ Float64Sub(-0.0, temp2)); } - ELSE { GOTO(done, __ Float64Sub(-0.0, temp2)); } - END_IF } - END_IF BIND(done, result); return result; @@ -2955,8 +2932,7 @@ class MachineLoweringReducer : public Next { value, AccessBuilder::ForThinStringActual()); __ DeoptimizeIfNot(__ TaggedEqual(expected, value_actual), frame_state, DeoptimizeReason::kWrongName, FeedbackSource{}); - } - ELSE { + } ELSE { // Check that the {value} is a non-internalized String, if it's anything // else it cannot match the recorded feedback {expected} anyways. __ DeoptimizeIfNot( @@ -2986,7 +2962,7 @@ class MachineLoweringReducer : public Next { frame_state, DeoptimizeReason::kWrongName, FeedbackSource{}); } - END_IF + GOTO(done); BIND(done); @@ -3017,20 +2993,18 @@ class MachineLoweringReducer : public Next { V REDUCE(Float64SameValue)(OpIndex left, OpIndex right) { Label done(this); - IF(__ Float64Equal(left, right)) { + IF (__ Float64Equal(left, right)) { // Even if the values are float64-equal, we still need to distinguish // zero and minus zero. V left_hi = __ Float64ExtractHighWord32(left); V right_hi = __ Float64ExtractHighWord32(right); GOTO(done, __ Word32Equal(left_hi, right_hi)); - } - ELSE { + } ELSE { // Return true iff both {lhs} and {rhs} are NaN. GOTO_IF(__ Float64Equal(left, left), done, 0); GOTO_IF(__ Float64Equal(right, right), done, 0); GOTO(done, 1); } - END_IF BIND(done, result); return result; @@ -3103,7 +3077,7 @@ class MachineLoweringReducer : public Next { V map = __ LoadMapField(object); // Check if {map} is the same as {source_map}. - IF(UNLIKELY(__ TaggedEqual(map, source_map))) { + IF (UNLIKELY(__ TaggedEqual(map, source_map))) { switch (transition.mode()) { case ElementsTransition::kFastTransition: // In-place migration of {object}, just store the {target_map}. 
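Editorial sketch (not part of this patch): the Float64RoundUp/RoundDown lowerings reformatted above compute `(2^52 + x) - 2^52` to snap a double to an integer and then adjust by 1.0 in the ceil/floor direction. A minimal demonstration of the underlying trick (valid only for 0 <= x < 2^52):

```cpp
#include <cassert>

// Adding and subtracting 2^52 forces the fractional bits of a non-negative
// double below 2^52 to be dropped, rounding to the nearest (even) integer.
double RoundToNearestViaTwo52(double x) {
  const double two_52 = 4503599627370496.0;  // 2^52
  return (x + two_52) - two_52;
}

int main() {
  assert(RoundToNearestViaTwo52(3.2) == 3.0);
  assert(RoundToNearestViaTwo52(3.7) == 4.0);
  // The lowering then compares this result against the input and adds or
  // subtracts 1.0 to turn round-to-nearest into ceil or floor.
  return 0;
}
```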
@@ -3116,7 +3090,6 @@ class MachineLoweringReducer : public Next { break; } } - END_IF return OpIndex::Invalid(); } @@ -3146,7 +3119,7 @@ class MachineLoweringReducer : public Next { LoopLabel loop(this); GOTO(loop, first_entry); - LOOP(loop, entry) { + BIND_LOOP(loop, entry) { GOTO_IF(__ WordPtrEqual(entry, OrderedHashMap::kNotFound), done, entry); V candidate = @@ -3162,17 +3135,15 @@ class MachineLoweringReducer : public Next { GOTO_IF( __ Word32Equal(__ UntagSmi(V::Cast(candidate_key)), key), done, candidate); - } - ELSE_IF (__ TaggedEqual( - __ LoadMapField(candidate_key), - __ HeapConstant(factory_->heap_number_map()))) { + } ELSE IF (__ TaggedEqual( + __ LoadMapField(candidate_key), + __ HeapConstant(factory_->heap_number_map()))) { GOTO_IF(__ Float64Equal( __ template LoadField( candidate_key, AccessBuilder::ForHeapNumberValue()), __ ChangeInt32ToFloat64(key)), done, candidate); } - END_IF V next_entry = __ ChangeInt32ToIntPtr(__ UntagSmi(__ Load( data_structure, @@ -3204,12 +3175,10 @@ class MachineLoweringReducer : public Next { IF (__ Word32Equal(__ Word32BitwiseAnd(right, msk), 0)) { // The {rhs} is a power of two, just do a fast bit masking. GOTO(done, __ Word32BitwiseAnd(left, msk)); - } - ELSE { + } ELSE { // The {rhs} is not a power of two, do a generic Uint32Mod. GOTO(done, __ Uint32Mod(left, right)); } - END_IF BIND(done, result); return result; @@ -3247,7 +3216,7 @@ class MachineLoweringReducer : public Next { } void TagSmiOrOverflow(V input, Label<>* overflow, - Label* done) { + Label* done) { DCHECK(SmiValuesAre31Bits()); // Check for overflow at the same time that we are smi tagging. @@ -3264,7 +3233,7 @@ class MachineLoweringReducer : public Next { return __ Word32Equal(__ Word32Equal(value, 0), 0); } - V AllocateHeapNumberWithValue(V value) { + V AllocateHeapNumberWithValue(V value) { auto result = __ template Allocate( __ IntPtrConstant(sizeof(HeapNumber)), AllocationType::kYoung); __ InitializeField(result, AccessBuilder::ForMap(), @@ -3274,7 +3243,7 @@ class MachineLoweringReducer : public Next { } V ConvertHeapObjectToFloat64OrDeopt( - V heap_object, OpIndex frame_state, + V heap_object, OpIndex frame_state, ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind input_kind, const FeedbackSource& feedback) { V map = __ LoadMapField(heap_object); @@ -3297,7 +3266,7 @@ class MachineLoweringReducer : public Next { __ TaggedEqual(map, __ HeapConstant(factory_->boolean_map())), frame_state, DeoptimizeReason::kNotANumberOrBoolean, feedback); } - END_IF + break; } case ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind:: @@ -3310,7 +3279,7 @@ class MachineLoweringReducer : public Next { frame_state, DeoptimizeReason::kNotANumberOrOddball, feedback); } - END_IF + break; } } @@ -3318,21 +3287,19 @@ class MachineLoweringReducer : public Next { heap_object, AccessBuilder::ForHeapNumberOrOddballOrHoleValue()); } - OpIndex LoadFromSeqString(V receiver, V position, + OpIndex LoadFromSeqString(V receiver, V position, V onebyte) { Label done(this); - IF(onebyte) { + IF (onebyte) { GOTO(done, __ template LoadNonArrayBufferElement( receiver, AccessBuilder::ForSeqOneByteStringCharacter(), position)); - } - ELSE { + } ELSE { GOTO(done, __ template LoadNonArrayBufferElement( receiver, AccessBuilder::ForSeqTwoByteStringCharacter(), position)); } - END_IF BIND(done, result); return result; diff --git a/deps/v8/src/compiler/turboshaft/machine-optimization-reducer.h b/deps/v8/src/compiler/turboshaft/machine-optimization-reducer.h index ca51848d06a3b4..5e7d5a35edc737 100644 --- 
a/deps/v8/src/compiler/turboshaft/machine-optimization-reducer.h +++ b/deps/v8/src/compiler/turboshaft/machine-optimization-reducer.h @@ -44,6 +44,109 @@ class VariableReducer; template class GraphVisitor; +namespace { + +// Represents an operation of the form `(source & mask) == masked_value`. +// where each bit set in masked_value also has to be set in mask. +struct BitfieldCheck { + OpIndex const source; + uint32_t const mask; + uint32_t const masked_value; + bool const truncate_from_64_bit; + + BitfieldCheck(OpIndex source, uint32_t mask, uint32_t masked_value, + bool truncate_from_64_bit) + : source(source), + mask(mask), + masked_value(masked_value), + truncate_from_64_bit(truncate_from_64_bit) { + CHECK_EQ(masked_value & ~mask, 0); + } + + static base::Optional Detect(const OperationMatcher& matcher, + const Graph& graph, + OpIndex index) { + // There are two patterns to check for here: + // 1. Single-bit checks: `(val >> shift) & 1`, where: + // - the shift may be omitted, and/or + // - the result may be truncated from 64 to 32 + // 2. Equality checks: `(val & mask) == expected`, where: + // - val may be truncated from 64 to 32 before masking (see + // ReduceWordEqualForConstantRhs) + const Operation& op = graph.Get(index); + if (const ComparisonOp* equal = op.TryCast()) { + if (const WordBinopOp* left_and = + graph.Get(equal->left()).TryCast()) { + uint32_t mask; + uint32_t masked_value; + if (matcher.MatchIntegralWord32Constant(left_and->right(), &mask) && + matcher.MatchIntegralWord32Constant(equal->right(), + &masked_value)) { + if ((masked_value & ~mask) != 0) return base::nullopt; + if (const ChangeOp* truncate = + graph.Get(left_and->left()) + .TryCast()) { + return BitfieldCheck{truncate->input(), mask, masked_value, true}; + } else { + return BitfieldCheck{left_and->left(), mask, masked_value, false}; + } + } + } + } else if (const ChangeOp* truncate = + op.TryCast()) { + return TryDetectShiftAndMaskOneBit( + matcher, truncate->input()); + } else { + return TryDetectShiftAndMaskOneBit(matcher, + index); + } + return base::nullopt; + } + + base::Optional TryCombine(const BitfieldCheck& other) { + if (source != other.source || + truncate_from_64_bit != other.truncate_from_64_bit) { + return base::nullopt; + } + uint32_t overlapping_bits = mask & other.mask; + // It would be kind of strange to have any overlapping bits, but they can be + // allowed as long as they don't require opposite values in the same + // positions. + if ((masked_value & overlapping_bits) != + (other.masked_value & overlapping_bits)) { + return base::nullopt; + } + return BitfieldCheck{source, mask | other.mask, + masked_value | other.masked_value, + truncate_from_64_bit}; + } + + private: + template + static base::Optional TryDetectShiftAndMaskOneBit( + const OperationMatcher& matcher, OpIndex index) { + const WordRepresentation Rep(R); + // Look for the pattern `(val >> shift) & 1`. The shift may be omitted. 
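Editorial sketch (not part of this patch): the new BitfieldCheck helper above merges two masked comparisons on the same source value into one. A standalone demonstration of the arithmetic that TryCombine relies on (the concrete masks are made up for illustration):

```cpp
#include <cassert>
#include <cstdint>

// ((x >> 3) & 1) && ((x & 0x30) == 0x10) is equivalent to the single check
// (x & (0x08 | 0x30)) == (0x08 | 0x10), because the two masks do not demand
// contradictory values on any overlapping bit.
bool SeparateChecks(uint32_t x) {
  return ((x >> 3) & 1) && ((x & 0x30u) == 0x10u);
}

bool CombinedCheck(uint32_t x) {
  return (x & 0x38u) == 0x18u;
}

int main() {
  for (uint32_t x = 0; x < 0x100; ++x) {
    assert(SeparateChecks(x) == CombinedCheck(x));
  }
  return 0;
}
```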
+ OpIndex value; + uint64_t constant; + if (matcher.MatchBitwiseAndWithConstant(index, &value, &constant, Rep) && + constant == 1) { + OpIndex input; + if (int shift_amount; + matcher.MatchConstantRightShift(value, &input, Rep, &shift_amount) && + shift_amount >= 0 && shift_amount < 32) { + uint32_t mask = 1 << shift_amount; + return BitfieldCheck{input, mask, mask, + Rep == WordRepresentation::Word64()}; + } + return BitfieldCheck{value, 1, 1, Rep == WordRepresentation::Word64()}; + } + return base::nullopt; + } +}; + +} // namespace + // The MachineOptimizationAssembler performs basic optimizations on low-level // operations that can be performed on-the-fly, without requiring type analysis // or analyzing uses. It largely corresponds to MachineOperatorReducer in @@ -582,11 +685,10 @@ class MachineOptimizationReducer : public Next { IF (UNLIKELY(__ FloatLessThanOrEqual( lhs, __ FloatConstant(-V8_INFINITY, rep), rep))) { __ SetVariable(result, __ FloatConstant(V8_INFINITY, rep)); - } - ELSE { + } ELSE { __ SetVariable(result, __ FloatSqrt(lhs, rep)); } - END_IF + return __ GetVariable(result); } } @@ -687,8 +789,25 @@ class MachineOptimizationReducer : public Next { } } - // TODO(tebbi): Detect and merge multiple bitfield checks for CSA/Torque - // code. + if (kind == WordBinopOp::Kind::kBitwiseAnd && + rep == WordRepresentation::Word32()) { + if (auto right_bitfield = + BitfieldCheck::Detect(matcher, __ output_graph(), right)) { + if (auto left_bitfield = + BitfieldCheck::Detect(matcher, __ output_graph(), left)) { + if (auto combined_bitfield = + left_bitfield->TryCombine(*right_bitfield)) { + OpIndex source = combined_bitfield->source; + if (combined_bitfield->truncate_from_64_bit) { + source = __ TruncateWord64ToWord32(source); + } + return __ Word32Equal( + __ Word32BitwiseAnd(source, combined_bitfield->mask), + combined_bitfield->masked_value); + } + } + } + } if (uint64_t right_value; matcher.MatchIntegralWordConstant(right, rep, &right_value)) { @@ -731,7 +850,7 @@ class MachineOptimizationReducer : public Next { } // left ^ 1 => left == 0 if left is 0 or 1 if (right_value == 1 && IsBit(left)) { - return __ Word32Equal(left, __ Word32Constant(0)); + return __ Word32Equal(left, 0); } // (x ^ -1) ^ -1 => x { @@ -781,9 +900,8 @@ class MachineOptimizationReducer : public Next { } // left * 2^k => left << k if (base::bits::IsPowerOfTwo(right_value)) { - OpIndex shift_amount = - __ Word32Constant(base::bits::WhichPowerOfTwo(right_value)); - return __ ShiftLeft(left, shift_amount, rep); + return __ ShiftLeft(left, base::bits::WhichPowerOfTwo(right_value), + rep); } break; case Kind::kBitwiseAnd: @@ -1349,11 +1467,26 @@ class MachineOptimizationReducer : public Next { if (matcher.MatchConstantShiftRightArithmeticShiftOutZeros( left, &x, rep_w, &k1) && matcher.MatchIntegralWordConstant(right, rep_w, &k2) && - CountLeadingSignBits(k2, rep_w) > k1 && - matcher.Get(left).saturated_use_count.IsZero()) { - return __ Comparison( - x, __ WordConstant(base::bits::Unsigned(k2) << k1, rep_w), kind, - rep_w); + CountLeadingSignBits(k2, rep_w) > k1) { + if (matcher.Get(left).saturated_use_count.IsZero()) { + return __ Comparison( + x, __ WordConstant(base::bits::Unsigned(k2) << k1, rep_w), kind, + rep_w); + } else if constexpr (reducer_list_contains< + ReducerList, ValueNumberingReducer>::value) { + // If the shift has uses, we only apply the transformation if the + // result would be GVNed away. 
+ OpIndex rhs = + __ WordConstant(base::bits::Unsigned(k2) << k1, rep_w); + static_assert(ComparisonOp::input_count == 2); + static_assert(sizeof(ComparisonOp) == 8); + base::SmallVector storage; + ComparisonOp* cmp = + CreateOperation(storage, x, rhs, kind, rep_w); + if (__ WillGVNOp(*cmp)) { + return __ Comparison(x, rhs, kind, rep_w); + } + } } // k2 > k1) => (k2 << k1) k1 && - matcher.Get(right).saturated_use_count.IsZero()) { - return __ Comparison( - __ WordConstant(base::bits::Unsigned(k2) << k1, rep_w), x, kind, - rep_w); + CountLeadingSignBits(k2, rep_w) > k1) { + if (matcher.Get(right).saturated_use_count.IsZero()) { + return __ Comparison( + __ WordConstant(base::bits::Unsigned(k2) << k1, rep_w), x, kind, + rep_w); + } else if constexpr (reducer_list_contains< + ReducerList, ValueNumberingReducer>::value) { + // If the shift has uses, we only apply the transformation if the + // result would be GVNed away. + OpIndex lhs = + __ WordConstant(base::bits::Unsigned(k2) << k1, rep_w); + static_assert(ComparisonOp::input_count == 2); + static_assert(sizeof(ComparisonOp) == 8); + base::SmallVector storage; + ComparisonOp* cmp = + CreateOperation(storage, lhs, x, kind, rep_w); + if (__ WillGVNOp(*cmp)) { + return __ Comparison(lhs, x, kind, rep_w); + } + } } } // Map 64bit to 32bit comparisons. diff --git a/deps/v8/src/compiler/turboshaft/maglev-early-lowering-reducer-inl.h b/deps/v8/src/compiler/turboshaft/maglev-early-lowering-reducer-inl.h new file mode 100644 index 00000000000000..d8f4dcbf76db82 --- /dev/null +++ b/deps/v8/src/compiler/turboshaft/maglev-early-lowering-reducer-inl.h @@ -0,0 +1,122 @@ +// Copyright 2024 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_TURBOSHAFT_MAGLEV_EARLY_LOWERING_REDUCER_INL_H_ +#define V8_COMPILER_TURBOSHAFT_MAGLEV_EARLY_LOWERING_REDUCER_INL_H_ + +#include "src/compiler/feedback-source.h" +#include "src/compiler/turboshaft/assembler.h" +#include "src/compiler/turboshaft/index.h" +#include "src/compiler/turboshaft/representations.h" +#include "src/deoptimizer/deoptimize-reason.h" +#include "src/objects/instance-type-inl.h" + +namespace v8::internal::compiler::turboshaft { + +#include "src/compiler/turboshaft/define-assembler-macros.inc" + +template +class MaglevEarlyLoweringReducer : public Next { + // This Reducer provides some helpers that are used during + // MaglevGraphBuildingPhase to lower some Maglev operators. Depending on what + // we decide going forward (regarding SimplifiedLowering for instance), we + // could introduce new Simplified or JS operations instead of using these + // helpers to lower, and turn the helpers into regular REDUCE methods in the + // new simplified lowering or in MachineLoweringReducer. 
+ + public: + TURBOSHAFT_REDUCER_BOILERPLATE(MaglevEarlyLowering) + + void CheckInstanceType(V input, OpIndex frame_state, + const FeedbackSource& feedback, + InstanceType first_instance_type, + InstanceType last_instance_type, bool check_smi) { + if (check_smi) { + __ DeoptimizeIf(__ IsSmi(input), frame_state, + DeoptimizeReason::kWrongInstanceType, feedback); + } + + V map = __ LoadMapField(input); + + if (first_instance_type == last_instance_type) { +#if V8_STATIC_ROOTS_BOOL + if (InstanceTypeChecker::UniqueMapOfInstanceType(first_instance_type)) { + base::Optional expected_index = + InstanceTypeChecker::UniqueMapOfInstanceType(first_instance_type); + CHECK(expected_index.has_value()); + Handle expected_map = Handle::cast( + isolate_->root_handle(expected_index.value())); + __ DeoptimizeIfNot(__ TaggedEqual(map, __ HeapConstant(expected_map)), + frame_state, DeoptimizeReason::kWrongInstanceType, + feedback); + return; + } +#endif // V8_STATIC_ROOTS_BOOL + V instance_type = __ LoadInstanceTypeField(map); + __ DeoptimizeIfNot(__ Word32Equal(instance_type, first_instance_type), + frame_state, DeoptimizeReason::kWrongInstanceType, + feedback); + } else { + V instance_type = __ LoadInstanceTypeField(map); + + V cond; + if (first_instance_type == 0) { + cond = __ Uint32LessThanOrEqual(instance_type, last_instance_type); + } else { + cond = __ Uint32LessThanOrEqual( + __ Word32Sub(instance_type, first_instance_type), + last_instance_type - first_instance_type); + } + __ DeoptimizeIfNot(cond, frame_state, + DeoptimizeReason::kWrongInstanceType, feedback); + } + } + + V CheckedInternalizedString( + V object, OpIndex frame_state, bool check_smi, + const FeedbackSource& feedback) { + if (check_smi) { + __ DeoptimizeIf(__ IsSmi(object), frame_state, DeoptimizeReason::kSmi, + feedback); + } + + Label done(this); + V map = __ LoadMapField(object); + V instance_type = __ LoadInstanceTypeField(map); + + // Go to the slow path if this is a non-string, or a non-internalised + // string. + static_assert((kStringTag | kInternalizedTag) == 0); + IF (UNLIKELY(__ Word32BitwiseAnd( + instance_type, kIsNotStringMask | kIsNotInternalizedMask))) { + // Deopt if this isn't a string. + __ DeoptimizeIf(__ Word32BitwiseAnd(instance_type, kIsNotStringMask), + frame_state, DeoptimizeReason::kWrongMap, feedback); + // Deopt if this isn't a thin string. + static_assert(base::bits::CountPopulation(kThinStringTagBit) == 1); + __ DeoptimizeIfNot(__ Word32BitwiseAnd(instance_type, kThinStringTagBit), + frame_state, DeoptimizeReason::kWrongMap, feedback); + // Load internalized string from thin string. 
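// Editor's note: illustrative sketch added by the editor, not part of this CL. The range branch
// of CheckInstanceType above uses the classic unsigned range-check trick: for a contiguous range
// [first, last], `first <= t && t <= last` is equivalent to the single unsigned comparison
// `uint32_t(t - first) <= uint32_t(last - first)`, because values below `first` wrap around to
// large unsigned numbers. Plain C++ with made-up bounds; <cstdint> and <cassert> assumed.
static void UnsignedRangeCheckExample() {
  const uint32_t first = 0x80, last = 0x9F;  // hypothetical instance-type range
  for (uint32_t t = 0; t < 0x200; ++t) {
    const bool two_compares = first <= t && t <= last;
    const bool one_compare = (t - first) <= (last - first);
    assert(two_compares == one_compare);
  }
}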
+ V intern_string = + __ template LoadField( + object, AccessBuilder::ForThinStringActual()); + GOTO(done, intern_string); + } ELSE { + GOTO(done, V::Cast(object)); + } + + BIND(done, result); + return result; + } + + LocalIsolate* isolate_ = PipelineData::Get().isolate()->AsLocalIsolate(); + JSHeapBroker* broker_ = PipelineData::Get().broker(); + LocalFactory* factory_ = isolate_->factory(); +}; + +#include "src/compiler/turboshaft/undef-assembler-macros.inc" + +} // namespace v8::internal::compiler::turboshaft + +#endif // V8_COMPILER_TURBOSHAFT_MAGLEV_EARLY_LOWERING_REDUCER_INL_H_ diff --git a/deps/v8/src/compiler/turboshaft/maglev-graph-building-phase.cc b/deps/v8/src/compiler/turboshaft/maglev-graph-building-phase.cc index 91faf8a5683668..e30cb4c32ed489 100644 --- a/deps/v8/src/compiler/turboshaft/maglev-graph-building-phase.cc +++ b/deps/v8/src/compiler/turboshaft/maglev-graph-building-phase.cc @@ -4,22 +4,30 @@ #include "src/compiler/turboshaft/maglev-graph-building-phase.h" +#include "src/codegen/optimized-compilation-info.h" +#include "src/compiler/access-builder.h" #include "src/compiler/globals.h" #include "src/compiler/js-heap-broker.h" #include "src/compiler/turboshaft/assembler.h" #include "src/compiler/turboshaft/machine-optimization-reducer.h" +#include "src/compiler/turboshaft/maglev-early-lowering-reducer-inl.h" #include "src/compiler/turboshaft/operations.h" +#include "src/compiler/turboshaft/phase.h" #include "src/compiler/turboshaft/representations.h" #include "src/compiler/turboshaft/required-optimization-reducer.h" #include "src/compiler/turboshaft/value-numbering-reducer.h" #include "src/compiler/turboshaft/variable-reducer.h" +#include "src/compiler/write-barrier-kind.h" #include "src/deoptimizer/deoptimize-reason.h" #include "src/handles/global-handles-inl.h" #include "src/handles/handles.h" +#include "src/interpreter/bytecode-register.h" #include "src/maglev/maglev-compilation-info.h" #include "src/maglev/maglev-graph-builder.h" +#include "src/maglev/maglev-graph-labeller.h" #include "src/maglev/maglev-graph-processor.h" #include "src/maglev/maglev-ir.h" +#include "src/objects/heap-object.h" namespace v8::internal::compiler::turboshaft { @@ -50,8 +58,9 @@ MachineType MachineTypeFor(maglev::ValueRepresentation repr) { class GraphBuilder { public: using AssemblerT = - TSAssembler; + TSAssembler; GraphBuilder(Graph& graph, Zone* temp_zone, maglev::MaglevCompilationUnit* maglev_compilation_unit) @@ -59,7 +68,8 @@ class GraphBuilder { assembler_(graph, graph, temp_zone), maglev_compilation_unit_(maglev_compilation_unit), node_mapping_(temp_zone), - block_mapping_(temp_zone) {} + block_mapping_(temp_zone), + regs_to_vars_(temp_zone) {} void PreProcessGraph(maglev::Graph* graph) { for (maglev::BasicBlock* block : *graph) { @@ -80,6 +90,23 @@ class GraphBuilder { __ Goto(Map(block)); } __ Bind(Map(block)); + + // Because of edge splitting in Maglev, the order of predecessors in the + // Turboshaft graph is not always the same as in the Maglev graph, which + // means that Phi inputs will have to be reordered. We thus compute in + // {predecessor_permutation_} the Turboshaft predecessors position of each + // Maglev predecessor, and we'll use this later when emitting Phis to + // reorder their inputs. 
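// Editor's note: illustrative sketch added by the editor, not part of this CL. It shows the idea
// behind {predecessor_permutation_}: when the same predecessors appear in a different order in
// the new graph, phi inputs must be permuted so that input i still belongs to predecessor i.
// Here perm[i] is assumed to give, for target slot i, the index of the matching source input;
// the real code derives the mapping from GetPredecessorIndex as above. <vector> assumed.
static std::vector<int> ReorderPhiInputs(const std::vector<int>& source_inputs,
                                         const std::vector<size_t>& perm) {
  std::vector<int> target_inputs(source_inputs.size());
  for (size_t i = 0; i < perm.size(); ++i) {
    target_inputs[i] = source_inputs[perm[i]];
  }
  return target_inputs;
}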
+ predecessor_permutation_.clear(); + if (block->has_phi()) { + for (int i = 0; i < block->predecessor_count(); ++i) { + Block* pred = Map(block->predecessor_at(i)); + int pred_index = __ current_block() -> GetPredecessorIndex(pred); + DCHECK_IMPLIES(pred_index == -1, + block->is_loop() && i == block->predecessor_count() - 1); + predecessor_permutation_.push_back(pred_index); + } + } } maglev::ProcessResult Process(maglev::Constant* node, @@ -144,13 +171,18 @@ class GraphBuilder { int input_count = node->input_count(); RegisterRepresentation rep = RegisterRepresentationFor(node->value_representation()); + if (node->is_exception_phi()) { + SetMap(node, __ GetVariable(regs_to_vars_[node->owner().index()])); + return maglev::ProcessResult::kContinue; + } if (__ current_block()->IsLoop()) { DCHECK_EQ(input_count, 2); SetMap(node, __ PendingLoopPhi(Map(node->input(0)), rep)); } else { + DCHECK(!predecessor_permutation_.empty()); base::SmallVector inputs; for (int i = 0; i < input_count; ++i) { - inputs.push_back(Map(node->input(i))); + inputs.push_back(Map(node->input(predecessor_permutation_[i]))); } SetMap(node, __ Phi(base::VectorOf(inputs), rep)); } @@ -159,10 +191,10 @@ class GraphBuilder { maglev::ProcessResult Process(maglev::CallKnownJSFunction* node, const maglev::ProcessingState& state) { - // TODO(dmercadier): handle builtin calls. - DCHECK(!node->shared_function_info().HasBuiltinId()); + ThrowingScope throwing_scope(this, node); + OpIndex frame_state = BuildFrameState(node->lazy_deopt_info()); - V callee = Map(node->closure()); + V callee = Map(node->closure()); base::SmallVector arguments; arguments.push_back(Map(node->receiver())); for (int i = 0; i < node->num_args(); i++) { @@ -183,6 +215,41 @@ class GraphBuilder { SetMap(node, __ Call(callee, frame_state, base::VectorOf(arguments), TSCallDescriptor::Create(descriptor, CanThrow::kYes, graph_zone()))); + + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::CallBuiltin* node, + const maglev::ProcessingState& state) { + ThrowingScope throwing_scope(this, node); + + OpIndex frame_state = BuildFrameState(node->lazy_deopt_info()); + Callable callable = Builtins::CallableFor( + isolate_->GetMainThreadIsolateUnsafe(), node->builtin()); + const CallInterfaceDescriptor& descriptor = callable.descriptor(); + CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor( + graph_zone(), descriptor, descriptor.GetStackParameterCount(), + CallDescriptor::kNeedsFrameState); + V stub_code = __ HeapConstant(callable.code()); + base::SmallVector arguments; + + for (int i = 0; i < node->InputCountWithoutContext(); i++) { + arguments.push_back(Map(node->input(i))); + } + + if (node->has_feedback()) { + arguments.push_back(__ TaggedIndexConstant(node->feedback().index())); + arguments.push_back(__ HeapConstant(node->feedback().vector)); + } + + if (Builtins::CallInterfaceDescriptorFor(node->builtin()) + .HasContextParameter()) { + arguments.push_back(Map(node->context_input())); + } + + SetMap(node, __ Call(stub_code, frame_state, base::VectorOf(arguments), + TSCallDescriptor::Create( + call_descriptor, CanThrow::kYes, graph_zone()))); + return maglev::ProcessResult::kContinue; } @@ -219,6 +286,38 @@ class GraphBuilder { node->eager_deopt_info()->feedback_to_update()); return maglev::ProcessResult::kContinue; } + maglev::ProcessResult Process(maglev::CheckString* node, + const maglev::ProcessingState& state) { + OpIndex frame_state = BuildFrameState(node->eager_deopt_info()); + ObjectIsOp::InputAssumptions 
input_assumptions = + node->check_type() == maglev::CheckType::kCheckHeapObject + ? ObjectIsOp::InputAssumptions::kNone + : ObjectIsOp::InputAssumptions::kHeapObject; + V check = __ ObjectIs(Map(node->receiver_input()), + ObjectIsOp::Kind::kString, input_assumptions); + __ DeoptimizeIfNot(check, frame_state, DeoptimizeReason::kNotAString, + node->eager_deopt_info()->feedback_to_update()); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::CheckInstanceType* node, + const maglev::ProcessingState& state) { + __ CheckInstanceType( + Map(node->receiver_input()), BuildFrameState(node->eager_deopt_info()), + node->eager_deopt_info()->feedback_to_update(), + node->first_instance_type(), node->last_instance_type(), + node->check_type() != maglev::CheckType::kOmitHeapObjectCheck); + + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::CheckDynamicValue* node, + const maglev::ProcessingState& state) { + OpIndex frame_state = BuildFrameState(node->eager_deopt_info()); + __ DeoptimizeIfNot( + __ TaggedEqual(Map(node->first_input()), Map(node->second_input())), + frame_state, DeoptimizeReason::kWrongValue, + node->eager_deopt_info()->feedback_to_update()); + return maglev::ProcessResult::kContinue; + } maglev::ProcessResult Process(maglev::CheckInt32Condition* node, const maglev::ProcessingState& state) { @@ -236,10 +335,73 @@ class GraphBuilder { return maglev::ProcessResult::kContinue; } + maglev::ProcessResult Process(maglev::AllocationBlock* node, + const maglev::ProcessingState& state) { + if (!node->is_used()) return maglev::ProcessResult::kRemove; + int size = 0; + for (auto alloc : node->allocation_list()) { + if (alloc->HasEscaped()) { + alloc->set_offset(size); + size += alloc->size(); + } + } + node->set_size(size); + SetMap(node, __ FinishInitialization( + __ Allocate(size, node->allocation_type()))); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::InlinedAllocation* node, + const maglev::ProcessingState& state) { + if (!node->HasEscaped()) return maglev::ProcessResult::kRemove; + V alloc = Map(node->allocation_block()); + SetMap(node, __ BitcastWordPtrToHeapObject(__ WordPtrAdd( + __ BitcastHeapObjectToWordPtr(alloc), node->offset()))); + return maglev::ProcessResult::kContinue; + } + + maglev::ProcessResult Process(maglev::StringConcat* node, + const maglev::ProcessingState& state) { + SetMap(node, __ StringConcat(Map(node->lhs()), Map(node->rhs()))); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::StringEqual* node, + const maglev::ProcessingState& state) { + SetMap(node, __ StringEqual(Map(node->lhs()), Map(node->rhs()))); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::StringLength* node, + const maglev::ProcessingState& state) { + SetMap(node, __ StringLength(Map(node->object_input()))); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::StringAt* node, + const maglev::ProcessingState& state) { + V char_code = + __ StringCharCodeAt(Map(node->string_input()), + __ ChangeUint32ToUintPtr(Map(node->index_input()))); + SetMap(node, __ ConvertCharCodeToString(char_code)); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::CheckedInternalizedString* node, + const maglev::ProcessingState& state) { + OpIndex frame_state = BuildFrameState(node->eager_deopt_info()); + SetMap(node, __ CheckedInternalizedString( + Map(node->object_input()), 
frame_state, + node->check_type() == maglev::CheckType::kCheckHeapObject, + node->eager_deopt_info()->feedback_to_update())); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::LoadTaggedField* node, const maglev::ProcessingState& state) { - SetMap(node, __ Load(Map(node->object_input()), LoadOp::Kind::TaggedBase(), - MemoryRepresentation::AnyTagged(), node->offset())); + SetMap(node, __ LoadTaggedField(Map(node->object_input()), node->offset())); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::LoadDoubleField* node, + const maglev::ProcessingState& state) { + V field = __ LoadTaggedField( + Map(node->object_input()), node->offset()); + SetMap(node, __ LoadField(field, AccessBuilder::ForHeapNumberValue())); return maglev::ProcessResult::kContinue; } maglev::ProcessResult Process(maglev::LoadFixedArrayElement* node, @@ -256,6 +418,122 @@ class GraphBuilder { __ ChangeInt32ToIntPtr(Map(node->index_input())))); return maglev::ProcessResult::kContinue; } + maglev::ProcessResult Process(maglev::LoadHoleyFixedDoubleArrayElement* node, + const maglev::ProcessingState& state) { + SetMap(node, __ LoadFixedDoubleArrayElement( + Map(node->elements_input()), + __ ChangeInt32ToIntPtr(Map(node->index_input())))); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process( + maglev::LoadHoleyFixedDoubleArrayElementCheckedNotHole* node, + const maglev::ProcessingState& state) { + OpIndex frame_state = BuildFrameState(node->eager_deopt_info()); + V result = __ LoadFixedDoubleArrayElement( + Map(node->elements_input()), + __ ChangeInt32ToIntPtr(Map(node->index_input()))); + __ DeoptimizeIf(__ Float64IsHole(result), frame_state, + DeoptimizeReason::kHole, + node->eager_deopt_info()->feedback_to_update()); + SetMap(node, result); + return maglev::ProcessResult::kContinue; + } + + maglev::ProcessResult Process(maglev::StoreTaggedFieldNoWriteBarrier* node, + const maglev::ProcessingState& state) { + __ Store(Map(node->object_input()), Map(node->value_input()), + StoreOp::Kind::TaggedBase(), MemoryRepresentation::AnyTagged(), + WriteBarrierKind::kNoWriteBarrier, node->offset()); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::StoreTaggedFieldWithWriteBarrier* node, + const maglev::ProcessingState& state) { + __ Store(Map(node->object_input()), Map(node->value_input()), + StoreOp::Kind::TaggedBase(), MemoryRepresentation::AnyTagged(), + WriteBarrierKind::kFullWriteBarrier, node->offset()); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::StoreDoubleField* node, + const maglev::ProcessingState& state) { + V field = __ LoadTaggedField( + Map(node->object_input()), node->offset()); + __ StoreField(field, AccessBuilder::ForHeapNumberValue(), + Map(node->value_input())); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process( + maglev::StoreFixedArrayElementNoWriteBarrier* node, + const maglev::ProcessingState& state) { + __ StoreFixedArrayElement(Map(node->elements_input()), + __ ChangeInt32ToIntPtr(Map(node->index_input())), + Map(node->value_input()), + WriteBarrierKind::kNoWriteBarrier); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process( + maglev::StoreFixedArrayElementWithWriteBarrier* node, + const maglev::ProcessingState& state) { + __ StoreFixedArrayElement(Map(node->elements_input()), + __ ChangeInt32ToIntPtr(Map(node->index_input())), + Map(node->value_input()), + 
WriteBarrierKind::kFullWriteBarrier); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::StoreFixedDoubleArrayElement* node, + const maglev::ProcessingState& state) { + __ StoreFixedDoubleArrayElement( + Map(node->elements_input()), + __ ChangeInt32ToIntPtr(Map(node->index_input())), + Map(node->value_input())); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::StoreMap* node, + const maglev::ProcessingState& state) { + __ Store(Map(node->object_input()), __ HeapConstant(node->map().object()), + StoreOp::Kind::TaggedBase(), MemoryRepresentation::TaggedPointer(), + WriteBarrierKind::kMapWriteBarrier, HeapObject::kMapOffset); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::StoreFloat64* node, + const maglev::ProcessingState& state) { + __ Store(Map(node->object_input()), Map(node->value_input()), + StoreOp::Kind::TaggedBase(), MemoryRepresentation::Float64(), + WriteBarrierKind::kNoWriteBarrier, node->offset()); + return maglev::ProcessResult::kContinue; + } + + // For-in specific operations. + maglev::ProcessResult Process(maglev::LoadEnumCacheLength* node, + const maglev::ProcessingState& state) { + V bitfield3 = + __ LoadField(V::Cast(Map(node->map_input())), + AccessBuilder::ForMapBitField3()); + V length = __ Word32ShiftRightLogical( + __ Word32BitwiseAnd(bitfield3, Map::Bits3::EnumLengthBits::kMask), + Map::Bits3::EnumLengthBits::kShift); + SetMap(node, length); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::CheckCacheIndicesNotCleared* node, + const maglev::ProcessingState& state) { + // If the cache length is zero, we don't have any indices, so we know this + // is ok even though the indices are the empty array. + IF_NOT (__ Word32Equal(Map(node->length_input()), 0)) { + // Otherwise, an empty array with non-zero required length is not valid. 
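// Editor's note: illustrative sketch added by the editor, not part of this CL. LoadEnumCacheLength
// above decodes a bit field the way BitField::decode does: mask the field out of the packed word,
// then shift it down to bit 0. The constants below are made up and are not the real Map::Bits3
// layout. <cstdint> and <cassert> assumed.
static void BitFieldDecodeExample() {
  constexpr uint32_t kShift = 3;
  constexpr uint32_t kMask = uint32_t{0x3FF} << kShift;   // hypothetical 10-bit field
  const uint32_t packed = (uint32_t{42} << kShift) | 0x5;  // field value 42 plus unrelated bits
  const uint32_t decoded = (packed & kMask) >> kShift;
  assert(decoded == 42u);
}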
+ V condition = + RootEqual(node->indices_input(), RootIndex::kEmptyFixedArray); + __ DeoptimizeIfNot(condition, BuildFrameState(node->eager_deopt_info()), + DeoptimizeReason::kWrongEnumIndices, + node->eager_deopt_info()->feedback_to_update()); + } + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::LoadTaggedFieldByFieldIndex* node, + const maglev::ProcessingState& state) { + SetMap(node, __ LoadFieldByIndex(Map(node->object_input()), + Map(node->index_input()))); + return maglev::ProcessResult::kContinue; + } maglev::ProcessResult Process(maglev::Jump* node, const maglev::ProcessingState& state) { @@ -270,6 +548,27 @@ return maglev::ProcessResult::kContinue; } + maglev::ProcessResult Process(maglev::Int32Compare* node, + const maglev::ProcessingState& state) { + V bool_res = ConvertCompare( + node->left_input(), node->right_input(), node->operation()); + SetMap(node, ConvertWord32ToJSBool(bool_res)); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::Float64Compare* node, + const maglev::ProcessingState& state) { + V bool_res = ConvertCompare( + node->left_input(), node->right_input(), node->operation()); + SetMap(node, ConvertWord32ToJSBool(bool_res)); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::TaggedEqual* node, + const maglev::ProcessingState& state) { + SetMap(node, ConvertWord32ToJSBool( + __ TaggedEqual(Map(node->lhs()), Map(node->rhs())))); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::BranchIfToBooleanTrue* node, const maglev::ProcessingState& state) { TruncateJSPrimitiveToUntaggedOp::InputAssumptions assumption = @@ -282,28 +581,56 @@ __ Branch(condition, Map(node->if_true()), Map(node->if_false())); return maglev::ProcessResult::kContinue; } - maglev::ProcessResult Process(maglev::Int32Compare* node, + maglev::ProcessResult Process(maglev::BranchIfInt32Compare* node, const maglev::ProcessingState& state) { - Label done(this); - IF (ConvertInt32Compare(node->left_input(), node->right_input(), - node->operation())) { - GOTO(done, __ HeapConstant(factory_->true_value())); - } - ELSE { - GOTO(done, __ HeapConstant(factory_->false_value())); - } - END_IF - BIND(done, result); - SetMap(node, result); + V condition = ConvertCompare( + node->left_input(), node->right_input(), node->operation()); + __ Branch(condition, Map(node->if_true()), Map(node->if_false())); return maglev::ProcessResult::kContinue; } - maglev::ProcessResult Process(maglev::BranchIfInt32Compare* node, + maglev::ProcessResult Process(maglev::BranchIfFloat64Compare* node, const maglev::ProcessingState& state) { - V condition = ConvertInt32Compare( + V condition = ConvertCompare( node->left_input(), node->right_input(), node->operation()); __ Branch(condition, Map(node->if_true()), Map(node->if_false())); return maglev::ProcessResult::kContinue; } + maglev::ProcessResult Process(maglev::BranchIfInt32ToBooleanTrue* node, + const maglev::ProcessingState& state) { + V condition = Map(node->condition_input()); + __ Branch(condition, Map(node->if_true()), Map(node->if_false())); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::BranchIfFloat64ToBooleanTrue* node, + const maglev::ProcessingState& state) { + V condition = __ Float64Equal(Map(node->condition_input()), 0.0); + // Swapping if_true and if_false because the real condition is "!= 0" + // rather than "== 0" (but Turboshaft doesn't
have Float64NotEqual). + __ Branch(condition, Map(node->if_false()), Map(node->if_true())); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::BranchIfReferenceEqual* node, + const maglev::ProcessingState& state) { + V condition = + __ TaggedEqual(Map(node->left_input()), Map(node->right_input())); + __ Branch(condition, Map(node->if_true()), Map(node->if_false())); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::BranchIfRootConstant* node, + const maglev::ProcessingState& state) { + V condition = + RootEqual(node->condition_input(), node->root_index()); + __ Branch(condition, Map(node->if_true()), Map(node->if_false())); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::BranchIfUndefinedOrNull* node, + const maglev::ProcessingState& state) { + __ GotoIf(RootEqual(node->condition_input(), RootIndex::kUndefinedValue), + Map(node->if_true())); + __ Branch(RootEqual(node->condition_input(), RootIndex::kNullValue), + Map(node->if_true()), Map(node->if_false())); + return maglev::ProcessResult::kContinue; + } maglev::ProcessResult Process(maglev::CheckedSmiUntag* node, const maglev::ProcessingState& state) { @@ -347,6 +674,16 @@ node->eager_deopt_info()->feedback_to_update()); return maglev::ProcessResult::kContinue; } + maglev::ProcessResult Process(maglev::Int32DecrementWithOverflow* node, + const maglev::ProcessingState& state) { + // Turboshaft doesn't have a dedicated Decrement operation; we use a regular + // subtraction instead. + SetMap(node, __ Word32SignedSubDeoptOnOverflow( + Map(node->value_input()), 1, + BuildFrameState(node->eager_deopt_info()), + node->eager_deopt_info()->feedback_to_update())); + return maglev::ProcessResult::kContinue; + } #define PROCESS_FLOAT64_BINOP(MaglevName, TurboshaftName) \ maglev::ProcessResult Process(maglev::Float64##MaglevName* node, \ @@ -412,23 +749,108 @@ // adjusting if the difference exceeds 0.5 (like SimplifiedLowering does // for lower Float64Round). OpIndex input = Map(node->input()); - ScopedVariable result(Asm(), + ScopedVariable result(this, __ Float64RoundUp(input)); - IF_NOT (__ Float64LessThanOrEqual(__ Float64Sub(*result, 0.5), input)) { - result = __ Float64Sub(*result, 1.0); + IF_NOT (__ Float64LessThanOrEqual(__ Float64Sub(result, 0.5), input)) { + result = __ Float64Sub(result, 1.0); } - END_IF - SetMap(node, *result); + + SetMap(node, result); + } + return maglev::ProcessResult::kContinue; + } + + maglev::ProcessResult Process(maglev::Float64Ieee754Unary* node, + const maglev::ProcessingState& state) { + FloatUnaryOp::Kind kind; + switch (node->ieee_function()) { +#define CASE(MathName, ExpName, EnumName) \ + case maglev::Float64Ieee754Unary::Ieee754Function::k##EnumName: \ + kind = FloatUnaryOp::Kind::k##EnumName; \ + break; + IEEE_754_UNARY_LIST(CASE) +#undef CASE } + SetMap(node, __ Float64Unary(Map(node->input()), kind)); return maglev::ProcessResult::kContinue; } +// Note that Maglev collects feedback in the generic binops and unops, so that +// Turbofan has a chance to get better feedback. However, once we reach Turbofan, +// we stop collecting feedback, since we've tried multiple times to keep +// collecting feedback in Turbofan, but it never seemed worth it. The latest +// occurrence of this was ended by this CL: https://crrev.com/c/4110858.
+#define PROCESS_GENERIC_BINOP(Name) \ + maglev::ProcessResult Process(maglev::Generic##Name* node, \ + const maglev::ProcessingState& state) { \ + OpIndex frame_state = BuildFrameState(node->lazy_deopt_info()); \ + SetMap(node, \ + __ Generic##Name(Map(node->left_input()), Map(node->right_input()), \ + frame_state, native_context())); \ + return maglev::ProcessResult::kContinue; \ + } + GENERIC_BINOP_LIST(PROCESS_GENERIC_BINOP) +#undef PROCESS_GENERIC_BINOP + +#define PROCESS_GENERIC_UNOP(Name) \ + maglev::ProcessResult Process(maglev::Generic##Name* node, \ + const maglev::ProcessingState& state) { \ + OpIndex frame_state = BuildFrameState(node->lazy_deopt_info()); \ + SetMap(node, __ Generic##Name(Map(node->operand_input()), frame_state, \ + native_context())); \ + return maglev::ProcessResult::kContinue; \ + } + GENERIC_UNOP_LIST(PROCESS_GENERIC_UNOP) +#undef PROCESS_GENERIC_UNOP + + maglev::ProcessResult Process(maglev::ToNumberOrNumeric* node, + const maglev::ProcessingState& state) { + OpIndex frame_state = BuildFrameState(node->lazy_deopt_info()); + SetMap(node, __ ToNumberOrNumeric(Map(node->value_input()), frame_state, + native_context(), node->mode())); + return maglev::ProcessResult::kContinue; + } + + maglev::ProcessResult Process(maglev::LogicalNot* node, + const maglev::ProcessingState& state) { + V condition = __ TaggedEqual( + Map(node->value()), __ HeapConstant(factory_->true_value())); + SetMap(node, ConvertWord32ToJSBool(condition, /*flip*/ true)); + return maglev::ProcessResult::kContinue; + } + + maglev::ProcessResult Process(maglev::ToBooleanLogicalNot* node, + const maglev::ProcessingState& state) { + TruncateJSPrimitiveToUntaggedOp::InputAssumptions assumption = + node->check_type() == maglev::CheckType::kCheckHeapObject + ? TruncateJSPrimitiveToUntaggedOp::InputAssumptions::kObject + : TruncateJSPrimitiveToUntaggedOp::InputAssumptions::kHeapObject; + V condition = __ TruncateJSPrimitiveToUntagged( + Map(node->value()), TruncateJSPrimitiveToUntaggedOp::UntaggedKind::kBit, + assumption); + SetMap(node, ConvertWord32ToJSBool(condition, /*flip*/ true)); + return maglev::ProcessResult::kContinue; + } + + maglev::ProcessResult Process(maglev::Int32ToBoolean* node, + const maglev::ProcessingState& state) { + SetMap(node, ConvertWord32ToJSBool(Map(node->value()), node->flip())); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::Float64ToBoolean* node, + const maglev::ProcessingState& state) { + V condition = __ Float64Equal(Map(node->value()), 0.0); + // {condition} is 0 if the input is truthy, and false otherwise (because we + // compared "== 0" rather than "!= 0"), so we need to negate `flip` in the + // call to ConvertWord32ToJSBool. 
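// Editor's note: illustrative sketch added by the editor, not part of this CL. Float64ToBoolean
// above (and BranchIfFloat64ToBooleanTrue earlier) both work around the missing Float64NotEqual
// by testing `value == 0.0` and then inverting the decision, either by swapping the branch
// targets or by negating `flip`. Plain C++ analogue of that inversion; NaN handling is out of
// scope here.
static bool SelectByZeroTest(double value, bool flip) {
  const bool is_zero = value == 0.0;  // the comparison that is actually emitted
  const bool truthy = !is_zero;       // what we really want to know
  return flip ? !truthy : truthy;     // matches ConvertWord32ToJSBool(is_zero, !flip)
}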
+ SetMap(node, ConvertWord32ToJSBool(condition, !node->flip())); + return maglev::ProcessResult::kContinue; + } maglev::ProcessResult Process(maglev::Int32ToNumber* node, const maglev::ProcessingState& state) { SetMap(node, __ ConvertInt32ToNumber(Map(node->input()))); return maglev::ProcessResult::kContinue; } - maglev::ProcessResult Process(maglev::Float64ToTagged* node, const maglev::ProcessingState& state) { // Float64ToTagged's conversion mode is used to control whether integer @@ -447,6 +869,36 @@ class GraphBuilder { CheckForMinusZeroMode::kCheckForMinusZero)); return maglev::ProcessResult::kContinue; } + maglev::ProcessResult Process(maglev::HoleyFloat64ToTagged* node, + const maglev::ProcessingState& state) { + Label done(this); + V input = Map(node->input()); + if (node->conversion_mode() == + maglev::HoleyFloat64ToTagged::ConversionMode::kCanonicalizeSmi) { + // ConvertUntaggedToJSPrimitive cannot at the same time canonicalize smis + // and handle holes. We thus manually insert a smi check when the + // conversion_mode is CanonicalizeSmi. + IF (__ Float64IsSmi(input)) { + GOTO(done, + __ ConvertUntaggedToJSPrimitive( + __ TruncateFloat64ToInt32OverflowUndefined(input), + ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind::kSmi, + RegisterRepresentation::Word32(), + ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kSigned, + CheckForMinusZeroMode::kDontCheckForMinusZero)); + } + } + GOTO(done, __ ConvertUntaggedToJSPrimitive( + Map(node->input()), + ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind:: + kHeapNumberOrUndefined, + RegisterRepresentation::Float64(), + ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kSigned, + CheckForMinusZeroMode::kCheckForMinusZero)); + BIND(done, result); + SetMap(node, result); + return maglev::ProcessResult::kContinue; + } maglev::ProcessResult Process(maglev::CheckedNumberOrOddballToFloat64* node, const maglev::ProcessingState& state) { @@ -460,6 +912,15 @@ class GraphBuilder { node->eager_deopt_info()->feedback_to_update())); return maglev::ProcessResult::kContinue; } + maglev::ProcessResult Process(maglev::UncheckedNumberOrOddballToFloat64* node, + const maglev::ProcessingState& state) { + SetMap(node, __ ConvertJSPrimitiveToUntagged( + Map(node->input()), + ConvertJSPrimitiveToUntaggedOp::UntaggedKind::kFloat64, + ConvertJSPrimitiveToUntaggedOp::InputAssumptions:: + kNumberOrOddball)); + return maglev::ProcessResult::kContinue; + } maglev::ProcessResult Process(maglev::TruncateUint32ToInt32* node, const maglev::ProcessingState& state) { // This doesn't matter in Turboshaft: both Uint32 and Int32 are Word32. 
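// Editor's note: illustrative sketch added by the editor, not part of this CL. The manual Smi
// check inserted above for the kCanonicalizeSmi mode amounts to: if the double is exactly a
// small integer (no fractional part, in Smi range, and not -0), tag it as a Smi, otherwise fall
// through to the HeapNumber/undefined path. Plain C++ approximation of that predicate; the
// 31-bit payload range is just an example, and <cmath> is assumed.
static bool IsSmiRepresentable(double value) {
  constexpr double kSmiMin = -(1 << 30);     // example: 31-bit Smi payload
  constexpr double kSmiMax = (1 << 30) - 1;
  if (!(value >= kSmiMin && value <= kSmiMax)) return false;  // also rejects NaN
  if (value != std::trunc(value)) return false;               // fractional part
  if (value == 0.0 && std::signbit(value)) return false;      // -0 needs a HeapNumber
  return true;
}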
@@ -479,6 +940,33 @@ class GraphBuilder { node->eager_deopt_info()->feedback_to_update())); return maglev::ProcessResult::kContinue; } + maglev::ProcessResult Process(maglev::ChangeInt32ToFloat64* node, + const maglev::ProcessingState& state) { + SetMap(node, __ ChangeInt32ToFloat64(Map(node->input()))); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::CheckedTruncateFloat64ToInt32* node, + const maglev::ProcessingState& state) { + SetMap(node, + __ ChangeFloat64ToInt32OrDeopt( + Map(node->input()), BuildFrameState(node->eager_deopt_info()), + CheckForMinusZeroMode::kCheckForMinusZero, + node->eager_deopt_info()->feedback_to_update())); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::HoleyFloat64ToMaybeNanFloat64* node, + const maglev::ProcessingState& state) { + SetMap(node, __ Float64SilenceNaN(Map(node->input()))); + return maglev::ProcessResult::kContinue; + } + + maglev::ProcessResult Process(maglev::ToObject* node, + const maglev::ProcessingState& state) { + SetMap(node, __ ConvertJSPrimitiveToObject( + Map(node->value_input()), Map(node->context()), + ConvertReceiverMode::kNotNullOrUndefined)); + return maglev::ProcessResult::kContinue; + } maglev::ProcessResult Process(maglev::Return* node, const maglev::ProcessingState& state) { @@ -486,6 +974,15 @@ class GraphBuilder { return maglev::ProcessResult::kContinue; } + maglev::ProcessResult Process(maglev::SetPendingMessage* node, + const maglev::ProcessingState& state) { + __ StoreMessage( + __ ExternalConstant( + ExternalReference::address_of_pending_message(isolate_)), + Map(node->value())); + return maglev::ProcessResult::kContinue; + } + maglev::ProcessResult Process(maglev::ReduceInterruptBudgetForReturn*, const maglev::ProcessingState&) { // No need to update the interrupt budget once we reach Turboshaft. @@ -618,9 +1115,9 @@ class GraphBuilder { combine, info); } - V ConvertInt32Compare(maglev::Input left_input, - maglev::Input right_input, - ::Operation operation) { + template + V ConvertCompare(maglev::Input left_input, maglev::Input right_input, + ::Operation operation) { ComparisonOp::Kind kind; bool swap_inputs = false; switch (operation) { @@ -644,10 +1141,10 @@ class GraphBuilder { default: UNREACHABLE(); } - V left = Map(left_input); - V right = Map(right_input); + V left = Map(left_input); + V right = Map(right_input); if (swap_inputs) std::swap(left, right); - return __ Comparison(left, right, kind, WordRepresentation::Word32()); + return __ Comparison(left, right, kind, V::rep); } V ConvertInt32Compare(maglev::Input left_input, @@ -699,6 +1196,12 @@ class GraphBuilder { return __ Comparison(left, right, kind, WordRepresentation::Word32()); } + V RootEqual(maglev::Input input, RootIndex root) { + return __ TaggedEqual( + Map(input), + __ HeapConstant(Handle::cast(isolate_->root_handle(root)))); + } + void FixLoopPhis(maglev::BasicBlock* loop) { DCHECK(loop->is_loop()); for (maglev::Phi* maglev_phi : *loop->phis()) { @@ -728,6 +1231,100 @@ class GraphBuilder { } } + // TODO(dmercadier): Using a Branch would open more optimization opportunities + // for BranchElimination compared to using a Select. However, in most cases, + // Maglev should avoid materializing JS booleans, so there is a good chance + // that it we actually need to do it, it's because we have to, and + // BranchElimination probably cannot help. Thus, using a Select rather than a + // Branch leads to smaller graphs, which is generally beneficial. 
Still, once + the graph builder is finished, we should evaluate whether Select or Branch + is the best choice here. + V ConvertWord32ToJSBool(V b, bool flip = false) { + V true_idx = __ HeapConstant(factory_->true_value()); + V false_idx = __ HeapConstant(factory_->false_value()); + if (flip) std::swap(true_idx, false_idx); + return __ Select(b, true_idx, false_idx, RegisterRepresentation::Tagged(), + BranchHint::kNone, SelectOp::Implementation::kBranch); + } + + class ThrowingScope { + // In Maglev, exception handlers have no predecessors, and their Phis are a + // bit special: they all correspond to interpreter registers, and get + // eventually initialized with the value that their predecessors have for + // the corresponding interpreter registers. + + // In Turboshaft, exception handlers have predecessors and contain regular + // phis. Creating a ThrowingScope takes care of recording in Variables + // the current value of interpreter registers (right before emitting a node + // that can throw), and sets the current_catch_block of the Assembler. + // Throwing operations that are emitted while the scope is active will + // automatically be wired to the catch handler. Then, when calling + // Process(Phi) on exception phis (= when processing the catch handler), + // these Phis will be mapped to the Variable corresponding to their owning + // interpreter register. + + public: + ThrowingScope(GraphBuilder* builder, maglev::NodeBase* throwing_node) + : builder_(*builder) { + DCHECK(throwing_node->properties().can_throw()); + const maglev::ExceptionHandlerInfo* info = + throwing_node->exception_handler_info(); + if (!info->HasExceptionHandler()) return; + + maglev::BasicBlock* block = info->catch_block.block_ptr(); + auto* liveness = block->state()->frame_state().liveness(); + + maglev::LazyDeoptInfo* deopt_info = throwing_node->lazy_deopt_info(); + const maglev::InterpretedDeoptFrame* lazy_frame; + switch (deopt_info->top_frame().type()) { + case maglev::DeoptFrame::FrameType::kInterpretedFrame: + lazy_frame = &deopt_info->top_frame().as_interpreted(); + break; + case maglev::DeoptFrame::FrameType::kInlinedArgumentsFrame: + UNREACHABLE(); + case maglev::DeoptFrame::FrameType::kConstructInvokeStubFrame: + case maglev::DeoptFrame::FrameType::kBuiltinContinuationFrame: + lazy_frame = &deopt_info->top_frame().parent()->as_interpreted(); + break; + } + + lazy_frame->frame_state()->ForEachValue( + lazy_frame->unit(), [this, liveness](maglev::ValueNode* value, + interpreter::Register reg) { + if (!reg.is_parameter() && !liveness->RegisterIsLive(reg.index())) { + // Skip, since not live at the handler offset. + return; + } + auto it = builder_.regs_to_vars_.find(reg.index()); + Variable var; + if (it == builder_.regs_to_vars_.end()) { + var = __ NewVariable(RegisterRepresentation::Tagged()); + builder_.regs_to_vars_.insert({reg.index(), var}); + } else { + var = it->second; + } + __ SetVariable(var, builder_.Map(value)); + }); + + DCHECK_EQ(__ current_catch_block(), nullptr); + __ set_current_catch_block(builder_.Map(block)); + } + + ~ThrowingScope() { + // Resetting the catch handler. It is always set on a case-by-case basis + // before emitting a throwing node, so there is no need to "reset the + // previous catch handler" or something like that, since there is no + // previous handler (there is a DCHECK in the ThrowingScope constructor + // checking that the current_catch_block is indeed nullptr when the scope + // is created).
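// Editor's note: illustrative sketch added by the editor, not part of this CL. The pattern
// ThrowingScope implements is "snapshot, then look up": right before a throwing operation, every
// live interpreter register is copied into a variable keyed by its register index; when the catch
// handler's exception phis are processed, each phi is resolved by looking up its register index
// in that map. Stand-in types only (plain ints instead of Variables); <unordered_map> and
// <cassert> assumed.
static void ThrowingScopeShapeExample() {
  std::unordered_map<int, int> regs_to_vars;  // interpreter register index -> variable id
  auto snapshot = [&](int reg_index, int variable_id) {
    regs_to_vars.insert({reg_index, variable_id});
  };
  snapshot(/*reg_index=*/0, /*variable_id=*/100);  // registers live at the handler
  snapshot(/*reg_index=*/2, /*variable_id=*/101);
  // Later, Process(Phi) for the exception phi of register 2 reads the variable back:
  assert(regs_to_vars.at(2) == 101);
}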
+ __ set_current_catch_block(nullptr); + } + + private: + GraphBuilder::AssemblerT& Asm() { return builder_.Asm(); } + GraphBuilder& builder_; + }; + OpIndex Map(const maglev::Input input) { return Map(input.node()); } OpIndex Map(const maglev::NodeBase* node) { DCHECK(node_mapping_[node].valid()); @@ -741,6 +1338,14 @@ class GraphBuilder { return idx; } + V native_context() { + if (!native_context_.valid()) { + native_context_ = + __ HeapConstant(broker_->target_native_context().object()); + } + return native_context_; + } + Zone* temp_zone_; LocalIsolate* isolate_ = PipelineData::Get().isolate()->AsLocalIsolate(); JSHeapBroker* broker_ = PipelineData::Get().broker(); @@ -749,6 +1354,9 @@ class GraphBuilder { maglev::MaglevCompilationUnit* maglev_compilation_unit_; ZoneUnorderedMap node_mapping_; ZoneUnorderedMap block_mapping_; + ZoneUnorderedMap regs_to_vars_; + V native_context_ = OpIndex::Invalid(); + base::SmallVector predecessor_permutation_; }; void MaglevGraphBuildingPhase::Run(Zone* temp_zone) { @@ -765,11 +1373,23 @@ void MaglevGraphBuildingPhase::Run(Zone* temp_zone) { : broker->isolate()->AsLocalIsolate(); maglev::Graph* maglev_graph = maglev::Graph::New(temp_zone, data.info()->is_osr()); + if (V8_UNLIKELY(data.info()->trace_turbo_graph())) { + compilation_info->set_graph_labeller(new maglev::MaglevGraphLabeller()); + } maglev::MaglevGraphBuilder maglev_graph_builder( local_isolate, compilation_info->toplevel_compilation_unit(), maglev_graph); maglev_graph_builder.Build(); + if (V8_UNLIKELY(data.info()->trace_turbo_graph())) { + CodeTracer* code_tracer = data.GetCodeTracer(); + CodeTracer::StreamScope tracing_scope(code_tracer); + tracing_scope.stream() + << "\n----- Maglev graph after MaglevGraphBuilding -----" << std::endl; + maglev::PrintGraph(tracing_scope.stream(), compilation_info.get(), + maglev_graph); + } + maglev::GraphProcessor builder( data.graph(), temp_zone, compilation_info->toplevel_compilation_unit()); builder.ProcessGraph(maglev_graph); diff --git a/deps/v8/src/compiler/turboshaft/memory-optimization-reducer.h b/deps/v8/src/compiler/turboshaft/memory-optimization-reducer.h index 30267553a11138..eda7c3823f40d1 100644 --- a/deps/v8/src/compiler/turboshaft/memory-optimization-reducer.h +++ b/deps/v8/src/compiler/turboshaft/memory-optimization-reducer.h @@ -250,11 +250,11 @@ class MemoryOptimizationReducer : public Next { if (analyzer_->IsFoldedAllocation(__ current_operation_origin())) { DCHECK_NE(__ GetVariable(top(type)), OpIndex::Invalid()); OpIndex obj_addr = __ GetVariable(top(type)); - __ SetVariable(top(type), __ PointerAdd(__ GetVariable(top(type)), size)); + __ SetVariable(top(type), __ WordPtrAdd(__ GetVariable(top(type)), size)); __ StoreOffHeap(top_address, __ GetVariable(top(type)), MemoryRepresentation::UintPtr()); return __ BitcastWordPtrToHeapObject( - __ PointerAdd(obj_addr, __ IntPtrConstant(kHeapObjectTag))); + __ WordPtrAdd(obj_addr, __ IntPtrConstant(kHeapObjectTag))); } __ SetVariable(top(type), __ LoadOffHeap(top_address, @@ -319,7 +319,7 @@ class MemoryOptimizationReducer : public Next { __ SetVariable(result, __ BitcastWordPtrToHeapObject(__ WordPtrAdd( top_value, __ IntPtrConstant(kHeapObjectTag)))); - OpIndex new_top = __ PointerAdd(top_value, size); + OpIndex new_top = __ WordPtrAdd(top_value, size); OpIndex limit = __ LoadOffHeap(limit_address, MemoryRepresentation::UintPtr()); __ GotoIfNot(LIKELY(__ UintPtrLessThan(new_top, limit)), call_runtime); @@ -356,7 +356,7 @@ class MemoryOptimizationReducer : public Next { OpIndex limit = __ 
LoadOffHeap(limit_address, MemoryRepresentation::UintPtr()); __ Branch(__ UintPtrLessThan( - __ PointerAdd(__ GetVariable(top(type)), reservation_size), + __ WordPtrAdd(__ GetVariable(top(type)), reservation_size), limit), done, call_runtime, BranchHint::kTrue); } @@ -366,7 +366,7 @@ class MemoryOptimizationReducer : public Next { OpIndex allocated = __ Call(allocate_builtin, {reservation_size}, AllocateBuiltinDescriptor()); __ SetVariable(top(type), - __ PointerSub(__ BitcastHeapObjectToWordPtr(allocated), + __ WordPtrSub(__ BitcastHeapObjectToWordPtr(allocated), __ IntPtrConstant(kHeapObjectTag))); __ Goto(done); } @@ -374,11 +374,11 @@ class MemoryOptimizationReducer : public Next { __ BindReachable(done); // Compute the new top and write it back. OpIndex obj_addr = __ GetVariable(top(type)); - __ SetVariable(top(type), __ PointerAdd(__ GetVariable(top(type)), size)); + __ SetVariable(top(type), __ WordPtrAdd(__ GetVariable(top(type)), size)); __ StoreOffHeap(top_address, __ GetVariable(top(type)), MemoryRepresentation::UintPtr()); return __ BitcastWordPtrToHeapObject( - __ PointerAdd(obj_addr, __ IntPtrConstant(kHeapObjectTag))); + __ WordPtrAdd(obj_addr, __ IntPtrConstant(kHeapObjectTag))); } OpIndex REDUCE(DecodeExternalPointer)(OpIndex handle, diff --git a/deps/v8/src/compiler/turboshaft/operation-matcher.h b/deps/v8/src/compiler/turboshaft/operation-matcher.h index cd3b64ad1502c6..da20d55fe07dff 100644 --- a/deps/v8/src/compiler/turboshaft/operation-matcher.h +++ b/deps/v8/src/compiler/turboshaft/operation-matcher.h @@ -206,13 +206,13 @@ class OperationMatcher { return true; } - bool MatchWasmStubCallConstant(OpIndex matched, int64_t* stub_id) const { + bool MatchWasmStubCallConstant(OpIndex matched, uint64_t* stub_id) const { const ConstantOp* op = TryCast(matched); if (!op) return false; if (op->kind != ConstantOp::Kind::kRelocatableWasmStubCall) { return false; } - *stub_id = op->signed_integral(); + *stub_id = op->integral(); return true; } @@ -276,6 +276,21 @@ class OperationMatcher { rep); } + bool MatchBitwiseAndWithConstant(OpIndex matched, OpIndex* value, + uint64_t* constant, + WordRepresentation rep) const { + OpIndex left, right; + if (!MatchBitwiseAnd(matched, &left, &right, rep)) return false; + if (MatchIntegralWordConstant(right, rep, constant)) { + *value = left; + return true; + } else if (MatchIntegralWordConstant(left, rep, constant)) { + *value = right; + return true; + } + return false; + } + bool MatchEqual(OpIndex matched, OpIndex* left, OpIndex* right, WordRepresentation rep) const { const ComparisonOp* op = TryCast(matched); diff --git a/deps/v8/src/compiler/turboshaft/operations.cc b/deps/v8/src/compiler/turboshaft/operations.cc index 7ad0cbe4e7c412..e54f045ddfb156 100644 --- a/deps/v8/src/compiler/turboshaft/operations.cc +++ b/deps/v8/src/compiler/turboshaft/operations.cc @@ -158,6 +158,26 @@ std::ostream& operator<<(std::ostream& os, OperationPrintStyle styled_op) { return os; } +std::ostream& operator<<(std::ostream& os, GenericBinopOp::Kind kind) { + switch (kind) { +#define PRINT_KIND(Name) \ + case GenericBinopOp::Kind::k##Name: \ + return os << #Name; + GENERIC_BINOP_LIST(PRINT_KIND) +#undef PRINT_KIND + } +} + +std::ostream& operator<<(std::ostream& os, GenericUnopOp::Kind kind) { + switch (kind) { +#define PRINT_KIND(Name) \ + case GenericUnopOp::Kind::k##Name: \ + return os << #Name; + GENERIC_UNOP_LIST(PRINT_KIND) +#undef PRINT_KIND + } +} + std::ostream& operator<<(std::ostream& os, WordUnaryOp::Kind kind) { switch (kind) { case 
WordUnaryOp::Kind::kReverseBytes: @@ -1025,6 +1045,8 @@ std::ostream& operator<<(std::ostream& os, ObjectIsOp::Kind kind) { return os << "Smi"; case ObjectIsOp::Kind::kString: return os << "String"; + case ObjectIsOp::Kind::kStringOrStringWrapper: + return os << "StringOrStringWrapper"; case ObjectIsOp::Kind::kSymbol: return os << "Symbol"; case ObjectIsOp::Kind::kUndetectable: @@ -1054,6 +1076,8 @@ std::ostream& operator<<(std::ostream& os, NumericKind kind) { return os << "Integer"; case NumericKind::kSafeInteger: return os << "SafeInteger"; + case NumericKind::kSmi: + return os << "kSmi"; case NumericKind::kMinusZero: return os << "MinusZero"; case NumericKind::kNaN: @@ -1618,6 +1642,65 @@ void Simd128ShuffleOp::PrintOptions(std::ostream& os) const { PrintSimd128Value(os, shuffle); } +#if V8_ENABLE_WASM_SIMD256_REVEC +void Simd256LoadTransformOp::PrintOptions(std::ostream& os) const { + os << "["; + if (load_kind.maybe_unaligned) os << "unaligned, "; + if (load_kind.with_trap_handler) os << "protected, "; + + switch (transform_kind) { +#define PRINT_KIND(kind) \ + case TransformKind::k##kind: \ + os << #kind; \ + break; + FOREACH_SIMD_256_LOAD_TRANSFORM_OPCODE(PRINT_KIND) +#undef PRINT_KIND + } + + os << ", offset: " << offset << "]"; +} + +std::ostream& operator<<(std::ostream& os, Simd256UnaryOp::Kind kind) { + switch (kind) { +#define PRINT_KIND(kind) \ + case Simd256UnaryOp::Kind::k##kind: \ + return os << #kind; + FOREACH_SIMD_256_UNARY_OPCODE(PRINT_KIND) + } +#undef PRINT_KIND +} + +std::ostream& operator<<(std::ostream& os, Simd256TernaryOp::Kind kind) { + switch (kind) { +#define PRINT_KIND(kind) \ + case Simd256TernaryOp::Kind::k##kind: \ + return os << #kind; + FOREACH_SIMD_256_TERNARY_OPCODE(PRINT_KIND) + } +#undef PRINT_KIND +} + +std::ostream& operator<<(std::ostream& os, Simd256BinopOp::Kind kind) { + switch (kind) { +#define PRINT_KIND(kind) \ + case Simd256BinopOp::Kind::k##kind: \ + return os << #kind; + FOREACH_SIMD_256_BINARY_OPCODE(PRINT_KIND) + } +#undef PRINT_KIND +} + +std::ostream& operator<<(std::ostream& os, Simd256ShiftOp::Kind kind) { + switch (kind) { +#define PRINT_KIND(kind) \ + case Simd256ShiftOp::Kind::k##kind: \ + return os << #kind; + FOREACH_SIMD_256_SHIFT_OPCODE(PRINT_KIND) + } +#undef PRINT_KIND +} +#endif // V8_ENABLE_WASM_SIMD256_REVEC + void WasmAllocateArrayOp::PrintOptions(std::ostream& os) const { os << '[' << array_type->element_type() << "]"; } diff --git a/deps/v8/src/compiler/turboshaft/operations.h b/deps/v8/src/compiler/turboshaft/operations.h index d7a80c6728cadc..fc1994105b37d5 100644 --- a/deps/v8/src/compiler/turboshaft/operations.h +++ b/deps/v8/src/compiler/turboshaft/operations.h @@ -133,6 +133,18 @@ using Variable = SnapshotTable::Key; V(StringAsWtf16) \ V(StringPrepareForGetCodeUnit) +#if V8_ENABLE_WASM_SIMD256_REVEC +#define TURBOSHAFT_SIMD256_OPERATION_LIST(V) \ + V(Simd256Extract128Lane) \ + V(Simd256LoadTransform) \ + V(Simd256Unary) \ + V(Simd256Binop) \ + V(Simd256Shift) \ + V(Simd256Ternary) +#else +#define TURBOSHAFT_SIMD256_OPERATION_LIST(V) +#endif + #define TURBOSHAFT_SIMD_OPERATION_LIST(V) \ V(Simd128Constant) \ V(Simd128Binop) \ @@ -145,7 +157,8 @@ using Variable = SnapshotTable::Key; V(Simd128ReplaceLane) \ V(Simd128LaneMemory) \ V(Simd128LoadTransform) \ - V(Simd128Shuffle) + V(Simd128Shuffle) \ + TURBOSHAFT_SIMD256_OPERATION_LIST(V) #else #define TURBOSHAFT_WASM_OPERATION_LIST(V) @@ -266,7 +279,11 @@ using Variable = SnapshotTable::Key; // These are operations used in the frontend and are mostly tied to JS // 
semantics. -#define TURBOSHAFT_JS_OPERATION_LIST(V) V(SpeculativeNumberBinop) +#define TURBOSHAFT_JS_OPERATION_LIST(V) \ + V(SpeculativeNumberBinop) \ + V(GenericBinop) \ + V(GenericUnop) \ + V(ToNumberOrNumeric) // These are operations that are not Machine operations and need to be lowered // before Instruction Selection, but they are not lowered during the @@ -1242,6 +1259,120 @@ V8_EXPORT_PRIVATE bool ValidOpInputRep( const Graph& graph, OpIndex input, RegisterRepresentation expected_rep, base::Optional projection_index = {}); +struct GenericBinopOp : FixedArityOperationT<4, GenericBinopOp> { +#define GENERIC_BINOP_LIST(V) \ + V(Add) \ + V(Multiply) \ + V(Subtract) \ + V(Divide) \ + V(Modulus) \ + V(Exponentiate) \ + V(BitwiseAnd) \ + V(BitwiseOr) \ + V(BitwiseXor) \ + V(ShiftLeft) \ + V(ShiftRight) \ + V(ShiftRightLogical) \ + V(Equal) \ + V(StrictEqual) \ + V(LessThan) \ + V(LessThanOrEqual) \ + V(GreaterThan) \ + V(GreaterThanOrEqual) + enum class Kind : uint8_t { +#define DEFINE_KIND(Name) k##Name, + GENERIC_BINOP_LIST(DEFINE_KIND) +#undef DEFINE_KIND + }; + Kind kind; + + static constexpr OpEffects effects = OpEffects().CanCallAnything(); + base::Vector outputs_rep() const { + return RepVector(); + } + + base::Vector inputs_rep( + ZoneVector& storage) const { + return MaybeRepVector(); + } + + V left() const { return input(0); } + V right() const { return input(1); } + OpIndex frame_state() const { return input(2); } + V context() const { return input(3); } + + GenericBinopOp(V left, V right, OpIndex frame_state, + V context, Kind kind) + : Base(left, right, frame_state, context), kind(kind) {} + + void Validate(const Graph& graph) const {} + auto options() const { return std::tuple{kind}; } +}; +V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, + GenericBinopOp::Kind kind); + +struct GenericUnopOp : FixedArityOperationT<3, GenericUnopOp> { +#define GENERIC_UNOP_LIST(V) \ + V(BitwiseNot) \ + V(Negate) \ + V(Increment) \ + V(Decrement) + enum class Kind : uint8_t { +#define DEFINE_KIND(Name) k##Name, + GENERIC_UNOP_LIST(DEFINE_KIND) +#undef DEFINE_KIND + }; + Kind kind; + + static constexpr OpEffects effects = OpEffects().CanCallAnything(); + base::Vector outputs_rep() const { + return RepVector(); + } + + base::Vector inputs_rep( + ZoneVector& storage) const { + return MaybeRepVector(); + } + + V input() const { return Base::input(0); } + OpIndex frame_state() const { return Base::input(1); } + V context() const { return Base::input(2); } + + GenericUnopOp(V input, OpIndex frame_state, V context, + Kind kind) + : Base(input, frame_state, context), kind(kind) {} + + void Validate(const Graph& graph) const {} + auto options() const { return std::tuple{kind}; } +}; +V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, + GenericUnopOp::Kind kind); + +struct ToNumberOrNumericOp : FixedArityOperationT<3, ToNumberOrNumericOp> { + Object::Conversion kind; + + static constexpr OpEffects effects = OpEffects().CanCallAnything(); + base::Vector outputs_rep() const { + return RepVector(); + } + base::Vector inputs_rep( + ZoneVector& storage) const { + return MaybeRepVector(); + } + + V input() const { return Base::input(0); } + OpIndex frame_state() const { return Base::input(1); } + V context() const { return Base::input(2); } + + ToNumberOrNumericOp(V input, OpIndex frame_state, V context, + Object::Conversion kind) + : Base(input, frame_state, context), kind(kind) {} + + void Validate(const Graph& graph) const {} + auto options() const { return std::tuple{kind}; } +}; + 
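// Editor's note: illustrative sketch added by the editor, not part of this CL. It shows the
// X-macro pattern used by GENERIC_BINOP_LIST / GENERIC_UNOP_LIST above: a single list macro
// drives the enum definition, the operator<< switch, and the per-node Process methods in the
// Maglev graph builder, so adding one entry updates all of them at once. Minimal standalone
// version with a made-up list; <cstdint> assumed.
#define EXAMPLE_BINOP_LIST(V) V(Add) V(Subtract) V(Multiply)

enum class ExampleBinopKind : uint8_t {
#define DEFINE_KIND(Name) k##Name,
  EXAMPLE_BINOP_LIST(DEFINE_KIND)
#undef DEFINE_KIND
};

inline const char* ExampleBinopKindToString(ExampleBinopKind kind) {
  switch (kind) {
#define PRINT_KIND(Name) case ExampleBinopKind::k##Name: return #Name;
    EXAMPLE_BINOP_LIST(PRINT_KIND)
#undef PRINT_KIND
  }
  return "unknown";
}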
struct WordBinopOp : FixedArityOperationT<2, WordBinopOp> { enum class Kind : uint8_t { kAdd, @@ -1335,8 +1466,7 @@ struct WordBinopOp : FixedArityOperationT<2, WordBinopOp> { WordBinopOp(OpIndex left, OpIndex right, Kind kind, WordRepresentation rep) : Base(left, right), kind(kind), rep(rep) {} - void Validate(const Graph& graph) const { - } + void Validate(const Graph& graph) const {} auto options() const { return std::tuple{kind, rep}; } void PrintOptions(std::ostream& os) const; }; @@ -1762,7 +1892,8 @@ DEFINE_MULTI_SWITCH_INTEGRAL(ComparisonOp::Kind, 8) struct ChangeOp : FixedArityOperationT<1, ChangeOp> { enum class Kind : uint8_t { - // convert between different floating-point types + // convert between different floating-point types. Note that the + // Float64->Float32 conversion is truncating. kFloatConversion, // overflow guaranteed to result in the minimal integer kSignedFloatTruncateOverflowToMin, @@ -2394,18 +2525,22 @@ struct ConstantOp : FixedArityOperationT<0, ConstantOp> { case Kind::kFloat32: // Using a bit_cast to uint32_t in order to return false when comparing // +0 and -0. + // Note: for JavaScript, it would be fine to return true when both + // values are NaNs, but for Wasm we must not merge NaNs that way. + // Since we canonicalize NaNs for JS anyway, we don't need to treat + // them specially here. return base::bit_cast(storage.float32) == - base::bit_cast(other.storage.float32) || - (std::isnan(storage.float32) && - std::isnan(other.storage.float32)); + base::bit_cast(other.storage.float32); case Kind::kFloat64: case Kind::kNumber: // Using a bit_cast to uint64_t in order to return false when comparing // +0 and -0. + // Note: for JavaScript, it would be fine to return true when both + // values are NaNs, but for Wasm we must not merge NaNs that way. + // Since we canonicalize NaNs for JS anyway, we don't need to treat + // them specially here. return base::bit_cast(storage.float64) == - base::bit_cast(other.storage.float64) || - (std::isnan(storage.float64) && - std::isnan(other.storage.float64)); + base::bit_cast(other.storage.float64); case Kind::kExternal: return storage.external.address() == other.storage.external.address(); case Kind::kHeapObject: @@ -3065,8 +3200,7 @@ struct AllocateOp : FixedArityOperationT<1, AllocateOp> { AllocateOp(OpIndex size, AllocationType type) : Base(size), type(type) {} - void Validate(const Graph& graph) const { - } + void Validate(const Graph& graph) const {} void PrintOptions(std::ostream& os) const; auto options() const { return std::tuple{type}; } @@ -3794,13 +3928,13 @@ struct ReturnOp : OperationT { } // Number of additional stack slots to be removed. 
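// Editor's note: illustrative sketch added by the editor, not part of this CL. The ConstantOp
// equality change above compares float constants by bit pattern: +0.0 and -0.0 stay distinct, and
// two NaNs only match when their bit patterns (including the payload) are identical, so distinct
// NaNs are no longer merged, which matters for Wasm. Plain C++20 analogue; <bit>, <cstdint>,
// <cmath> and <cassert> assumed.
static void BitwiseFloatConstantEqualityExample() {
  assert(0.0 == -0.0);  // numeric comparison would merge the two zeros...
  assert(std::bit_cast<uint64_t>(0.0) != std::bit_cast<uint64_t>(-0.0));  // ...bit comparison does not
  const double nan_a = std::bit_cast<double>(uint64_t{0x7FF8000000000000});
  const double nan_b = std::bit_cast<double>(uint64_t{0x7FF8000000000001});
  assert(std::isnan(nan_a) && std::isnan(nan_b));
  // Different payloads: previously considered equal constants, now kept apart.
  assert(std::bit_cast<uint64_t>(nan_a) != std::bit_cast<uint64_t>(nan_b));
}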
- OpIndex pop_count() const { return input(0); } + V pop_count() const { return input(0); } base::Vector return_values() const { return inputs().SubVector(1, input_count); } - ReturnOp(OpIndex pop_count, base::Vector return_values) + ReturnOp(V pop_count, base::Vector return_values) : Base(1 + return_values.size()) { base::Vector inputs = this->inputs(); inputs[0] = pop_count; @@ -3816,7 +3950,7 @@ struct ReturnOp : OperationT { void Validate(const Graph& graph) const { } - static ReturnOp& New(Graph* graph, OpIndex pop_count, + static ReturnOp& New(Graph* graph, V pop_count, base::Vector return_values) { return Base::New(graph, 1 + return_values.size(), pop_count, return_values); } @@ -4043,6 +4177,7 @@ struct ObjectIsOp : FixedArityOperationT<1, ObjectIsOp> { kReceiverOrNullOrUndefined, kSmi, kString, + kStringOrStringWrapper, kSymbol, kUndetectable, }; @@ -4086,6 +4221,7 @@ enum class NumericKind : uint8_t { kFinite, kInteger, kSafeInteger, + kSmi, kMinusZero, kNaN, }; @@ -4554,8 +4690,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<( std::ostream& os, TruncateJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind kind); -struct ConvertJSPrimitiveToObjectOp - : FixedArityOperationT<3, ConvertJSPrimitiveToObjectOp> { +struct ConvertJSPrimitiveToObjectOp : OperationT { ConvertReceiverMode mode; static constexpr OpEffects effects = OpEffects().CanCallAnything(); @@ -4569,17 +4704,39 @@ struct ConvertJSPrimitiveToObjectOp MaybeRegisterRepresentation::Tagged()>(); } - OpIndex value() const { return Base::input(0); } - OpIndex native_context() const { return Base::input(1); } - OpIndex global_proxy() const { return Base::input(2); } + V value() const { return Base::input(0); } + V native_context() const { return Base::input(1); } + OptionalV global_proxy() const { + return input_count > 2 ? 
Base::input(2) : OpIndex::Invalid(); + } - ConvertJSPrimitiveToObjectOp(OpIndex value, OpIndex native_context, - OpIndex global_proxy, ConvertReceiverMode mode) - : Base(value, native_context, global_proxy), mode(mode) {} + ConvertJSPrimitiveToObjectOp(V value, V native_context, + OptionalV global_proxy, + ConvertReceiverMode mode) + : Base(2 + global_proxy.valid()), mode(mode) { + input(0) = value; + input(1) = native_context; + if (global_proxy.valid()) { + input(2) = global_proxy.value(); + } + } - void Validate(const Graph& graph) const { + static ConvertJSPrimitiveToObjectOp& New(Graph* graph, V value, + V native_context, + OptionalV global_proxy, + ConvertReceiverMode mode) { + return Base::New(graph, 2 + global_proxy.valid(), value, native_context, + global_proxy, mode); + } + + template + V8_INLINE auto Explode(Fn fn, Mapper& mapper) const { + return fn(mapper.Map(value()), mapper.Map(native_context()), + mapper.Map(global_proxy()), mode); } + void Validate(const Graph& graph) const {} + auto options() const { return std::tuple{mode}; } }; @@ -5247,6 +5404,8 @@ inline constexpr RegisterRepresentation RegisterRepresentationForArrayType( case kExternalBigInt64Array: case kExternalBigUint64Array: return RegisterRepresentation::Word64(); + case kExternalFloat16Array: + UNIMPLEMENTED(); } } @@ -5479,8 +5638,7 @@ struct TransitionAndStoreArrayElementOp fast_map(fast_map), double_map(double_map) {} - void Validate(const Graph& graph) const { - } + void Validate(const Graph& graph) const {} RegisterRepresentation value_representation() const { switch (kind) { @@ -6257,7 +6415,7 @@ struct WasmTypeCheckOp : OperationT { static constexpr OpEffects effects = OpEffects().AssumesConsistentHeap(); - WasmTypeCheckOp(V object, OptionalV rtt, + WasmTypeCheckOp(V object, OptionalV rtt, WasmTypeCheckConfig config) : Base(1 + rtt.valid()), config(config) { input(0) = object; @@ -6271,8 +6429,8 @@ struct WasmTypeCheckOp : OperationT { return fn(mapper.Map(object()), mapper.Map(rtt()), config); } - V object() const { return Base::input(0); } - OptionalV rtt() const { + V object() const { return Base::input(0); } + OptionalV rtt() const { return input_count > 1 ? input(1) : OpIndex::Invalid(); } @@ -6292,8 +6450,8 @@ struct WasmTypeCheckOp : OperationT { auto options() const { return std::tuple{config}; } - static WasmTypeCheckOp& New(Graph* graph, V object, - OptionalV rtt, + static WasmTypeCheckOp& New(Graph* graph, V object, + OptionalV rtt, WasmTypeCheckConfig config) { return Base::New(graph, 1 + rtt.valid(), object, rtt, config); } @@ -6304,7 +6462,7 @@ struct WasmTypeCastOp : OperationT { static constexpr OpEffects effects = OpEffects().CanLeaveCurrentFunction(); - WasmTypeCastOp(V object, OptionalV rtt, + WasmTypeCastOp(V object, OptionalV rtt, WasmTypeCheckConfig config) : Base(1 + rtt.valid()), config(config) { input(0) = object; @@ -6318,8 +6476,8 @@ struct WasmTypeCastOp : OperationT { return fn(mapper.Map(object()), mapper.Map(rtt()), config); } - V object() const { return Base::input(0); } - OptionalV rtt() const { + V object() const { return Base::input(0); } + OptionalV rtt() const { return input_count > 1 ? 
input(1) : OpIndex::Invalid(); } @@ -6339,8 +6497,8 @@ struct WasmTypeCastOp : OperationT { auto options() const { return std::tuple{config}; } - static WasmTypeCastOp& New(Graph* graph, V object, - OptionalV rtt, + static WasmTypeCastOp& New(Graph* graph, V object, + OptionalV rtt, WasmTypeCheckConfig config) { return Base::New(graph, 1 + rtt.valid(), object, rtt, config); } @@ -6356,7 +6514,7 @@ struct WasmTypeAnnotationOp : FixedArityOperationT<1, WasmTypeAnnotationOp> { explicit WasmTypeAnnotationOp(OpIndex value, wasm::ValueType type) : Base(value), type(type) {} - V value() const { return Base::input(0); } + V value() const { return Base::input(0); } base::Vector outputs_rep() const { return RepVector(); @@ -6382,9 +6540,9 @@ struct AnyConvertExternOp : FixedArityOperationT<1, AnyConvertExternOp> { SmiValuesAre31Bits() ? OpEffects().CanReadMemory() : OpEffects().CanReadMemory().CanAllocate(); - explicit AnyConvertExternOp(V object) : Base(object) {} + explicit AnyConvertExternOp(V object) : Base(object) {} - V object() const { return Base::input(0); } + V object() const { return Base::input(0); } base::Vector outputs_rep() const { return RepVector(); @@ -6403,9 +6561,9 @@ struct AnyConvertExternOp : FixedArityOperationT<1, AnyConvertExternOp> { struct ExternConvertAnyOp : FixedArityOperationT<1, ExternConvertAnyOp> { static constexpr OpEffects effects = OpEffects(); - explicit ExternConvertAnyOp(V object) : Base(object) {} + explicit ExternConvertAnyOp(V object) : Base(object) {} - V object() const { return Base::input(0); } + V object() const { return Base::input(0); } base::Vector outputs_rep() const { return RepVector(); @@ -6548,8 +6706,7 @@ struct ArrayGetOp : FixedArityOperationT<2, ArrayGetOp> { MaybeRegisterRepresentation::Word32()>(); } - void Validate(const Graph& graph) const { - } + void Validate(const Graph& graph) const {} auto options() const { return std::tuple{array_type, is_signed}; } void PrintOptions(std::ostream& os) const; @@ -6678,7 +6835,7 @@ struct WasmRefFuncOp : FixedArityOperationT<1, WasmRefFuncOp> { static constexpr OpEffects effects = OpEffects().CanAllocate(); uint32_t function_index; - explicit WasmRefFuncOp(V wasm_instance, uint32_t function_index) + explicit WasmRefFuncOp(V wasm_instance, uint32_t function_index) : Base(wasm_instance), function_index(function_index) {} OpIndex instance() const { return Base::input(0); } @@ -6706,7 +6863,7 @@ struct StringAsWtf16Op : FixedArityOperationT<1, StringAsWtf16Op> { .CanDependOnChecks() .CanReadMemory(); - explicit StringAsWtf16Op(V string) : Base(string) {} + explicit StringAsWtf16Op(V string) : Base(string) {} OpIndex string() const { return input(0); } @@ -6732,7 +6889,7 @@ struct StringPrepareForGetCodeUnitOp // This should not float above a protective null/length check. 
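ConvertJSPrimitiveToObjectOp and the WasmTypeCheck/WasmTypeCast operations above use a variable-arity layout with an optional trailing input: the operation's input count is the fixed count plus one when the optional index is valid, and the accessor returns an invalid index otherwise. A standalone sketch of that pattern with simplified stand-ins for OpIndex/OptionalOpIndex (none of the Demo* types below are V8 code):

#include <cassert>
#include <cstddef>
#include <optional>
#include <vector>

struct DemoOpIndex {
  int id = -1;
  bool valid() const { return id >= 0; }
};

struct DemoCheckOp {
  std::vector<DemoOpIndex> inputs;

  DemoCheckOp(DemoOpIndex object, std::optional<DemoOpIndex> rtt) {
    inputs.push_back(object);                     // input 0: always present
    if (rtt.has_value()) inputs.push_back(*rtt);  // input 1: only if supplied
  }

  size_t input_count() const { return inputs.size(); }
  DemoOpIndex object() const { return inputs[0]; }
  DemoOpIndex rtt() const {
    return input_count() > 1 ? inputs[1] : DemoOpIndex{};  // invalid if absent
  }
};

int main() {
  DemoCheckOp with_rtt(DemoOpIndex{0}, DemoOpIndex{5});
  DemoCheckOp without_rtt(DemoOpIndex{0}, std::nullopt);
  assert(with_rtt.input_count() == 2 && with_rtt.rtt().valid());
  assert(without_rtt.input_count() == 1 && !without_rtt.rtt().valid());
}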
.CanDependOnChecks(); - explicit StringPrepareForGetCodeUnitOp(V string) : Base(string) {} + explicit StringPrepareForGetCodeUnitOp(V string) : Base(string) {} OpIndex string() const { return input(0); } @@ -6993,15 +7150,18 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, V(I32x4RelaxedTruncF64x2SZero) \ V(I32x4RelaxedTruncF64x2UZero) -#define FOREACH_SIMD_128_UNARY_OPTIONAL_OPCODE(V) \ - V(F32x4Ceil) \ - V(F32x4Floor) \ - V(F32x4Trunc) \ - V(F32x4NearestInt) \ - V(F64x2Ceil) \ - V(F64x2Floor) \ - V(F64x2Trunc) \ - V(F64x2NearestInt) +#define FOREACH_SIMD_128_UNARY_OPTIONAL_OPCODE(V) \ + V(F32x4Ceil) \ + V(F32x4Floor) \ + V(F32x4Trunc) \ + V(F32x4NearestInt) \ + V(F64x2Ceil) \ + V(F64x2Floor) \ + V(F64x2Trunc) \ + V(F64x2NearestInt) \ + /* TODO(mliedtke): Rename to ReverseBytes once the naming is decoupled from \ + * Turbofan naming. */ \ + V(Simd128ReverseBytes) #define FOREACH_SIMD_128_UNARY_OPCODE(V) \ FOREACH_SIMD_128_UNARY_NON_OPTIONAL_OPCODE(V) \ @@ -7567,6 +7727,353 @@ struct Simd128ShuffleOp : FixedArityOperationT<2, Simd128ShuffleOp> { void PrintOptions(std::ostream& os) const; }; +#if V8_ENABLE_WASM_SIMD256_REVEC +struct Simd256Extract128LaneOp + : FixedArityOperationT<1, Simd256Extract128LaneOp> { + uint8_t lane; + + static constexpr OpEffects effects = OpEffects(); + + base::Vector outputs_rep() const { + return RepVector(); + } + + base::Vector inputs_rep( + ZoneVector& storage) const { + return MaybeRepVector(); + } + + Simd256Extract128LaneOp(OpIndex input, uint8_t lane) + : Base(input), lane(lane) {} + + OpIndex input() const { return Base::input(0); } + + void Validate(const Graph& graph) const { +#if DEBUG + DCHECK_LT(lane, 2); +#endif + } + + auto options() const { return std::tuple{lane}; } +}; + +#define FOREACH_SIMD_256_LOAD_TRANSFORM_OPCODE(V) \ + V(8x16S) \ + V(8x16U) \ + V(16x8S) \ + V(16x8U) \ + V(32x4S) \ + V(32x4U) \ + V(8Splat) \ + V(16Splat) \ + V(32Splat) \ + V(64Splat) + +struct Simd256LoadTransformOp + : FixedArityOperationT<2, Simd256LoadTransformOp> { + using LoadKind = LoadOp::Kind; + enum class TransformKind : uint8_t { +#define DEFINE_KIND(kind) k##kind, + FOREACH_SIMD_256_LOAD_TRANSFORM_OPCODE(DEFINE_KIND) +#undef DEFINE_KIND + }; + + LoadKind load_kind; + TransformKind transform_kind; + int offset; + + OpEffects Effects() const { + OpEffects effects = OpEffects().CanReadMemory().CanDependOnChecks(); + if (load_kind.with_trap_handler) { + effects = effects.CanLeaveCurrentFunction(); + } + return effects; + } + + base::Vector outputs_rep() const { + return RepVector(); + } + + base::Vector inputs_rep( + ZoneVector& storage) const { + return MaybeRepVector(); + } + + Simd256LoadTransformOp(V base, V index, LoadKind load_kind, + TransformKind transform_kind, int offset) + : Base(base, index), + load_kind(load_kind), + transform_kind(transform_kind), + offset(offset) {} + + V base() const { return input(0); } + V index() const { return input(1); } + + void Validate(const Graph& graph) { DCHECK(!load_kind.tagged_base); } + + auto options() const { return std::tuple{load_kind, transform_kind, offset}; } + void PrintOptions(std::ostream& os) const; +}; + +#define FOREACH_SIMD_256_UNARY_OPCODE(V) \ + V(S256Not) \ + V(I8x32Abs) \ + V(I8x32Neg) \ + V(I16x16ExtAddPairwiseI8x32S) \ + V(I16x16ExtAddPairwiseI8x32U) \ + V(I32x8ExtAddPairwiseI16x16S) \ + V(I32x8ExtAddPairwiseI16x16U) \ + V(I16x16Abs) \ + V(I16x16Neg) \ + V(I32x8Abs) \ + V(I32x8Neg) \ + V(F32x8Abs) \ + V(F32x8Neg) \ + V(F32x8Sqrt) \ + V(F64x4Sqrt) \ + V(I32x8UConvertF32x8) \ + 
V(F32x8UConvertI32x8) + +struct Simd256UnaryOp : FixedArityOperationT<1, Simd256UnaryOp> { + enum class Kind : uint8_t { +#define DEFINE_KIND(kind) k##kind, + FOREACH_SIMD_256_UNARY_OPCODE(DEFINE_KIND) +#undef DEFINE_KIND + }; + + Kind kind; + + static constexpr OpEffects effects = OpEffects(); + + base::Vector outputs_rep() const { + return RepVector(); + } + + base::Vector inputs_rep( + ZoneVector& storage) const { + return MaybeRepVector(); + } + + Simd256UnaryOp(OpIndex input, Kind kind) : Base(input), kind(kind) {} + + OpIndex input() const { return Base::input(0); } + + void Validate(const Graph& graph) const {} + + auto options() const { return std::tuple{kind}; } +}; +std::ostream& operator<<(std::ostream& os, Simd256UnaryOp::Kind kind); + +#define FOREACH_SIMD_256_BINARY_BASIC_OPCODE(V) \ + V(I8x32Eq) \ + V(I8x32Ne) \ + V(I8x32GtS) \ + V(I8x32GtU) \ + V(I8x32GeS) \ + V(I8x32GeU) \ + V(I16x16Eq) \ + V(I16x16Ne) \ + V(I16x16GtS) \ + V(I16x16GtU) \ + V(I16x16GeS) \ + V(I16x16GeU) \ + V(I32x8Eq) \ + V(I32x8Ne) \ + V(I32x8GtS) \ + V(I32x8GtU) \ + V(I32x8GeS) \ + V(I32x8GeU) \ + V(F32x8Eq) \ + V(F32x8Ne) \ + V(F32x8Lt) \ + V(F32x8Le) \ + V(F64x4Eq) \ + V(F64x4Ne) \ + V(F64x4Lt) \ + V(F64x4Le) \ + V(S256And) \ + V(S256AndNot) \ + V(S256Or) \ + V(S256Xor) \ + V(I8x32SConvertI16x16) \ + V(I8x32UConvertI16x16) \ + V(I8x32Add) \ + V(I8x32AddSatS) \ + V(I8x32AddSatU) \ + V(I8x32Sub) \ + V(I8x32SubSatS) \ + V(I8x32SubSatU) \ + V(I8x32MinS) \ + V(I8x32MinU) \ + V(I8x32MaxS) \ + V(I8x32MaxU) \ + V(I8x32RoundingAverageU) \ + V(I16x16SConvertI32x8) \ + V(I16x16UConvertI32x8) \ + V(I16x16Add) \ + V(I16x16AddSatS) \ + V(I16x16AddSatU) \ + V(I16x16Sub) \ + V(I16x16SubSatS) \ + V(I16x16SubSatU) \ + V(I16x16Mul) \ + V(I16x16MinS) \ + V(I16x16MinU) \ + V(I16x16MaxS) \ + V(I16x16MaxU) \ + V(I16x16RoundingAverageU) \ + V(I32x8Add) \ + V(I32x8Sub) \ + V(I32x8Mul) \ + V(I32x8MinS) \ + V(I32x8MinU) \ + V(I32x8MaxS) \ + V(I32x8MaxU) \ + V(I32x8DotI16x16S) \ + V(I64x4Add) \ + V(I64x4Sub) \ + V(I64x4Mul) \ + V(I64x4Eq) \ + V(I64x4Ne) \ + V(I64x4GtS) \ + V(I64x4GeS) \ + V(F32x8Add) \ + V(F32x8Sub) \ + V(F32x8Mul) \ + V(F32x8Div) \ + V(F32x8Min) \ + V(F32x8Max) \ + V(F32x8Pmin) \ + V(F32x8Pmax) \ + V(F64x4Add) \ + V(F64x4Sub) \ + V(F64x4Mul) \ + V(F64x4Div) \ + V(F64x4Min) \ + V(F64x4Max) \ + V(F64x4Pmin) \ + V(F64x4Pmax) + +#define FOREACH_SIMD_256_BINARY_OPCODE(V) \ + FOREACH_SIMD_256_BINARY_BASIC_OPCODE(V) + +struct Simd256BinopOp : FixedArityOperationT<2, Simd256BinopOp> { + enum class Kind : uint8_t { +#define DEFINE_KIND(kind) k##kind, + FOREACH_SIMD_256_BINARY_OPCODE(DEFINE_KIND) +#undef DEFINE_KIND + }; + + Kind kind; + + static constexpr OpEffects effects = OpEffects(); + + base::Vector outputs_rep() const { + return RepVector(); + } + + base::Vector inputs_rep( + ZoneVector& storage) const { + return MaybeRepVector(); + } + + Simd256BinopOp(OpIndex left, OpIndex right, Kind kind) + : Base(left, right), kind(kind) {} + + OpIndex left() const { return input(0); } + OpIndex right() const { return input(1); } + + void Validate(const Graph& graph) const {} + + auto options() const { return std::tuple{kind}; } +}; + +std::ostream& operator<<(std::ostream& os, Simd256BinopOp::Kind kind); + +#define FOREACH_SIMD_256_SHIFT_OPCODE(V) \ + V(I16x16Shl) \ + V(I16x16ShrS) \ + V(I16x16ShrU) \ + V(I32x8Shl) \ + V(I32x8ShrS) \ + V(I32x8ShrU) \ + V(I64x4Shl) \ + V(I64x4ShrU) + +struct Simd256ShiftOp : FixedArityOperationT<2, Simd256ShiftOp> { + enum class Kind : uint8_t { +#define DEFINE_KIND(kind) k##kind, + 
FOREACH_SIMD_256_SHIFT_OPCODE(DEFINE_KIND) +#undef DEFINE_KIND + }; + + Kind kind; + + static constexpr OpEffects effects = OpEffects(); + + base::Vector outputs_rep() const { + return RepVector(); + } + + base::Vector inputs_rep( + ZoneVector& storage) const { + return MaybeRepVector(); + } + + Simd256ShiftOp(V input, V shift, Kind kind) + : Base(input, shift), kind(kind) {} + + V input() const { return Base::input(0); } + V shift() const { return Base::input(1); } + + void Validate(const Graph& graph) const {} + + auto options() const { return std::tuple{kind}; } +}; +V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, + Simd256ShiftOp::Kind kind); + +#define FOREACH_SIMD_256_TERNARY_OPCODE(V) V(S256Select) +struct Simd256TernaryOp : FixedArityOperationT<3, Simd256TernaryOp> { + enum class Kind : uint8_t { +#define DEFINE_KIND(kind) k##kind, + FOREACH_SIMD_256_TERNARY_OPCODE(DEFINE_KIND) +#undef DEFINE_KIND + }; + + Kind kind; + + static constexpr OpEffects effects = OpEffects(); + + base::Vector outputs_rep() const { + return RepVector(); + } + + base::Vector inputs_rep( + ZoneVector& storage) const { + return MaybeRepVector(); + } + + Simd256TernaryOp(V first, V second, V third, + Kind kind) + : Base(first, second, third), kind(kind) {} + + V first() const { return input(0); } + V second() const { return input(1); } + V third() const { return input(2); } + + void Validate(const Graph& graph) const {} + + auto options() const { return std::tuple{kind}; } +}; +std::ostream& operator<<(std::ostream& os, Simd256TernaryOp::Kind kind); +#endif // V8_ENABLE_WASM_SIMD256_REVEC + struct LoadStackPointerOp : FixedArityOperationT<0, LoadStackPointerOp> { // TODO(nicohartmann@): Review effects. static constexpr OpEffects effects = OpEffects().CanReadMemory(); @@ -7681,6 +8188,10 @@ inline OpEffects Operation::Effects() const { return Cast().Effects(); case Opcode::kSimd128LoadTransform: return Cast().Effects(); +#if V8_ENABLE_WASM_SIMD256_REVEC + case Opcode::kSimd256LoadTransform: + return Cast().Effects(); +#endif // V8_ENABLE_WASM_SIMD256_REVEC #endif default: UNREACHABLE(); @@ -7738,15 +8249,66 @@ V8_EXPORT_PRIVATE V8_INLINE bool ShouldSkipOperation(const Operation& op) { } namespace detail { -// Computes the number of inputs of an operation, ignoring non-OpIndex inputs -// (which are always inlined in the operation) and `base::Vector` -// inputs. -template +// Defining `input_count` to compute the number of OpIndex inputs of an +// operation. + +// There is one overload for each possible type of parameters for all +// Operations rather than a default generic overload, so that we don't +// accidentally forget some types (eg, if a new Operation takes its inputs as a +// std::vector, we shouldn't count this as "0 inputs because it's +// neither raw OpIndex nor base::Vector", which a generic overload +// might do). + +// Base case +constexpr size_t input_count() { return 0; } + +// All parameters that are not OpIndex and should thus not count towards the +// "input_count" of the operations. 
+template || + std::is_integral_v || + std::is_floating_point_v>> constexpr size_t input_count(T) { return 0; } -constexpr size_t input_count() { return 0; } +template +constexpr size_t input_count(const MaybeHandle) { + return 0; +} +template +constexpr size_t input_count(const Handle) { + return 0; +} +template +constexpr size_t input_count(const base::Flags) { + return 0; +} +constexpr size_t input_count(const Block*) { return 0; } +constexpr size_t input_count(const TSCallDescriptor*) { return 0; } +constexpr size_t input_count(const char*) { return 0; } +constexpr size_t input_count(const DeoptimizeParameters*) { return 0; } +constexpr size_t input_count(const FastApiCallParameters*) { return 0; } +constexpr size_t input_count(const FrameStateData*) { return 0; } +constexpr size_t input_count(const base::Vector) { return 0; } +constexpr size_t input_count(LoadOp::Kind) { return 0; } +constexpr size_t input_count(RegisterRepresentation) { return 0; } +constexpr size_t input_count(MemoryRepresentation) { return 0; } +constexpr size_t input_count(OpEffects) { return 0; } +inline size_t input_count(const ElementsTransition) { return 0; } +inline size_t input_count(const FeedbackSource) { return 0; } +inline size_t input_count(const ZoneRefSet) { return 0; } +inline size_t input_count(ConstantOp::Storage) { return 0; } +inline size_t input_count(Type) { return 0; } +#ifdef V8_ENABLE_WEBASSEMBLY +constexpr size_t input_count(const wasm::WasmGlobal*) { return 0; } +constexpr size_t input_count(const wasm::StructType*) { return 0; } +constexpr size_t input_count(const wasm::ArrayType*) { return 0; } +constexpr size_t input_count(wasm::ValueType) { return 0; } +constexpr size_t input_count(WasmTypeCheckConfig) { return 0; } +#endif + +// All parameters that are OpIndex-like (ie, OpIndex, and OpIndex containers) constexpr size_t input_count(OpIndex) { return 1; } +constexpr size_t input_count(OptionalOpIndex) { return 1; } constexpr size_t input_count(base::Vector inputs) { return inputs.size(); } @@ -7755,10 +8317,16 @@ constexpr size_t input_count(base::Vector inputs) { template Op* CreateOperation(base::SmallVector& storage, Args... args) { - size_t size = Operation::StorageSlotCount( - Op::opcode, (0 + ... + detail::input_count(args))); + size_t input_count = (0 + ... + detail::input_count(args)); + size_t size = Operation::StorageSlotCount(Op::opcode, input_count); storage.resize_no_init(size); - return new (storage.data()) Op(args...); + Op* op = new (storage.data()) Op(args...); + // Checking that the {input_count} we computed is at least the actual + // input_count of the operation. {input_count} could be greater in the case of + // OptionalOpIndex: they count for 1 input when computing {input_count} here, + // but in Operations, they only count for 1 input when they are valid. 
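The detail::input_count overloads above let CreateOperation pre-compute an upper bound on the number of OpIndex inputs with a fold expression; an OptionalOpIndex always counts as one even when it ends up invalid, which is why the computed count may exceed the operation's actual input_count (hence the DCHECK_GE the patch adds below rather than an equality check). A standalone sketch of that counting scheme with simplified stand-in types:

#include <cassert>
#include <cstddef>
#include <optional>
#include <vector>

struct DemoOpIndex {
  int id;
};

constexpr size_t input_count() { return 0; }
constexpr size_t input_count(int) { return 0; }          // scalar option: no input
constexpr size_t input_count(DemoOpIndex) { return 1; }  // one graph input
constexpr size_t input_count(std::optional<DemoOpIndex>) {
  return 1;  // counted even if absent, so the sum is an upper bound
}
size_t input_count(const std::vector<DemoOpIndex>& v) { return v.size(); }

template <typename... Args>
size_t TotalInputCount(const Args&... args) {
  return (size_t{0} + ... + input_count(args));
}

int main() {
  std::vector<DemoOpIndex> rest = {{2}, {3}};
  // 1 (index) + 1 (optional upper bound) + 0 (plain option) + 2 (vector).
  assert(TotalInputCount(DemoOpIndex{1}, std::optional<DemoOpIndex>{}, 42,
                         rest) == 4);
}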
+ DCHECK_GE(input_count, op->input_count); + return op; } } // namespace v8::internal::compiler::turboshaft diff --git a/deps/v8/src/compiler/turboshaft/opmasks.h b/deps/v8/src/compiler/turboshaft/opmasks.h index 425fcbad36fe50..087e46ce9bb8e9 100644 --- a/deps/v8/src/compiler/turboshaft/opmasks.h +++ b/deps/v8/src/compiler/turboshaft/opmasks.h @@ -173,6 +173,10 @@ using kWord64Mul = WordBinopMask::For; using kWord64BitwiseAnd = WordBinopMask::For; +using kWord64BitwiseOr = WordBinopMask::For; +using kWord64BitwiseXor = WordBinopMask::For; using kBitwiseAnd = WordBinopKindMask::For; using kBitwiseXor = WordBinopKindMask::For; diff --git a/deps/v8/src/compiler/turboshaft/phase.cc b/deps/v8/src/compiler/turboshaft/phase.cc index 2c5b56870e5e07..3d2c20f3ffdd24 100644 --- a/deps/v8/src/compiler/turboshaft/phase.cc +++ b/deps/v8/src/compiler/turboshaft/phase.cc @@ -9,6 +9,7 @@ #include "src/compiler/js-heap-broker.h" #include "src/compiler/turboshaft/graph-visualizer.h" #include "src/diagnostics/code-tracer.h" +#include "src/utils/ostreams.h" namespace v8::internal::compiler::turboshaft { @@ -64,6 +65,14 @@ void PrintTurboshaftGraphForTurbolizer(std::ofstream& stream, } return false; }); + PrintTurboshaftCustomDataPerOperation( + stream, "Representations", graph, + [](std::ostream& stream, const turboshaft::Graph& graph, + turboshaft::OpIndex index) -> bool { + const Operation& op = graph.Get(index); + stream << PrintCollection(op.outputs_rep()); + return true; + }); PrintTurboshaftCustomDataPerOperation( stream, "Use Count (saturated)", graph, [](std::ostream& stream, const turboshaft::Graph& graph, diff --git a/deps/v8/src/compiler/turboshaft/recreate-schedule.cc b/deps/v8/src/compiler/turboshaft/recreate-schedule.cc index a42c98e8be3ce9..89a3b55b1c9827 100644 --- a/deps/v8/src/compiler/turboshaft/recreate-schedule.cc +++ b/deps/v8/src/compiler/turboshaft/recreate-schedule.cc @@ -925,6 +925,7 @@ Node* ScheduleBuilder::ProcessOperation(const SelectOp& op) { case RegisterRepresentation::Enum::kTagged: case RegisterRepresentation::Enum::kCompressed: case RegisterRepresentation::Enum::kSimd128: + case RegisterRepresentation::Enum::kSimd256: UNREACHABLE(); } @@ -1815,6 +1816,76 @@ Node* ScheduleBuilder::ProcessOperation(const Simd128ShuffleOp& op) { {GetNode(op.left()), GetNode(op.right())}); } +#if V8_ENABLE_WASM_SIMD256_REVEC +Node* ScheduleBuilder::ProcessOperation(const Simd256Extract128LaneOp& op) { + const Operator* o = machine.ExtractF128(op.lane); + return AddNode(o, {GetNode(op.input())}); +} + +Node* ScheduleBuilder::ProcessOperation(const Simd256LoadTransformOp& op) { + DCHECK_EQ(op.offset, 0); + MemoryAccessKind access = + op.load_kind.with_trap_handler ? MemoryAccessKind::kProtected + : op.load_kind.maybe_unaligned ? 
MemoryAccessKind::kUnaligned + : MemoryAccessKind::kNormal; + LoadTransformation transformation; + switch (op.transform_kind) { +#define HANDLE_KIND(kind) \ + case Simd256LoadTransformOp::TransformKind::k##kind: \ + transformation = LoadTransformation::kS256Load##kind; \ + break; + FOREACH_SIMD_256_LOAD_TRANSFORM_OPCODE(HANDLE_KIND) +#undef HANDLE_KIND + } + + const Operator* o = machine.LoadTransform(access, transformation); + + return AddNode(o, {GetNode(op.base()), GetNode(op.index())}); +} + +Node* ScheduleBuilder::ProcessOperation(const Simd256UnaryOp& op) { + switch (op.kind) { +#define HANDLE_KIND(kind) \ + case Simd256UnaryOp::Kind::k##kind: \ + return AddNode(machine.kind(), {GetNode(op.input())}); + FOREACH_SIMD_256_UNARY_OPCODE(HANDLE_KIND); +#undef HANDLE_KIND + } +} + +Node* ScheduleBuilder::ProcessOperation(const Simd256BinopOp& op) { + switch (op.kind) { +#define HANDLE_KIND(kind) \ + case Simd256BinopOp::Kind::k##kind: \ + return AddNode(machine.kind(), {GetNode(op.left()), GetNode(op.right())}); + FOREACH_SIMD_256_BINARY_OPCODE(HANDLE_KIND); +#undef HANDLE_KIND + } +} + +Node* ScheduleBuilder::ProcessOperation(const Simd256ShiftOp& op) { + switch (op.kind) { +#define HANDLE_KIND(kind) \ + case Simd256ShiftOp::Kind::k##kind: \ + return AddNode(machine.kind(), {GetNode(op.input()), GetNode(op.shift())}); + FOREACH_SIMD_256_SHIFT_OPCODE(HANDLE_KIND); +#undef HANDLE_KIND + } +} + +Node* ScheduleBuilder::ProcessOperation(const Simd256TernaryOp& op) { + switch (op.kind) { +#define HANDLE_KIND(kind) \ + case Simd256TernaryOp::Kind::k##kind: \ + return AddNode(machine.kind(), {GetNode(op.first()), GetNode(op.second()), \ + GetNode(op.third())}); + FOREACH_SIMD_256_TERNARY_OPCODE(HANDLE_KIND); +#undef HANDLE_KIND + } +} + +#endif // V8_ENABLE_WASM_SIMD256_REVEC + Node* ScheduleBuilder::ProcessOperation(const LoadStackPointerOp& op) { return AddNode(machine.LoadStackPointer(), {}); } diff --git a/deps/v8/src/compiler/turboshaft/representations.cc b/deps/v8/src/compiler/turboshaft/representations.cc index d1ee3be369cd0e..63118c8eacdfd8 100644 --- a/deps/v8/src/compiler/turboshaft/representations.cc +++ b/deps/v8/src/compiler/turboshaft/representations.cc @@ -22,6 +22,8 @@ std::ostream& operator<<(std::ostream& os, MaybeRegisterRepresentation rep) { return os << "Compressed"; case MaybeRegisterRepresentation::Simd128(): return os << "Simd128"; + case MaybeRegisterRepresentation::Simd256(): + return os << "Simd256"; case MaybeRegisterRepresentation::None(): return os << "None"; } @@ -61,6 +63,8 @@ std::ostream& operator<<(std::ostream& os, MemoryRepresentation rep) { return os << "SandboxedPointer"; case MemoryRepresentation::Simd128(): return os << "Simd128"; + case MemoryRepresentation::Simd256(): + return os << "Simd256"; } } } // namespace v8::internal::compiler::turboshaft diff --git a/deps/v8/src/compiler/turboshaft/representations.h b/deps/v8/src/compiler/turboshaft/representations.h index 3f13a08021a18e..e970b83a3f6c2b 100644 --- a/deps/v8/src/compiler/turboshaft/representations.h +++ b/deps/v8/src/compiler/turboshaft/representations.h @@ -29,6 +29,7 @@ class MaybeRegisterRepresentation { kTagged, kCompressed, kSimd128, + kSimd256, kNone, // No register representation. 
}; @@ -81,6 +82,10 @@ class MaybeRegisterRepresentation { return MaybeRegisterRepresentation(Enum::kSimd128); } + static constexpr MaybeRegisterRepresentation Simd256() { + return MaybeRegisterRepresentation(Enum::kSimd256); + } + static constexpr MaybeRegisterRepresentation None() { return MaybeRegisterRepresentation(Enum::kNone); } @@ -95,6 +100,7 @@ class MaybeRegisterRepresentation { case Enum::kTagged: case Enum::kCompressed: case Enum::kSimd128: + case Enum::kSimd256: case Enum::kNone: return false; } @@ -110,6 +116,7 @@ class MaybeRegisterRepresentation { case Enum::kTagged: case Enum::kCompressed: case Enum::kSimd128: + case Enum::kSimd256: case Enum::kNone: return false; } @@ -125,6 +132,7 @@ class MaybeRegisterRepresentation { case Enum::kFloat32: case Enum::kFloat64: case Enum::kSimd128: + case Enum::kSimd256: case Enum::kNone: return false; } @@ -141,6 +149,7 @@ class MaybeRegisterRepresentation { case Enum::kTagged: case Enum::kCompressed: case Enum::kSimd128: + case Enum::kSimd256: case Enum::kNone: UNREACHABLE(); } @@ -162,6 +171,8 @@ class MaybeRegisterRepresentation { return MachineRepresentation::kCompressed; case Simd128(): return MachineRepresentation::kSimd128; + case Simd256(): + return MachineRepresentation::kSimd256; case None(): UNREACHABLE(); } @@ -183,6 +194,8 @@ class MaybeRegisterRepresentation { return kSystemPointerSize; case Simd128(): return 128; + case Simd256(): + return 256; case None(): UNREACHABLE(); } @@ -205,6 +218,7 @@ class RegisterRepresentation : public MaybeRegisterRepresentation { kCompressed = static_cast(MaybeRegisterRepresentation::Enum::kCompressed), kSimd128 = static_cast(MaybeRegisterRepresentation::Enum::kSimd128), + kSimd256 = static_cast(MaybeRegisterRepresentation::Enum::kSimd256), }; explicit constexpr RegisterRepresentation(Enum value) @@ -251,6 +265,9 @@ class RegisterRepresentation : public MaybeRegisterRepresentation { static constexpr RegisterRepresentation Simd128() { return RegisterRepresentation(Enum::kSimd128); } + static constexpr RegisterRepresentation Simd256() { + return RegisterRepresentation(Enum::kSimd256); + } static RegisterRepresentation FromMachineRepresentation( MachineRepresentation rep) { @@ -275,11 +292,47 @@ class RegisterRepresentation : public MaybeRegisterRepresentation { return Float64(); case MachineRepresentation::kSimd128: return Simd128(); + case MachineRepresentation::kSimd256: + return Simd256(); case MachineRepresentation::kMapWord: case MachineRepresentation::kIndirectPointer: case MachineRepresentation::kSandboxedPointer: case MachineRepresentation::kNone: + UNREACHABLE(); + } + } + + static constexpr RegisterRepresentation FromMachineType(MachineType type) { + switch (type.representation()) { + case MachineRepresentation::kBit: + case MachineRepresentation::kWord8: + case MachineRepresentation::kWord16: + case MachineRepresentation::kWord32: + return RegisterRepresentation::Word32(); + case MachineRepresentation::kWord64: + return RegisterRepresentation::Word64(); + case MachineRepresentation::kTagged: + case MachineRepresentation::kTaggedSigned: + case MachineRepresentation::kTaggedPointer: + return RegisterRepresentation::Tagged(); + case MachineRepresentation::kMapWord: + // Turboshaft does not support map packing. 
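Threading the new Simd256 representation through this header means adding a case to every switch over the representation enums. Those switches enumerate every enumerator instead of falling back to a default: branch, so introducing a new value surfaces as a compile-time -Wswitch diagnostic in every switch that does not yet handle it. A standalone illustration of that design choice, not V8 code:

#include <cstdint>

enum class DemoRep : uint8_t { kWord32, kWord64, kSimd128, kSimd256 };

constexpr int BitWidth(DemoRep rep) {
  switch (rep) {
    case DemoRep::kWord32:
      return 32;
    case DemoRep::kWord64:
      return 64;
    case DemoRep::kSimd128:
      return 128;
    case DemoRep::kSimd256:  // newly added case: with warnings-as-errors every
      return 256;            // switch missing it fails to compile
  }
  return 0;  // unreachable: all enumerators handled above, no default: needed
}

static_assert(BitWidth(DemoRep::kSimd256) == 256, "new case is handled");

int main() { return BitWidth(DemoRep::kWord32) == 32 ? 0 : 1; }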
+ DCHECK(!V8_MAP_PACKING_BOOL); + return RegisterRepresentation::Tagged(); + case MachineRepresentation::kFloat32: + return RegisterRepresentation::Float32(); + case MachineRepresentation::kFloat64: + return RegisterRepresentation::Float64(); + case MachineRepresentation::kIndirectPointer: + case MachineRepresentation::kSandboxedPointer: + return RegisterRepresentation::WordPtr(); + case MachineRepresentation::kSimd128: + return RegisterRepresentation::Simd128(); case MachineRepresentation::kSimd256: + return RegisterRepresentation::Simd256(); + case MachineRepresentation::kNone: + case MachineRepresentation::kCompressedPointer: + case MachineRepresentation::kCompressed: UNREACHABLE(); } } @@ -477,6 +530,7 @@ class MemoryRepresentation { kIndirectPointer, kSandboxedPointer, kSimd128, + kSimd256 }; explicit constexpr MemoryRepresentation(Enum value) : value_(value) {} @@ -545,6 +599,9 @@ class MemoryRepresentation { static constexpr MemoryRepresentation Simd128() { return MemoryRepresentation(Enum::kSimd128); } + static constexpr MemoryRepresentation Simd256() { + return MemoryRepresentation(Enum::kSimd256); + } bool IsWord() const { switch (*this) { @@ -565,6 +622,7 @@ class MemoryRepresentation { case IndirectPointer(): case SandboxedPointer(): case Simd128(): + case Simd256(): return false; } } @@ -589,6 +647,7 @@ class MemoryRepresentation { case IndirectPointer(): case SandboxedPointer(): case Simd128(): + case Simd256(): UNREACHABLE(); } } @@ -612,6 +671,7 @@ class MemoryRepresentation { case IndirectPointer(): case SandboxedPointer(): case Simd128(): + case Simd256(): return false; } } @@ -635,6 +695,7 @@ class MemoryRepresentation { case IndirectPointer(): case SandboxedPointer(): case Simd128(): + case Simd256(): return false; } } @@ -665,6 +726,8 @@ class MemoryRepresentation { return RegisterRepresentation::Word64(); case Simd128(): return RegisterRepresentation::Simd128(); + case Simd256(): + return RegisterRepresentation::Simd256(); } } @@ -683,6 +746,8 @@ class MemoryRepresentation { return AnyTagged(); case RegisterRepresentation::Simd128(): return Simd128(); + case RegisterRepresentation::Simd256(): + return Simd256(); case RegisterRepresentation::Compressed(): UNREACHABLE(); } @@ -735,6 +800,8 @@ class MemoryRepresentation { return MachineType::SandboxedPointer(); case Simd128(): return MachineType::Simd128(); + case Simd256(): + return MachineType::Simd256(); } } @@ -768,9 +835,10 @@ class MemoryRepresentation { return SandboxedPointer(); case MachineRepresentation::kSimd128: return Simd128(); + case MachineRepresentation::kSimd256: + return Simd256(); case MachineRepresentation::kNone: case MachineRepresentation::kBit: - case MachineRepresentation::kSimd256: case MachineRepresentation::kCompressedPointer: case MachineRepresentation::kCompressed: UNREACHABLE(); @@ -802,10 +870,11 @@ class MemoryRepresentation { return SandboxedPointer(); case MachineRepresentation::kSimd128: return Simd128(); + case MachineRepresentation::kSimd256: + return Simd256(); case MachineRepresentation::kNone: case MachineRepresentation::kMapWord: case MachineRepresentation::kBit: - case MachineRepresentation::kSimd256: case MachineRepresentation::kCompressedPointer: case MachineRepresentation::kCompressed: case MachineRepresentation::kIndirectPointer: @@ -841,6 +910,8 @@ class MemoryRepresentation { return kTaggedSizeLog2; case Simd128(): return 4; + case Simd256(): + return 5; } } diff --git a/deps/v8/src/compiler/turboshaft/select-lowering-reducer.h 
b/deps/v8/src/compiler/turboshaft/select-lowering-reducer.h index 2f03030bd525fe..6b95d9d1754416 100644 --- a/deps/v8/src/compiler/turboshaft/select-lowering-reducer.h +++ b/deps/v8/src/compiler/turboshaft/select-lowering-reducer.h @@ -49,11 +49,9 @@ class SelectLoweringReducer : public Next { Variable result = __ NewLoopInvariantVariable(rep); IF (cond) { __ SetVariable(result, vtrue); - } - ELSE { + } ELSE { __ SetVariable(result, vfalse); } - END_IF return __ GetVariable(result); } diff --git a/deps/v8/src/compiler/turboshaft/simplified-lowering-reducer.h b/deps/v8/src/compiler/turboshaft/simplified-lowering-reducer.h index 5c946e67e7fa3d..556c0d23484f9b 100644 --- a/deps/v8/src/compiler/turboshaft/simplified-lowering-reducer.h +++ b/deps/v8/src/compiler/turboshaft/simplified-lowering-reducer.h @@ -32,10 +32,10 @@ class SimplifiedLoweringReducer : public Next { OpIndex ig_index, const SpeculativeNumberBinopOp& op) { DCHECK_EQ(op.kind, SpeculativeNumberBinopOp::Kind::kSafeIntegerAdd); - OpIndex frame_state = MapImpl(op.frame_state()); - V left = ProcessInput(MapImpl(op.left()), Rep::Word32(), + OpIndex frame_state = Map(op.frame_state()); + V left = ProcessInput(Map(op.left()), Rep::Word32(), CheckKind::kSigned32, frame_state); - V right = ProcessInput(MapImpl(op.right()), Rep::Word32(), + V right = ProcessInput(Map(op.right()), Rep::Word32(), CheckKind::kSigned32, frame_state); V result = __ OverflowCheckedBinop( @@ -43,7 +43,7 @@ class SimplifiedLoweringReducer : public Next { WordRepresentation::Word32()); V overflow = __ Projection(result, 1, Rep::Word32()); - __ DeoptimizeIf(overflow, MapImpl(op.frame_state()), + __ DeoptimizeIf(overflow, Map(op.frame_state()), DeoptimizeReason::kOverflow, FeedbackSource{}); return __ Projection(result, 0, Rep::Word32()); } @@ -52,10 +52,10 @@ class SimplifiedLoweringReducer : public Next { base::SmallVector return_values; for (OpIndex input : ret.return_values()) { return_values.push_back( - ProcessInput(MapImpl(input), Rep::Tagged(), CheckKind::kNone, {})); + ProcessInput(Map(input), Rep::Tagged(), CheckKind::kNone, {})); } - __ Return(MapImpl(ret.pop_count()), base::VectorOf(return_values)); + __ Return(Map(ret.pop_count()), base::VectorOf(return_values)); return OpIndex::Invalid(); } @@ -94,7 +94,7 @@ class SimplifiedLoweringReducer : public Next { } } - inline OpIndex MapImpl(OpIndex ig_index) { return __ MapToNewGraph(ig_index); } + inline OpIndex Map(OpIndex ig_index) { return __ MapToNewGraph(ig_index); } }; #include "src/compiler/turboshaft/undef-assembler-macros.inc" diff --git a/deps/v8/src/compiler/turboshaft/stack-check-lowering-reducer.h b/deps/v8/src/compiler/turboshaft/stack-check-lowering-reducer.h index 9c5f28b17efaa4..48636afa385d06 100644 --- a/deps/v8/src/compiler/turboshaft/stack-check-lowering-reducer.h +++ b/deps/v8/src/compiler/turboshaft/stack-check-lowering-reducer.h @@ -75,7 +75,7 @@ class StackCheckLoweringReducer : public Next { } #endif // V8_ENABLE_WEBASSEMBLY } - END_IF + return OpIndex::Invalid(); } diff --git a/deps/v8/src/compiler/turboshaft/store-store-elimination-reducer.h b/deps/v8/src/compiler/turboshaft/store-store-elimination-reducer.h index 9f28266bc07b69..201da0d70b6371 100644 --- a/deps/v8/src/compiler/turboshaft/store-store-elimination-reducer.h +++ b/deps/v8/src/compiler/turboshaft/store-store-elimination-reducer.h @@ -14,8 +14,8 @@ namespace v8::internal::compiler::turboshaft { -// StoreStoreEliminationReducer tries to identify and remove redundant stores. -// E.g. for an input like +// 1. 
StoreStoreEliminationReducer tries to identify and remove redundant +// stores. E.g. for an input like // // let o = {}; // o.x = 2; @@ -60,6 +60,14 @@ namespace v8::internal::compiler::turboshaft { // loop header, we revisit the loop if the resulting state has changed until we // reach a fixpoint. // +// +// 2. StoreStoreEliminationReducer tries to merge 2 continuous 32-bits stores +// into a 64-bits one. +// When v8 create a new js object, it will initialize it's in object fields to +// some constant value after allocation, like `undefined`. When pointer +// compression is enabled, they are continuous 32-bits stores, and the store +// values are usually constants (heap object). This reducer will try to merge 2 +// continuous 32-bits stores into a 64-bits one. #include "src/compiler/turboshaft/define-assembler-macros.inc" @@ -270,8 +278,10 @@ class RedundantStoreAnalysis { RedundantStoreAnalysis(const Graph& graph, Zone* phase_zone) : graph_(graph), table_(graph, phase_zone) {} - void Run(ZoneSet& eliminable_stores) { + void Run(ZoneSet& eliminable_stores, + ZoneMap& mergeable_store_pairs) { eliminable_stores_ = &eliminable_stores; + mergeable_store_pairs_ = &mergeable_store_pairs; for (uint32_t processed = graph_.block_count(); processed > 0; --processed) { BlockIndex block_index = static_cast(processed - 1); @@ -293,6 +303,7 @@ class RedundantStoreAnalysis { } } eliminable_stores_ = nullptr; + mergeable_store_pairs_ = nullptr; } void ProcessBlock(const Block& block) { @@ -314,9 +325,12 @@ class RedundantStoreAnalysis { const uint8_t size = store.stored_rep.SizeInBytes(); // For now we consider only stores of fields of objects on the heap. if (is_on_heap_store && is_field_store) { + bool is_eliminable_store = false; switch (table_.GetObservability(store.base(), store.offset, size)) { case StoreObservability::kUnobservable: eliminable_stores_->insert(index); + last_field_initialization_store_ = OpIndex::Invalid(); + is_eliminable_store = true; break; case StoreObservability::kGCObservable: if (store.maybe_initializing_or_transitioning) { @@ -326,6 +340,8 @@ class RedundantStoreAnalysis { size); } else { eliminable_stores_->insert(index); + last_field_initialization_store_ = OpIndex::Invalid(); + is_eliminable_store = true; } break; case StoreObservability::kObservable: @@ -335,6 +351,43 @@ class RedundantStoreAnalysis { size); break; } + + if (COMPRESS_POINTERS_BOOL && !is_eliminable_store && + store.maybe_initializing_or_transitioning && + store.kind == StoreOp::Kind::TaggedBase() && + store.write_barrier == WriteBarrierKind::kNoWriteBarrier && + store.stored_rep.IsTagged()) { + if (last_field_initialization_store_.valid() && + graph_.NextIndex(index) == last_field_initialization_store_) { + const StoreOp& store0 = store; + const StoreOp& store1 = + graph_.Get(last_field_initialization_store_) + .Cast(); + + DCHECK(!store0.index().valid()); + DCHECK(!store1.index().valid()); + + const ConstantOp* c0 = + graph_.Get(store0.value()).TryCast(); + const ConstantOp* c1 = + graph_.Get(store1.value()).TryCast(); + + if (c0 && c1 && c0->kind == ConstantOp::Kind::kHeapObject && + c1->kind == ConstantOp::Kind::kHeapObject && + store1.offset - store0.offset == 4) { + uint32_t high = static_cast(c1->handle()->ptr()); + uint32_t low = static_cast(c0->handle()->ptr()); + mergeable_store_pairs_->insert( + {index, make_uint64(high, low)}); + + eliminable_stores_->insert(last_field_initialization_store_); + last_field_initialization_store_ = OpIndex::Invalid(); + } + + } else { + 
last_field_initialization_store_ = index; + } + } } break; } @@ -367,6 +420,9 @@ class RedundantStoreAnalysis { const Graph& graph_; MaybeRedundantStoresTable table_; ZoneSet* eliminable_stores_ = nullptr; + + ZoneMap* mergeable_store_pairs_ = nullptr; + OpIndex last_field_initialization_store_ = OpIndex::Invalid(); }; template @@ -375,13 +431,20 @@ class StoreStoreEliminationReducer : public Next { TURBOSHAFT_REDUCER_BOILERPLATE(StoreStoreElimination) void Analyze() { - analysis_.Run(eliminable_stores_); + analysis_.Run(eliminable_stores_, mergeable_store_pairs_); Next::Analyze(); } OpIndex REDUCE_INPUT_GRAPH(Store)(OpIndex ig_index, const StoreOp& store) { if (eliminable_stores_.count(ig_index) > 0) { return OpIndex::Invalid(); + } else if (mergeable_store_pairs_.count(ig_index) > 0) { + DCHECK(COMPRESS_POINTERS_BOOL); + OpIndex value = __ Word64Constant(mergeable_store_pairs_[ig_index]); + __ Store(__ MapToNewGraph(store.base()), value, + StoreOp::Kind::TaggedBase(), MemoryRepresentation::Uint64(), + WriteBarrierKind::kNoWriteBarrier, store.offset); + return OpIndex::Invalid(); } return Next::ReduceInputGraphStore(ig_index, store); } @@ -389,6 +452,7 @@ class StoreStoreEliminationReducer : public Next { private: RedundantStoreAnalysis analysis_{Asm().input_graph(), Asm().phase_zone()}; ZoneSet eliminable_stores_{Asm().phase_zone()}; + ZoneMap mergeable_store_pairs_{Asm().phase_zone()}; }; #include "src/compiler/turboshaft/undef-assembler-macros.inc" diff --git a/deps/v8/src/compiler/turboshaft/typer.h b/deps/v8/src/compiler/turboshaft/typer.h index 444767e6768276..8483dea0d0c34e 100644 --- a/deps/v8/src/compiler/turboshaft/typer.h +++ b/deps/v8/src/compiler/turboshaft/typer.h @@ -1156,6 +1156,7 @@ class Typer { case RegisterRepresentation::Tagged(): case RegisterRepresentation::Compressed(): case RegisterRepresentation::Simd128(): + case RegisterRepresentation::Simd256(): // TODO(nicohartmann@): Support these representations. return Type::Any(); } @@ -1419,6 +1420,7 @@ class Typer { case RegisterRepresentation::Tagged(): case RegisterRepresentation::Compressed(): case RegisterRepresentation::Simd128(): + case RegisterRepresentation::Simd256(): if (lhs.IsNone() || rhs.IsNone()) return Type::None(); // TODO(nicohartmann@): Support those cases. 
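The mergeable_store_pairs logic above pairs two adjacent initializing 32-bit stores of compressed heap constants (offsets exactly 4 bytes apart) and replaces them with a single 64-bit store whose value packs the higher-offset constant into the upper half. A standalone sketch of just that packing arithmetic; make_uint64 mirrors the helper the diff calls, while the DemoStore type is invented for illustration:

#include <cassert>
#include <cstdint>

// Packs two compressed 32-bit values into the single 64-bit store value.
constexpr uint64_t make_uint64(uint32_t high, uint32_t low) {
  return (static_cast<uint64_t>(high) << 32) | low;
}

struct DemoStore {
  int offset;      // byte offset of the field from the object base
  uint32_t value;  // compressed heap constant being stored
};

int main() {
  DemoStore store0{8, 0x0000be11};   // initializing store at offset 8
  DemoStore store1{12, 0x0000f00d};  // initializing store at offset 12
  // Only directly adjacent 32-bit stores are merged into one 64-bit store.
  assert(store1.offset - store0.offset == 4);
  uint64_t merged = make_uint64(store1.value, store0.value);
  assert((merged & 0xffffffffu) == store0.value);  // low half: lower offset
  assert((merged >> 32) == store1.value);          // high half: higher offset
}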
return Word32Type::Set({0, 1}, zone); diff --git a/deps/v8/src/compiler/turboshaft/undef-assembler-macros.inc b/deps/v8/src/compiler/turboshaft/undef-assembler-macros.inc index ca1daa24daf937..8df36ff70483d1 100644 --- a/deps/v8/src/compiler/turboshaft/undef-assembler-macros.inc +++ b/deps/v8/src/compiler/turboshaft/undef-assembler-macros.inc @@ -17,18 +17,19 @@ #undef Assert #undef BIND +#undef BIND_LOOP +#undef BREAK +#undef CONTINUE #undef ELSE -#undef ELSE_IF -#undef END_IF #undef GOTO #undef GOTO_IF #undef GOTO_IF_NOT #undef IF #undef IF_NOT #undef LIKLEY -#undef LOOP #undef REDUCE #undef REDUCE_INPUT_GRAPH #undef UNLIKELY +#undef WHILE #undef V8_COMPILER_TURBOSHAFT_ASSEMBLER_MACROS_DEFINED diff --git a/deps/v8/src/compiler/turboshaft/use-map.cc b/deps/v8/src/compiler/turboshaft/use-map.cc index 8badbaf6dd2788..836d3f6edc9597 100644 --- a/deps/v8/src/compiler/turboshaft/use-map.cc +++ b/deps/v8/src/compiler/turboshaft/use-map.cc @@ -8,7 +8,7 @@ namespace v8::internal::compiler::turboshaft { -UseMap::UseMap(const Graph& graph, Zone* zone) +UseMap::UseMap(const Graph& graph, Zone* zone, FunctionType filter) : table_(graph.op_id_count(), zone, &graph), uses_(zone), saturated_uses_(zone) { @@ -41,6 +41,8 @@ UseMap::UseMap(const Graph& graph, Zone* zone) uses_.resize(offset); } + if (filter(op, zone)) continue; + if (block.IsLoop()) { if (op.Is()) { DCHECK_EQ(op.input_count, 2); diff --git a/deps/v8/src/compiler/turboshaft/use-map.h b/deps/v8/src/compiler/turboshaft/use-map.h index 08c4426f37c45f..2504c1196166ae 100644 --- a/deps/v8/src/compiler/turboshaft/use-map.h +++ b/deps/v8/src/compiler/turboshaft/use-map.h @@ -9,6 +9,8 @@ namespace v8::internal::compiler::turboshaft { +typedef bool (*FunctionType)(const Operation& op, Zone* zone); + // UseMap computes uses of all operations of the given turboshaft graph. It // provides a mapping from `OpIndex` to its `uses`. class UseMap { @@ -22,7 +24,11 @@ class UseMap { }; public: - UseMap(const Graph& graph, Zone* zone); + UseMap(const Graph& graph, Zone* zone, FunctionType filter); + + UseMap(const Graph& graph, Zone* zone) + : UseMap(graph, zone, + [](const Operation& op, Zone* zone) { return false; }) {} base::Vector uses(OpIndex index) const; @@ -34,6 +40,25 @@ class UseMap { ZoneVector> saturated_uses_; }; +// SimdUseMap computes uses of SIMD operations of the given turboshaft graph and +// skip other operations. +class SimdUseMap : public UseMap, public NON_EXPORTED_BASE(ZoneObject) { + public: + SimdUseMap(const Graph& graph, Zone* zone) + : UseMap(graph, zone, [](const Operation& op, Zone* zone) { + if (op.outputs_rep().size() == 1 && + op.outputs_rep()[0] == RegisterRepresentation::Simd128()) { + return false; + } + + ZoneVector storage(zone); + for (auto rep : op.inputs_rep(storage)) { + if (rep == MaybeRegisterRepresentation::Simd128()) return false; + } + return true; + }) {} +}; + } // namespace v8::internal::compiler::turboshaft #endif // V8_COMPILER_TURBOSHAFT_USE_MAP_H_ diff --git a/deps/v8/src/compiler/turboshaft/variable-reducer.h b/deps/v8/src/compiler/turboshaft/variable-reducer.h index 45eeba0b545d87..2e8e8916c98b8f 100644 --- a/deps/v8/src/compiler/turboshaft/variable-reducer.h +++ b/deps/v8/src/compiler/turboshaft/variable-reducer.h @@ -55,11 +55,9 @@ namespace v8::internal::compiler::turboshaft { // with constant inputs introduced by `VariableReducer` need to be eliminated. 
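The UseMap constructor above gains a plain function-pointer filter so clients can skip recording uses for operations they do not care about; SimdUseMap uses it to keep only operations that produce or consume Simd128 values. A standalone sketch of that filter hook over a toy graph (the Demo* types are simplified stand-ins, and the Zone parameter of the real FunctionType is dropped here for brevity):

#include <cstddef>
#include <vector>

struct DemoOp {
  bool is_simd;
  std::vector<size_t> inputs;  // indices of the operations this op uses
};

// Same shape as the new filter hook: return true to skip an operation.
using DemoFilter = bool (*)(const DemoOp& op);

class DemoUseMap {
 public:
  DemoUseMap(const std::vector<DemoOp>& graph, DemoFilter filter)
      : uses_(graph.size()) {
    for (size_t user = 0; user < graph.size(); ++user) {
      if (filter(graph[user])) continue;  // filtered ops contribute no uses
      for (size_t input : graph[user].inputs) uses_[input].push_back(user);
    }
  }
  const std::vector<size_t>& uses(size_t index) const { return uses_[index]; }

 private:
  std::vector<std::vector<size_t>> uses_;
};

// A SIMD-only view in the spirit of SimdUseMap: skip everything non-SIMD.
bool SkipNonSimd(const DemoOp& op) { return !op.is_simd; }

int main() {
  std::vector<DemoOp> graph = {{true, {}}, {false, {0}}, {true, {0}}};
  DemoUseMap simd_uses(graph, SkipNonSimd);
  // Only operation 2 (a SIMD op) records a use of operation 0.
  return simd_uses.uses(0).size() == 1 ? 0 : 1;
}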
template class VariableReducer : public RequiredOptimizationReducer { -protected: using Next = RequiredOptimizationReducer; using Snapshot = SnapshotTable::Snapshot; -private: struct GetActiveLoopVariablesIndex { IntrusiveSetIndex& operator()(Variable var) const { return var.data().active_loop_variables_index; diff --git a/deps/v8/src/compiler/turboshaft/wasm-assembler-helpers.h b/deps/v8/src/compiler/turboshaft/wasm-assembler-helpers.h index 5813189c6c9a1f..fd2a90af930ab1 100644 --- a/deps/v8/src/compiler/turboshaft/wasm-assembler-helpers.h +++ b/deps/v8/src/compiler/turboshaft/wasm-assembler-helpers.h @@ -9,6 +9,7 @@ #ifndef V8_COMPILER_TURBOSHAFT_WASM_ASSEMBLER_HELPERS_H_ #define V8_COMPILER_TURBOSHAFT_WASM_ASSEMBLER_HELPERS_H_ +#include "src/compiler/turboshaft/operations.h" #include "src/roots/roots.h" namespace v8::internal::compiler::turboshaft { @@ -19,35 +20,36 @@ struct RootTypes { #undef DEFINE_TYPE }; +template +OpIndex LoadRootHelper(AssemblerT&& assembler, RootIndex index) { + if (RootsTable::IsImmortalImmovable(index)) { + // Note that we skip the bit cast here as the value does not need to be + // tagged as the object will never be collected / moved. + return assembler.Load( + assembler.LoadRootRegister(), LoadOp::Kind::RawAligned().Immutable(), + MemoryRepresentation::UintPtr(), IsolateData::root_slot_offset(index)); + } else { + return assembler.BitcastWordPtrToTagged(assembler.Load( + assembler.LoadRootRegister(), LoadOp::Kind::RawAligned(), + MemoryRepresentation::UintPtr(), IsolateData::root_slot_offset(index))); + } +} + #define LOAD_INSTANCE_FIELD(instance, name, representation) \ __ Load(instance, LoadOp::Kind::TaggedBase(), representation, \ WasmTrustedInstanceData::k##name##Offset) -#define LOAD_PROTECTED_INSTANCE_FIELD(instance, name) \ - __ LoadProtectedPointerField(instance, \ +#define LOAD_PROTECTED_INSTANCE_FIELD(instance, name) \ + __ LoadProtectedPointerField(instance, LoadOp::Kind::TaggedBase(), \ WasmTrustedInstanceData::k##name##Offset) #define LOAD_IMMUTABLE_INSTANCE_FIELD(instance, name, representation) \ __ Load(instance, LoadOp::Kind::TaggedBase().Immutable(), representation, \ WasmTrustedInstanceData::k##name##Offset) -#define LOAD_ROOT(name) \ - V::Cast( \ - __ Load(__ LoadRootRegister(), LoadOp::Kind::RawAligned(), \ - MemoryRepresentation::UintPtr(), \ - IsolateData::root_slot_offset(RootIndex::k##name))) - -#define LOAD_TAGGED_ROOT(name) \ - V::Cast( \ - __ Load(__ LoadRootRegister(), LoadOp::Kind::RawAligned(), \ - MemoryRepresentation::TaggedPointer(), \ - IsolateData::root_slot_offset(RootIndex::k##name))) - -#define LOAD_IMMUTABLE_ROOT(name) \ - V::Cast( \ - __ Load(__ LoadRootRegister(), LoadOp::Kind::RawAligned().Immutable(), \ - MemoryRepresentation::UintPtr(), \ - IsolateData::root_slot_offset(RootIndex::k##name))) +#define LOAD_ROOT(name) \ + V::Cast( \ + LoadRootHelper(Asm(), RootIndex::k##name)) } // namespace v8::internal::compiler::turboshaft diff --git a/deps/v8/src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.cc b/deps/v8/src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.cc index 060569f1477d48..2e0698503d649f 100644 --- a/deps/v8/src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.cc +++ b/deps/v8/src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.cc @@ -67,6 +67,7 @@ void WasmGCTypeAnalyzer::StartNewSnapshotFor(const Block& block) { // Reset reachability information. This can be outdated in case of loop // revisits. Below the reachability is calculated again and potentially // re-added. 
+ bool block_was_previously_reachable = IsReachable(block); block_is_unreachable_.Remove(block.index().id()); // Start new snapshot based on predecessor information. if (block.HasPredecessors() == 0) { @@ -74,21 +75,29 @@ void WasmGCTypeAnalyzer::StartNewSnapshotFor(const Block& block) { DCHECK_EQ(block.index().id(), 0); types_table_.StartNewSnapshot(); } else if (block.IsLoop()) { + const Block& forward_predecessor = + *block.LastPredecessor()->NeighboringPredecessor(); + if (!IsReachable(forward_predecessor)) { + // If a loop isn't reachable through its forward edge, it can't possibly + // become reachable via the backedge. + block_is_unreachable_.Add(block.index().id()); + } MaybeSnapshot back_edge_snap = block_to_snapshot_[block.LastPredecessor()->index()]; - if (back_edge_snap.has_value()) { + if (back_edge_snap.has_value() && block_was_previously_reachable) { // The loop was already visited at least once. In this case use the // available information from the backedge. + // Note that we only do this if the loop wasn't marked as unreachable + // before. This solves an issue where a single block loop would think the + // backedge is reachable as we just removed the unreachable information + // above. Once the analyzer hits the backedge, it will re-evaluate if the + // backedge changes any analysis results and then potentially revisit + // this loop with forward edge and backedge. CreateMergeSnapshot(block); } else { // The loop wasn't visited yet. There isn't any type information available // for the backedge. is_first_loop_header_evaluation_ = true; - const Block& forward_predecessor = - *block.LastPredecessor()->NeighboringPredecessor(); - if (!IsReachable(forward_predecessor)) { - block_is_unreachable_.Add(block.index().id()); - } Snapshot forward_edge_snap = block_to_snapshot_[forward_predecessor.index()].value(); types_table_.StartNewSnapshot(forward_edge_snap); diff --git a/deps/v8/src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.h b/deps/v8/src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.h index 05a3f3c3e95925..533c9cdb036d59 100644 --- a/deps/v8/src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.h +++ b/deps/v8/src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.h @@ -324,7 +324,7 @@ class WasmGCTypedOptimizationReducer : public Next { // TODO(14108): This isn't a type optimization and doesn't fit well into this // reducer. - OpIndex REDUCE(AnyConvertExtern)(V object) { + OpIndex REDUCE(AnyConvertExtern)(V object) { LABEL_BLOCK(no_change) { return Next::ReduceAnyConvertExtern(object); } if (ShouldSkipOptimizationStep()) goto no_change; diff --git a/deps/v8/src/compiler/turboshaft/wasm-js-lowering-reducer.h b/deps/v8/src/compiler/turboshaft/wasm-js-lowering-reducer.h index 246fbf3c5893e3..be28e8e1aa24e2 100644 --- a/deps/v8/src/compiler/turboshaft/wasm-js-lowering-reducer.h +++ b/deps/v8/src/compiler/turboshaft/wasm-js-lowering-reducer.h @@ -51,7 +51,7 @@ class WasmJSLoweringReducer : public Next { __ Call(call_target, new_frame_state, {}, ts_descriptor); __ Unreachable(); // The trap builtin never returns. 
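The loop-header change above makes reachability propagation safer: if the forward predecessor of a loop header is unreachable, the header is marked unreachable immediately, and the backedge snapshot is only consulted when the header was reachable on the previous visit, so a single-block loop cannot resurrect itself through its own backedge right after its stale reachability bit was cleared. A standalone sketch of that decision logic, with simplified stand-ins for the analyzer's state:

#include <optional>

struct Snapshot {};  // stand-in for the per-block type snapshot

Snapshot StartLoopHeaderSnapshot(bool forward_predecessor_reachable,
                                 bool was_previously_reachable,
                                 std::optional<Snapshot> backedge_snapshot,
                                 bool* block_is_unreachable) {
  if (!forward_predecessor_reachable) {
    // Unreachable via the forward edge implies unreachable via the backedge.
    *block_is_unreachable = true;
  }
  if (backedge_snapshot.has_value() && was_previously_reachable) {
    // Revisit of an already-reachable loop: merge forward and backedge info.
    return *backedge_snapshot;  // stands in for CreateMergeSnapshot(block)
  }
  // First (or previously unreachable) visit: forward-edge information only.
  return Snapshot{};
}

int main() {
  bool unreachable = false;
  StartLoopHeaderSnapshot(/*forward_predecessor_reachable=*/false,
                          /*was_previously_reachable=*/false, std::nullopt,
                          &unreachable);
  return unreachable ? 0 : 1;  // the loop header is now marked unreachable
}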
} - END_IF + return OpIndex::Invalid(); } diff --git a/deps/v8/src/compiler/turboshaft/wasm-lowering-reducer.h b/deps/v8/src/compiler/turboshaft/wasm-lowering-reducer.h index 87c21e83bd98e4..82c3356fcd93d2 100644 --- a/deps/v8/src/compiler/turboshaft/wasm-lowering-reducer.h +++ b/deps/v8/src/compiler/turboshaft/wasm-lowering-reducer.h @@ -43,15 +43,17 @@ class WasmLoweringReducer : public Next { OpIndex REDUCE(Null)(wasm::ValueType type) { return Null(type); } OpIndex REDUCE(IsNull)(OpIndex object, wasm::ValueType type) { - // TODO(14108): Can this be done simpler for static-roots nowadays? - Tagged_t static_null = - wasm::GetWasmEngine()->compressed_wasm_null_value_or_zero(); - OpIndex null_value = +#if V8_STATIC_ROOTS_BOOL + // TODO(14616): Extend this for shared types. + const bool is_wasm_null = !wasm::IsSubtypeOf(type, wasm::kWasmExternRef, module_) && - !wasm::IsSubtypeOf(type, wasm::kWasmExnRef, module_) && - static_null != 0 - ? __ UintPtrConstant(static_null) - : Null(type); + !wasm::IsSubtypeOf(type, wasm::kWasmExnRef, module_); + OpIndex null_value = + __ UintPtrConstant(is_wasm_null ? StaticReadOnlyRoot::kWasmNull + : StaticReadOnlyRoot::kNullValue); +#else + OpIndex null_value = Null(type); +#endif return __ TaggedEqual(object, null_value); } @@ -90,7 +92,7 @@ class WasmLoweringReducer : public Next { MemoryRepresentation::AnyTagged(), map_offset); } - OpIndex REDUCE(WasmTypeCheck)(V object, OptionalV rtt, + OpIndex REDUCE(WasmTypeCheck)(V object, OptionalV rtt, WasmTypeCheckConfig config) { if (rtt.has_value()) { return ReduceWasmTypeCheckRtt(object, rtt, config); @@ -99,7 +101,7 @@ class WasmLoweringReducer : public Next { } } - OpIndex REDUCE(WasmTypeCast)(V object, OptionalV rtt, + OpIndex REDUCE(WasmTypeCast)(V object, OptionalV rtt, WasmTypeCheckConfig config) { if (rtt.has_value()) { return ReduceWasmTypeCastRtt(object, rtt, config); @@ -108,8 +110,8 @@ class WasmLoweringReducer : public Next { } } - OpIndex REDUCE(AnyConvertExtern)(V object) { - Label end_label(&Asm()); + OpIndex REDUCE(AnyConvertExtern)(V object) { + Label end_label(&Asm()); Label<> null_label(&Asm()); Label<> smi_label(&Asm()); Label<> int_to_smi_label(&Asm()); @@ -143,7 +145,7 @@ class WasmLoweringReducer : public Next { GOTO(end_label, object); BIND(convert_to_heap_number_label); - V heap_number = __ template WasmCallBuiltinThroughJumptable< + V heap_number = __ template WasmCallBuiltinThroughJumptable< BuiltinCallDescriptor::WasmInt32ToHeapNumber>({int_value}); GOTO(end_label, heap_number); } @@ -189,8 +191,8 @@ class WasmLoweringReducer : public Next { return result; } - OpIndex REDUCE(ExternConvertAny)(V object) { - Label end(&Asm()); + OpIndex REDUCE(ExternConvertAny)(V object) { + Label end(&Asm()); GOTO_IF_NOT(__ IsNull(object, wasm::kWasmAnyRef), end, object); GOTO(end, Null(wasm::kWasmExternRef)); BIND(end, result); @@ -340,34 +342,31 @@ class WasmLoweringReducer : public Next { return struct_value; } - OpIndex REDUCE(WasmRefFunc)(V wasm_instance, + OpIndex REDUCE(WasmRefFunc)(V wasm_instance, uint32_t function_index) { - V functions = - LOAD_IMMUTABLE_INSTANCE_FIELD(wasm_instance, WasmInternalFunctions, - MemoryRepresentation::TaggedPointer()); - V maybe_function = - __ LoadFixedArrayElement(functions, function_index); - - Label done(&Asm()); - IF (UNLIKELY(__ IsSmi(maybe_function))) { + V func_refs = LOAD_IMMUTABLE_INSTANCE_FIELD( + wasm_instance, FuncRefs, MemoryRepresentation::TaggedPointer()); + V maybe_func_ref = + __ LoadFixedArrayElement(func_refs, function_index); + + Label 
done(&Asm()); + IF (UNLIKELY(__ IsSmi(maybe_func_ref))) { V function_index_constant = __ Word32Constant(function_index); - V from_builtin = - __ template WasmCallBuiltinThroughJumptable< - BuiltinCallDescriptor::WasmRefFunc>({function_index_constant}); + V from_builtin = __ template WasmCallBuiltinThroughJumptable< + BuiltinCallDescriptor::WasmRefFunc>({function_index_constant}); GOTO(done, from_builtin); + } ELSE { + GOTO(done, V::Cast(maybe_func_ref)); } - ELSE { - GOTO(done, V::Cast(maybe_function)); - } - END_IF + BIND(done, result_value); return result_value; } OpIndex REDUCE(StringAsWtf16)(OpIndex string) { - Label done(&Asm()); + Label done(&Asm()); V instance_type = __ LoadInstanceTypeField(__ LoadMapField(string)); V string_representation = __ Word32BitwiseAnd( instance_type, __ Word32Constant(kStringRepresentationMask)); @@ -379,10 +378,10 @@ class WasmLoweringReducer : public Next { return result; } - OpIndex REDUCE(StringPrepareForGetCodeUnit)(V original_string) { - LoopLabel + OpIndex REDUCE(StringPrepareForGetCodeUnit)(V original_string) { + LoopLabel dispatch(&Asm()); - Label + Label direct_string(&Asm()); // These values will be used to replace the original node's projections. @@ -395,14 +394,14 @@ class WasmLoweringReducer : public Next { // i.e. it is 0 for one-byte strings, 1 for two-byte strings, // kCharWidthBailoutSentinel for uncached external strings (for which // "string"/"offset" are invalid and unusable). - Label + Label done(&Asm()); V original_type = __ LoadInstanceTypeField(__ LoadMapField(original_string)); GOTO(dispatch, original_string, original_type, __ Word32Constant(0)); - LOOP(dispatch, string, instance_type, offset) { + BIND_LOOP(dispatch, string, instance_type, offset) { Label<> thin_string(&Asm()); Label<> cons_string(&Asm()); @@ -425,14 +424,14 @@ class WasmLoweringReducer : public Next { V new_offset = __ Word32Add( offset, __ UntagSmi(__ template LoadField( string, AccessBuilder::ForSlicedStringOffset()))); - V parent = __ template LoadField( + V parent = __ template LoadField( string, AccessBuilder::ForSlicedStringParent()); V parent_type = __ LoadInstanceTypeField(__ LoadMapField(parent)); GOTO(dispatch, parent, parent_type, new_offset); // Thin string. BIND(thin_string); - V actual = __ template LoadField( + V actual = __ template LoadField( string, AccessBuilder::ForThinStringActual()); V actual_type = __ LoadInstanceTypeField(__ LoadMapField(actual)); // ThinStrings always reference (internalized) direct strings. @@ -441,7 +440,7 @@ class WasmLoweringReducer : public Next { // Flat cons string. (Non-flat cons strings are ruled out by // string.as_wtf16.) 
BIND(cons_string); - V first = __ template LoadField( + V first = __ template LoadField( string, AccessBuilder::ForConsStringFirst()); V first_type = __ LoadInstanceTypeField(__ LoadMapField(first)); GOTO(dispatch, first, first_type, offset); @@ -527,7 +526,7 @@ class WasmLoweringReducer : public Next { } } - V BuildLoadExternalPointerFromObject(V object, + V BuildLoadExternalPointerFromObject(V object, FieldAccess access) { #ifdef V8_ENABLE_SANDBOX DCHECK_NE(access.external_pointer_tag, kExternalPointerNullTag); @@ -540,7 +539,7 @@ class WasmLoweringReducer : public Next { #endif // V8_ENABLE_SANDBOX } - OpIndex ReduceWasmTypeCheckAbstract(V object, + OpIndex ReduceWasmTypeCheckAbstract(V object, WasmTypeCheckConfig config) { const bool object_can_be_null = config.from.is_nullable(); const bool null_succeeds = config.to.is_nullable(); @@ -612,7 +611,7 @@ class WasmLoweringReducer : public Next { return final_result; } - OpIndex ReduceWasmTypeCastAbstract(V object, + OpIndex ReduceWasmTypeCastAbstract(V object, WasmTypeCheckConfig config) { const bool object_can_be_null = config.from.is_nullable(); const bool null_succeeds = config.to.is_nullable(); @@ -689,7 +688,7 @@ class WasmLoweringReducer : public Next { return object; } - OpIndex ReduceWasmTypeCastRtt(V object, OptionalV rtt, + OpIndex ReduceWasmTypeCastRtt(V object, OptionalV rtt, WasmTypeCheckConfig config) { DCHECK(rtt.has_value()); int rtt_depth = wasm::GetSubtypingDepth(module_, config.to.ref_index()); @@ -733,7 +732,7 @@ class WasmLoweringReducer : public Next { __ TrapIfNot(is_wasm_obj, OpIndex::Invalid(), TrapId::kTrapIllegalCast); } - V type_info = LoadWasmTypeInfo(map); + V type_info = LoadWasmTypeInfo(map); DCHECK_GE(rtt_depth, 0); // If the depth of the rtt is known to be less that the minimum supertype // array length, we can access the supertype without bounds-checking the @@ -749,7 +748,7 @@ class WasmLoweringReducer : public Next { OpIndex::Invalid(), TrapId::kTrapIllegalCast); } - V maybe_match = + V maybe_match = __ Load(type_info, LoadOp::Kind::TaggedBase().Immutable(), MemoryRepresentation::TaggedPointer(), WasmTypeInfo::kSupertypesOffset + kTaggedSize * rtt_depth); @@ -763,7 +762,7 @@ class WasmLoweringReducer : public Next { return object; } - OpIndex ReduceWasmTypeCheckRtt(V object, OptionalV rtt, + OpIndex ReduceWasmTypeCheckRtt(V object, OptionalV rtt, WasmTypeCheckConfig config) { DCHECK(rtt.has_value()); int rtt_depth = wasm::GetSubtypingDepth(module_, config.to.ref_index()); @@ -803,7 +802,7 @@ class WasmLoweringReducer : public Next { GOTO_IF_NOT(LIKELY(is_wasm_obj), end_label, __ Word32Constant(0)); } - V type_info = LoadWasmTypeInfo(map); + V type_info = LoadWasmTypeInfo(map); DCHECK_GE(rtt_depth, 0); // If the depth of the rtt is known to be less that the minimum supertype // array length, we can access the supertype without bounds-checking the @@ -821,7 +820,7 @@ class WasmLoweringReducer : public Next { end_label, __ Word32Constant(0)); } - V maybe_match = + V maybe_match = __ Load(type_info, LoadOp::Kind::TaggedBase().Immutable(), MemoryRepresentation::TaggedPointer(), WasmTypeInfo::kSupertypesOffset + kTaggedSize * rtt_depth); @@ -923,8 +922,16 @@ class WasmLoweringReducer : public Next { wasm::IsSubtypeOf(type, wasm::kWasmExnRef, module_) ? RootIndex::kNullValue : RootIndex::kWasmNull; + // We load WasmNull as a pointer here and not as a TaggedPointer because + // WasmNull is stored uncompressed in the IsolateData, and a load of a + // TaggedPointer loads compressed pointers. 
We do not bitcast the WasmNull + // to Tagged at the moment because it would increase graph size, which may + // affect optimizations negatively. These regressions would be worth it if + // there was any benefit of the bitcast. However, the graph validation + // currently allows implicit representation changes from `WordPtr` to + // `Tagged`. return __ Load(roots, LoadOp::Kind::RawAligned().Immutable(), - MemoryRepresentation::TaggedPointer(), + MemoryRepresentation::UintPtr(), IsolateData::root_slot_offset(index)); } @@ -942,7 +949,7 @@ class WasmLoweringReducer : public Next { comparison_value, LAST_WASM_OBJECT_TYPE - FIRST_WASM_OBJECT_TYPE); } - V LoadWasmTypeInfo(V map) { + V LoadWasmTypeInfo(V map) { int offset = Map::kConstructorOrBackPointerOrNativeContextOffset; return __ Load(map, LoadOp::Kind::TaggedBase().Immutable(), MemoryRepresentation::TaggedPointer(), offset); diff --git a/deps/v8/src/compiler/turboshaft/wasm-revec-reducer.cc b/deps/v8/src/compiler/turboshaft/wasm-revec-reducer.cc index 7a1493136b7767..8f4f2e75287eb5 100644 --- a/deps/v8/src/compiler/turboshaft/wasm-revec-reducer.cc +++ b/deps/v8/src/compiler/turboshaft/wasm-revec-reducer.cc @@ -31,16 +31,21 @@ std::string GetSimdOpcodeName(Operation const& op) { // This class is the wrapper for StoreOp/LoadOp, which is helpful to calcualte // the relative offset between two StoreOp/LoadOp. template || - std::is_same_v>> + typename = std::enable_if_t< + std::is_same_v || std::is_same_v || + std::is_same_v>> class StoreLoadInfo { public: StoreLoadInfo(const Graph* graph, const Op* op) : op_(op), offset_(op->offset) { - if (!op->index().has_value()) return; base_ = &graph->Get(op->base()); - const ChangeOp* change = - graph->Get(op->index().value()).template TryCast(); + const ChangeOp* change = nullptr; + if constexpr (std::is_same_v) { + change = graph->Get(op->index()).template TryCast(); + } else { + if (!op->index().has_value()) return; + change = graph->Get(op->index().value()).template TryCast(); + } if (change == nullptr) { SetInvalid(); return; @@ -64,8 +69,15 @@ class StoreLoadInfo { base::Optional operator-(const StoreLoadInfo& rhs) const { DCHECK(IsValid() && rhs.IsValid()); - bool calculatable = base_ == rhs.base_ && index_ == rhs.index_ && - op_->kind == rhs.op_->kind; + bool calculatable = base_ == rhs.base_ && index_ == rhs.index_; + + if constexpr (std::is_same_v) { + calculatable &= (op_->load_kind == rhs.op_->load_kind && + op_->transform_kind == rhs.op_->transform_kind); + } else { + calculatable &= (op_->kind == rhs.op_->kind); + } + if constexpr (std::is_same_v) { // TODO(v8:12716) If one store has a full write barrier and the other has // no write barrier, consider combine them with a full write barrier. @@ -122,6 +134,14 @@ bool LoadStrideEqualTo(const Graph& graph, const NodeGroup& node_group, return load_infos[1] - load_infos[0] == stride; } +// Returns true if all of the nodes in node_group are identical. +// Splat opcode in WASM SIMD is used to create vector with identical lanes. 
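+// For example, a group that packs the two lanes of a splat contains the same
+// node twice, i.e. (n, n); this predicate is what detects that case.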
+template +bool IsSplat(const T& node_group) { + DCHECK_EQ(node_group.size(), 2); + return node_group[1] == node_group[0]; +} + void PackNode::Print(Graph* graph) const { Operation& op = graph->Get(nodes_[0]); TRACE("%s(#%d, #%d)\n", GetSimdOpcodeName(op).c_str(), nodes_[0].id(), @@ -136,20 +156,12 @@ PackNode* SLPTree::GetPackNode(OpIndex node) { return nullptr; } -void SLPTree::Print(const char* info) { - TRACE("%s, %zu Packed node:\n", info, node_to_packnode_.size()); - if (!v8_flags.trace_wasm_revectorize) { - return; - } - - ForEach([this](PackNode const* pnode) { pnode->Print(&graph_); }); -} - template -void SLPTree::ForEach(FunctionType callback) { +void ForEach(FunctionType callback, + ZoneUnorderedMap& node_map) { std::unordered_set visited; - for (auto& entry : node_to_packnode_) { + for (auto& entry : node_map) { PackNode const* pnode = entry.second; if (!pnode || visited.find(pnode) != visited.end()) { continue; @@ -160,6 +172,16 @@ void SLPTree::ForEach(FunctionType callback) { } } +void SLPTree::Print(const char* info) { + TRACE("%s, %zu Packed node:\n", info, node_to_packnode_.size()); + if (!v8_flags.trace_wasm_revectorize) { + return; + } + + ForEach([this](PackNode const* pnode) { pnode->Print(&graph_); }, + node_to_packnode_); +} + PackNode* SLPTree::NewPackNode(const NodeGroup& node_group) { Operation& op = graph_.Get(node_group[0]); TRACE("PackNode %s(#%d, #%d)\n", GetSimdOpcodeName(op).c_str(), @@ -226,6 +248,24 @@ bool SLPTree::IsSideEffectFree(OpIndex first, OpIndex second) { return true; } +// Returns true if op in node_group have same kind. +bool IsSameOpAndKind(const Operation& op0, const Operation& op1) { +#define CASE(operation) \ + case Opcode::k##operation: { \ + using Op = opcode_to_operation_map::Op; \ + return op0.Cast().kind == op1.Cast().kind; \ + } + switch (op0.opcode) { + CASE(Simd128Unary) + CASE(Simd128Binop) + CASE(Simd128Shift) + CASE(Simd128Ternary) + default: + return true; + } +#undef CASE +} + bool SLPTree::CanBePacked(const NodeGroup& node_group) { OpIndex node0 = node_group[0]; OpIndex node1 = node_group[1]; @@ -249,6 +289,12 @@ bool SLPTree::CanBePacked(const NodeGroup& node_group) { return false; } + if (!IsSameOpAndKind(op0, op1)) { + TRACE("(%s, %s) have different op\n", GetSimdOpcodeName(op0).c_str(), + GetSimdOpcodeName(op1).c_str()); + return false; + } + if (node0.offset() <= node1.offset() ? 
!IsSideEffectFree(node0, node1) : !IsSideEffectFree(node1, node0)) { TRACE("Break side effect\n"); @@ -257,11 +303,47 @@ bool SLPTree::CanBePacked(const NodeGroup& node_group) { return true; } +bool SLPTree::IsEqual(const OpIndex node0, const OpIndex node1) { + if (node0 == node1) return true; + if (const ConstantOp* const0 = graph_.Get(node0).TryCast()) { + if (const ConstantOp* const1 = graph_.Get(node1).TryCast()) { + return *const0 == *const1; + } + } + return false; +} + PackNode* SLPTree::BuildTree(const NodeGroup& roots) { root_ = BuildTreeRec(roots, 0); return root_; } +bool IsLoadExtend(const Simd128LoadTransformOp& op) { + switch (op.transform_kind) { + case Simd128LoadTransformOp::TransformKind::k8x8S: + case Simd128LoadTransformOp::TransformKind::k8x8U: + case Simd128LoadTransformOp::TransformKind::k16x4S: + case Simd128LoadTransformOp::TransformKind::k16x4U: + case Simd128LoadTransformOp::TransformKind::k32x2S: + case Simd128LoadTransformOp::TransformKind::k32x2U: + return true; + default: + return false; + } +} + +bool IsLoadSplat(const Simd128LoadTransformOp& op) { + switch (op.transform_kind) { + case Simd128LoadTransformOp::TransformKind::k8Splat: + case Simd128LoadTransformOp::TransformKind::k16Splat: + case Simd128LoadTransformOp::TransformKind::k32Splat: + case Simd128LoadTransformOp::TransformKind::k64Splat: + return true; + default: + return false; + } +} + PackNode* SLPTree::BuildTreeRec(const NodeGroup& node_group, unsigned recursion_depth) { DCHECK_EQ(node_group.size(), 2); @@ -297,7 +379,39 @@ PackNode* SLPTree::BuildTreeRec(const NodeGroup& node_group, } } + int value_in_count = op0.input_count; + switch (op0.opcode) { + case Opcode::kSimd128LoadTransform: { + const Simd128LoadTransformOp& transform_op = + op0.Cast(); + if (IsLoadSplat(transform_op)) { + TRACE("Simd128LoadTransform: LoadSplat\n"); + if (!IsSplat(node_group)) { + return nullptr; + } + } else if (IsLoadExtend(transform_op)) { + TRACE("Simd128LoadTransform: LoadExtend\n"); + if (!LoadStrideEqualTo>( + graph_, node_group, kSimd128Size / 2)) { + TRACE("Wrong Access stride\n"); + return nullptr; + } + } else { + TRACE("Load Transfrom k64Zero/k32Zero!\n"); + DCHECK(transform_op.transform_kind == + Simd128LoadTransformOp::TransformKind::k32Zero || + transform_op.transform_kind == + Simd128LoadTransformOp::TransformKind::k64Zero); + // k64Zero/k32Zero is not supported + TRACE("Simd128LoadTransform: unsupported k64Zero/k32Zero\n"); + return nullptr; + } + PackNode* p = NewPackNode(node_group); + return p; + } + case Opcode::kLoad: { TRACE("Load leaf node\n"); if (op0.Cast().loaded_rep != MemoryRepresentation::Simd128() || @@ -320,6 +434,86 @@ PackNode* SLPTree::BuildTreeRec(const NodeGroup& node_group, PackNode* pnode = NewPackNodeAndRecurs(node_group, 1, 1, recursion_depth); return pnode; } + case Opcode::kSimd128Unary: { +#define UNARY_CASE(op_128, not_used) case Simd128UnaryOp::Kind::k##op_128: + switch (op0.Cast().kind) { + SIMD256_UNARY_OP(UNARY_CASE) { + TRACE("Added a vector of Unary\n"); + PackNode* pnode = NewPackNodeAndRecurs(node_group, 0, value_in_count, + recursion_depth); + return pnode; + } + default: { + TRACE("Unsupported Simd128Unary: %s\n", + GetSimdOpcodeName(op0).c_str()); + return nullptr; + } + } +#undef UNARY_CASE + } + case Opcode::kSimd128Binop: { +#define BINOP_CASE(op_128, not_used) case Simd128BinopOp::Kind::k##op_128: + switch (op0.Cast().kind) { + SIMD256_BINOP_SIMPLE_OP(BINOP_CASE) { + TRACE("Added a vector of BinOp\n"); + PackNode* pnode = NewPackNodeAndRecurs(node_group, 
0, value_in_count, + recursion_depth); + return pnode; + } + default: { + TRACE("Unsupported Simd128BinopOp: %s\n", + GetSimdOpcodeName(op0).c_str()); + return nullptr; + } + } +#undef BINOP_CASE + } + case Opcode::kSimd128Shift: { + Simd128ShiftOp& shift_op0 = op0.Cast(); + Simd128ShiftOp& shift_op1 = op1.Cast(); + if (IsEqual(shift_op0.shift(), shift_op1.shift())) { + switch (op0.Cast().kind) { +#define SHIFT_CASE(op_128, not_used) case Simd128ShiftOp::Kind::k##op_128: + SIMD256_SHIFT_OP(SHIFT_CASE) { + TRACE("Added a vector of Shift op.\n"); + // We've already checked that the "shift by" input of both shifts is + // the same, and we'll only pack the 1st input of the shifts + // together anyways (since on both Simd128 and Simd256, the "shift + // by" input of shifts is a Word32). Thus we only need to check the + // 1st input of the shift when recursing. + constexpr int kShiftValueInCount = 1; + PackNode* pnode = NewPackNodeAndRecurs( + node_group, 0, kShiftValueInCount, recursion_depth); + return pnode; + } +#undef SHIFT_CASE + default: { + TRACE("Unsupported Simd128ShiftOp: %s\n", + GetSimdOpcodeName(op0).c_str()); + return nullptr; + } + } + } + TRACE("Failed due to SimdShiftOp kind or shift scalar is different!\n"); + return nullptr; + } + case Opcode::kSimd128Ternary: { +#define TERNARY_CASE(op_128, not_used) case Simd128TernaryOp::Kind::k##op_128: + switch (op0.Cast().kind) { + SIMD256_TERNARY_OP(TERNARY_CASE) { + TRACE("Added a vector of Ternary\n"); + PackNode* pnode = NewPackNodeAndRecurs(node_group, 0, value_in_count, + recursion_depth); + return pnode; + } +#undef TERNARY_CASE + default: { + TRACE("Unsupported Simd128Ternary: %s\n", + GetSimdOpcodeName(op0).c_str()); + return nullptr; + } + } + } default: TRACE("Default branch #%d:%s\n", node0.id(), @@ -418,6 +612,51 @@ void WasmRevecAnalyzer::Run() { revectorizable_node_.merge(slp_tree_->GetNodeMapping()); } } + + // Early exist when no revectorizable node found. + if (revectorizable_node_.empty()) return; + + // Build SIMD usemap + use_map_ = phase_zone_->New(graph_, phase_zone_); + if (!DecideVectorize()) { + revectorizable_node_.clear(); + } else { + should_reduce_ = true; + TRACE("Decide to revectorize!\n"); + } +} + +bool WasmRevecAnalyzer::DecideVectorize() { + TRACE("Enter %s\n", __func__); + int save = 0, cost = 0; + ForEach( + [&](PackNode const* pnode) { + const NodeGroup& nodes = pnode->Nodes(); + // Splat nodes will not cause a saving as it simply extends itself. + if (!IsSplat(nodes)) { + save++; + } + + for (int i = 0; i < static_cast(nodes.size()); i++) { + if (i > 0 && nodes[i] == nodes[0]) continue; + + for (auto use : use_map_->uses(nodes[i])) { + if (!GetPackNode(use)) { + TRACE("External use edge: (%d:%s) -> (%d:%s)\n", use.id(), + OpcodeName(graph_.Get(use).opcode), nodes[i].id(), + OpcodeName(graph_.Get(nodes[i]).opcode)); + cost++; + + // We only need one Extract node and all other uses can share. 
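+              // Hence charge the extraction cost once for this lane and stop
+              // scanning its remaining uses.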
+ break; + } + } + } + }, + revectorizable_node_); + + TRACE("Save: %d, cost: %d\n", save, cost); + return save > cost; } } // namespace v8::internal::compiler::turboshaft diff --git a/deps/v8/src/compiler/turboshaft/wasm-revec-reducer.h b/deps/v8/src/compiler/turboshaft/wasm-revec-reducer.h index 0d098643c7b973..7b3c1798de31d6 100644 --- a/deps/v8/src/compiler/turboshaft/wasm-revec-reducer.h +++ b/deps/v8/src/compiler/turboshaft/wasm-revec-reducer.h @@ -12,10 +12,144 @@ #include "src/compiler/turboshaft/assembler.h" #include "src/compiler/turboshaft/operations.h" #include "src/compiler/turboshaft/phase.h" +#include "src/compiler/turboshaft/use-map.h" #include "src/compiler/wasm-graph-assembler.h" namespace v8::internal::compiler::turboshaft { +#define SIMD256_LOADTRANSFORM_OP(V) \ + V(8x8S, 8x16S) \ + V(8x8U, 8x16U) \ + V(16x4S, 16x8S) \ + V(16x4U, 16x8U) \ + V(32x2S, 32x4S) \ + V(32x2U, 32x4U) \ + V(8Splat, 8Splat) \ + V(16Splat, 16Splat) \ + V(32Splat, 32Splat) \ + V(64Splat, 64Splat) + +#define SIMD256_UNARY_OP(V) \ + V(S128Not, S256Not) \ + V(I8x16Abs, I8x32Abs) \ + V(I8x16Neg, I8x32Neg) \ + V(I16x8ExtAddPairwiseI8x16S, I16x16ExtAddPairwiseI8x32S) \ + V(I16x8ExtAddPairwiseI8x16U, I16x16ExtAddPairwiseI8x32U) \ + V(I32x4ExtAddPairwiseI16x8S, I32x8ExtAddPairwiseI16x16S) \ + V(I32x4ExtAddPairwiseI16x8U, I32x8ExtAddPairwiseI16x16U) \ + V(I16x8Abs, I16x16Abs) \ + V(I16x8Neg, I16x16Neg) \ + V(I32x4Abs, I32x8Abs) \ + V(I32x4Neg, I32x8Neg) \ + V(F32x4Abs, F32x8Abs) \ + V(F32x4Neg, F32x8Neg) \ + V(F32x4Sqrt, F32x8Sqrt) \ + V(F64x2Sqrt, F64x4Sqrt) \ + V(I32x4UConvertF32x4, I32x8UConvertF32x8) \ + V(F32x4UConvertI32x4, F32x8UConvertI32x8) + +#define SIMD256_BINOP_SIMPLE_OP(V) \ + V(I8x16Eq, I8x32Eq) \ + V(I8x16Ne, I8x32Ne) \ + V(I8x16GtS, I8x32GtS) \ + V(I8x16GtU, I8x32GtU) \ + V(I8x16GeS, I8x32GeS) \ + V(I8x16GeU, I8x32GeU) \ + V(I16x8Eq, I16x16Eq) \ + V(I16x8Ne, I16x16Ne) \ + V(I16x8GtS, I16x16GtS) \ + V(I16x8GtU, I16x16GtU) \ + V(I16x8GeS, I16x16GeS) \ + V(I16x8GeU, I16x16GeU) \ + V(I32x4Eq, I32x8Eq) \ + V(I32x4Ne, I32x8Ne) \ + V(I32x4GtS, I32x8GtS) \ + V(I32x4GtU, I32x8GtU) \ + V(I32x4GeS, I32x8GeS) \ + V(I32x4GeU, I32x8GeU) \ + V(F32x4Eq, F32x8Eq) \ + V(F32x4Ne, F32x8Ne) \ + V(F32x4Lt, F32x8Lt) \ + V(F32x4Le, F32x8Le) \ + V(F64x2Eq, F64x4Eq) \ + V(F64x2Ne, F64x4Ne) \ + V(F64x2Lt, F64x4Lt) \ + V(F64x2Le, F64x4Le) \ + V(S128And, S256And) \ + V(S128AndNot, S256AndNot) \ + V(S128Or, S256Or) \ + V(S128Xor, S256Xor) \ + V(I8x16SConvertI16x8, I8x32SConvertI16x16) \ + V(I8x16UConvertI16x8, I8x32UConvertI16x16) \ + V(I8x16Add, I8x32Add) \ + V(I8x16AddSatS, I8x32AddSatS) \ + V(I8x16AddSatU, I8x32AddSatU) \ + V(I8x16Sub, I8x32Sub) \ + V(I8x16SubSatS, I8x32SubSatS) \ + V(I8x16SubSatU, I8x32SubSatU) \ + V(I8x16MinS, I8x32MinS) \ + V(I8x16MinU, I8x32MinU) \ + V(I8x16MaxS, I8x32MaxS) \ + V(I8x16MaxU, I8x32MaxU) \ + V(I8x16RoundingAverageU, I8x32RoundingAverageU) \ + V(I16x8SConvertI32x4, I16x16SConvertI32x8) \ + V(I16x8UConvertI32x4, I16x16UConvertI32x8) \ + V(I16x8Add, I16x16Add) \ + V(I16x8AddSatS, I16x16AddSatS) \ + V(I16x8AddSatU, I16x16AddSatU) \ + V(I16x8Sub, I16x16Sub) \ + V(I16x8SubSatS, I16x16SubSatS) \ + V(I16x8SubSatU, I16x16SubSatU) \ + V(I16x8Mul, I16x16Mul) \ + V(I16x8MinS, I16x16MinS) \ + V(I16x8MinU, I16x16MinU) \ + V(I16x8MaxS, I16x16MaxS) \ + V(I16x8MaxU, I16x16MaxU) \ + V(I16x8RoundingAverageU, I16x16RoundingAverageU) \ + V(I32x4Add, I32x8Add) \ + V(I32x4Sub, I32x8Sub) \ + V(I32x4Mul, I32x8Mul) \ + V(I32x4MinS, I32x8MinS) \ + V(I32x4MinU, I32x8MinU) \ + V(I32x4MaxS, I32x8MaxS) \ + 
V(I32x4MaxU, I32x8MaxU) \ + V(I32x4DotI16x8S, I32x8DotI16x16S) \ + V(I64x2Add, I64x4Add) \ + V(I64x2Sub, I64x4Sub) \ + V(I64x2Mul, I64x4Mul) \ + V(I64x2Eq, I64x4Eq) \ + V(I64x2Ne, I64x4Ne) \ + V(I64x2GtS, I64x4GtS) \ + V(I64x2GeS, I64x4GeS) \ + V(F32x4Add, F32x8Add) \ + V(F32x4Sub, F32x8Sub) \ + V(F32x4Mul, F32x8Mul) \ + V(F32x4Div, F32x8Div) \ + V(F32x4Min, F32x8Min) \ + V(F32x4Max, F32x8Max) \ + V(F32x4Pmin, F32x8Pmin) \ + V(F32x4Pmax, F32x8Pmax) \ + V(F64x2Add, F64x4Add) \ + V(F64x2Sub, F64x4Sub) \ + V(F64x2Mul, F64x4Mul) \ + V(F64x2Div, F64x4Div) \ + V(F64x2Min, F64x4Min) \ + V(F64x2Max, F64x4Max) \ + V(F64x2Pmin, F64x4Pmin) \ + V(F64x2Pmax, F64x4Pmax) + +#define SIMD256_SHIFT_OP(V) \ + V(I16x8Shl, I16x16Shl) \ + V(I16x8ShrS, I16x16ShrS) \ + V(I16x8ShrU, I16x16ShrU) \ + V(I32x4Shl, I32x8Shl) \ + V(I32x4ShrS, I32x8ShrS) \ + V(I32x4ShrU, I32x8ShrU) \ + V(I64x2Shl, I64x4Shl) \ + V(I64x2ShrU, I64x4ShrU) + +#define SIMD256_TERNARY_OP(V) V(S128Select, S256Select) + #include "src/compiler/turboshaft/define-assembler-macros.inc" class NodeGroup { @@ -57,7 +191,7 @@ class NodeGroup { // are mutually independent. class PackNode : public NON_EXPORTED_BASE(ZoneObject) { public: - PackNode(const NodeGroup& node_group) + explicit PackNode(const NodeGroup& node_group) : nodes_(node_group), revectorized_node_() {} NodeGroup Nodes() const { return nodes_; } bool IsSame(const NodeGroup& node_group) const { @@ -92,9 +226,6 @@ class SLPTree : public NON_EXPORTED_BASE(ZoneObject) { void Print(const char* info); - template - void ForEach(FunctionType callback); - private: // This is the recursive part of BuildTree. PackNode* BuildTreeRec(const NodeGroup& node_group, unsigned depth); @@ -108,6 +239,7 @@ class SLPTree : public NON_EXPORTED_BASE(ZoneObject) { bool IsSideEffectFree(OpIndex first, OpIndex second); bool CanBePacked(const NodeGroup& node_group); + bool IsEqual(const OpIndex node0, const OpIndex node1); Graph& graph() const { return graph_; } Zone* zone() const { return phase_zone_; } @@ -128,7 +260,8 @@ class WasmRevecAnalyzer { store_seeds_(zone), slp_tree_(nullptr), revectorizable_node_(zone), - should_reduce_(false) { + should_reduce_(false), + use_map_(nullptr) { Run(); } @@ -137,8 +270,37 @@ class WasmRevecAnalyzer { bool CanMergeSLPTrees(); bool ShouldReduce() const { return should_reduce_; } + PackNode* GetPackNode(const OpIndex ig_index) { + auto itr = revectorizable_node_.find(ig_index); + if (itr != revectorizable_node_.end()) { + return itr->second; + } + return nullptr; + } + + const OpIndex GetReduced(const OpIndex node) { + auto pnode = GetPackNode(node); + if (!pnode) { + return OpIndex::Invalid(); + } + return pnode->RevectorizedNode(); + } + + const Operation& GetStartOperation(const PackNode* pnode, const OpIndex node, + const Operation& op) { + DCHECK(pnode); + OpIndex start = pnode->Nodes()[0]; + if (start == node) return op; + return graph_.Get(start); + } + + base::Vector uses(OpIndex node) { + return use_map_->uses(node); + } + private: void ProcessBlock(const Block& block); + bool DecideVectorize(); Graph& graph_; Zone* phase_zone_; @@ -148,6 +310,7 @@ class WasmRevecAnalyzer { SLPTree* slp_tree_; ZoneUnorderedMap revectorizable_node_; bool should_reduce_; + SimdUseMap* use_map_; }; template @@ -155,7 +318,242 @@ class WasmRevecReducer : public Next { public: TURBOSHAFT_REDUCER_BOILERPLATE(WasmRevec) + OpIndex GetExtractOpIfNeeded(PackNode* pnode, OpIndex ig_index, + OpIndex og_index) { + uint8_t lane = 0; + for (; lane < static_cast(pnode->Nodes().size()); lane++) { + if 
(pnode->Nodes()[lane] == ig_index) break; + } + + for (auto use : analyzer_.uses(ig_index)) { + if (!analyzer_.GetPackNode(use)) { + OpIndex extract_128 = __ Simd256Extract128Lane(og_index, lane); + return extract_128; + } + } + + return OpIndex::Invalid(); + } + + V REDUCE_INPUT_GRAPH(Simd128LoadTransform)( + V ig_index, const Simd128LoadTransformOp& load_transform) { + if (auto pnode = analyzer_.GetPackNode(ig_index)) { + V og_index = pnode->RevectorizedNode(); + // Skip revectorized node. + if (!og_index.valid()) { + auto base = __ MapToNewGraph(load_transform.base()); + auto index = __ MapToNewGraph(load_transform.index()); + auto offset = load_transform.offset; + DCHECK_EQ(load_transform.offset, 0); + + og_index = __ Simd256LoadTransform( + base, index, load_transform.load_kind, + Get256LoadTransformKindFrom128(load_transform.transform_kind), + offset); + pnode->SetRevectorizedNode(og_index); + } + return GetExtractOpIfNeeded(pnode, ig_index, og_index); + } + + return Next::ReduceInputGraphSimd128LoadTransform(ig_index, load_transform); + } + + OpIndex REDUCE_INPUT_GRAPH(Load)(OpIndex ig_index, const LoadOp& load) { + if (auto pnode = analyzer_.GetPackNode(ig_index)) { + OpIndex og_index = pnode->RevectorizedNode(); + + // Emit revectorized op. + if (!og_index.valid()) { + const LoadOp* start = analyzer_.GetStartOperation(pnode, ig_index, load) + .TryCast(); + DCHECK_EQ(start->base(), load.base()); + + auto base = __ MapToNewGraph(start->base()); + auto index = __ MapToNewGraph(start->index()); + og_index = __ Load(base, index, load.kind, + MemoryRepresentation::Simd256(), start->offset); + pnode->SetRevectorizedNode(og_index); + } + + // Emit extract op if needed. + return GetExtractOpIfNeeded(pnode, ig_index, og_index); + } + + // no_change + return Next::ReduceInputGraphLoad(ig_index, load); + } + + OpIndex REDUCE_INPUT_GRAPH(Store)(OpIndex ig_index, const StoreOp& store) { + if (auto pnode = analyzer_.GetPackNode(ig_index)) { + OpIndex og_index = pnode->RevectorizedNode(); + + // Emit revectorized op. + if (!og_index.valid()) { + const StoreOp* start = + (analyzer_.GetStartOperation(pnode, ig_index, store)) + .TryCast(); + DCHECK_EQ(start->base(), store.base()); + + auto base = __ MapToNewGraph(start->base()); + auto index = __ MapToNewGraph(start->index()); + OpIndex value = analyzer_.GetReduced(start->value()); + DCHECK(value.valid()); + + __ Store(base, index, value, store.kind, + MemoryRepresentation::Simd256(), store.write_barrier, + start->offset); + + // Set an arbitrary valid opindex here to skip reduce later. + pnode->SetRevectorizedNode(ig_index); + } + + // No extract op needed for Store. + return OpIndex::Invalid(); + } + + // no_change + return Next::ReduceInputGraphStore(ig_index, store); + } + + OpIndex REDUCE_INPUT_GRAPH(Simd128Unary)(OpIndex ig_index, + const Simd128UnaryOp& unary) { + if (auto pnode = analyzer_.GetPackNode(ig_index)) { + OpIndex og_index = pnode->RevectorizedNode(); + // Skip revectorized node. + if (!og_index.valid()) { + auto input = analyzer_.GetReduced(unary.input()); + og_index = __ Simd256Unary(V::Cast(input), + GetSimd256UnaryKind(unary.kind)); + pnode->SetRevectorizedNode(og_index); + } + return GetExtractOpIfNeeded(pnode, ig_index, og_index); + } + return Next::ReduceInputGraphSimd128Unary(ig_index, unary); + } + + OpIndex REDUCE_INPUT_GRAPH(Simd128Binop)(OpIndex ig_index, + const Simd128BinopOp& op) { + if (auto pnode = analyzer_.GetPackNode(ig_index)) { + OpIndex og_index = pnode->RevectorizedNode(); + // Skip revectorized node. 
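+      // A valid og_index means the 256-bit op was already emitted when the
+      // sibling lane of this PackNode was reduced; only the extract below may
+      // still be needed.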
+ if (!og_index.valid()) { + auto left = analyzer_.GetReduced(op.left()); + auto right = analyzer_.GetReduced(op.right()); + og_index = __ Simd256Binop(left, right, GetSimd256BinOpKind(op.kind)); + pnode->SetRevectorizedNode(og_index); + } + return GetExtractOpIfNeeded(pnode, ig_index, og_index); + } + + // no_change + return Next::ReduceInputGraphSimd128Binop(ig_index, op); + } + + OpIndex REDUCE_INPUT_GRAPH(Simd128Shift)(OpIndex ig_index, + const Simd128ShiftOp& op) { + if (auto pnode = analyzer_.GetPackNode(ig_index)) { + OpIndex og_index = pnode->RevectorizedNode(); + // Skip revectorized node. + if (!og_index.valid()) { + V input = analyzer_.GetReduced(op.input()); + DCHECK(input.valid()); + V shift = __ MapToNewGraph(op.shift()); + og_index = + __ Simd256Shift(input, shift, GetSimd256ShiftOpKind(op.kind)); + pnode->SetRevectorizedNode(og_index); + } + return GetExtractOpIfNeeded(pnode, ig_index, og_index); + } + + // no_change + return Next::ReduceInputGraphSimd128Shift(ig_index, op); + } + + OpIndex REDUCE_INPUT_GRAPH(Simd128Ternary)(OpIndex ig_index, + const Simd128TernaryOp& ternary) { + if (auto pnode = analyzer_.GetPackNode(ig_index)) { + OpIndex og_index = pnode->RevectorizedNode(); + // Skip revectorized node. + if (!og_index.valid()) { + V first = analyzer_.GetReduced(ternary.first()); + V second = analyzer_.GetReduced(ternary.second()); + V third = analyzer_.GetReduced(ternary.third()); + + og_index = __ Simd256Ternary(first, second, third, + GetSimd256TernaryKind(ternary.kind)); + + pnode->SetRevectorizedNode(og_index); + } + + return GetExtractOpIfNeeded(pnode, ig_index, og_index); + } + return Next::ReduceInputGraphSimd128Ternary(ig_index, ternary); + } + private: + static Simd256UnaryOp::Kind GetSimd256UnaryKind( + Simd128UnaryOp::Kind simd128_kind) { + switch (simd128_kind) { +#define UNOP_KIND_MAPPING(from, to) \ + case Simd128UnaryOp::Kind::k##from: \ + return Simd256UnaryOp::Kind::k##to; + SIMD256_UNARY_OP(UNOP_KIND_MAPPING) +#undef UNOP_KIND_MAPPING + default: + UNIMPLEMENTED(); + } + } + + static Simd256BinopOp::Kind GetSimd256BinOpKind(Simd128BinopOp::Kind kind) { + switch (kind) { +#define BINOP_KIND_MAPPING(from, to) \ + case Simd128BinopOp::Kind::k##from: \ + return Simd256BinopOp::Kind::k##to; + SIMD256_BINOP_SIMPLE_OP(BINOP_KIND_MAPPING) +#undef BINOP_KIND_MAPPING + default: + UNIMPLEMENTED(); + } + } + + static Simd256ShiftOp::Kind GetSimd256ShiftOpKind(Simd128ShiftOp::Kind kind) { + switch (kind) { +#define SHIFT_KIND_MAPPING(from, to) \ + case Simd128ShiftOp::Kind::k##from: \ + return Simd256ShiftOp::Kind::k##to; + SIMD256_SHIFT_OP(SHIFT_KIND_MAPPING) +#undef SHIFT_KIND_MAPPING + default: + UNIMPLEMENTED(); + } + } + + static Simd256TernaryOp::Kind GetSimd256TernaryKind( + Simd128TernaryOp::Kind simd128_kind) { + switch (simd128_kind) { +#define TERNARY_KIND_MAPPING(from, to) \ + case Simd128TernaryOp::Kind::k##from: \ + return Simd256TernaryOp::Kind::k##to; + SIMD256_TERNARY_OP(TERNARY_KIND_MAPPING) +#undef TERNARY_KIND_MAPPING + default: + UNIMPLEMENTED(); + } + } + + static Simd256LoadTransformOp::TransformKind Get256LoadTransformKindFrom128( + Simd128LoadTransformOp::TransformKind simd128_kind) { + switch (simd128_kind) { +#define TRANSFORM_KIND_MAPPING(from, to) \ + case Simd128LoadTransformOp::TransformKind::k##from: \ + return Simd256LoadTransformOp::TransformKind::k##to; + SIMD256_LOADTRANSFORM_OP(TRANSFORM_KIND_MAPPING) +#undef TRANSFORM_KIND_MAPPING + default: + UNREACHABLE(); + } + } + const wasm::WasmModule* module_ = 
PipelineData::Get().wasm_module(); WasmRevecAnalyzer analyzer_ = *PipelineData::Get().wasm_revec_analyzer(); }; diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h index 3cf6fb18403298..0c6fb7d9ff1fd4 100644 --- a/deps/v8/src/compiler/type-cache.h +++ b/deps/v8/src/compiler/type-cache.h @@ -42,6 +42,7 @@ class V8_EXPORT_PRIVATE TypeCache final { Type::Union(kDoubleRepresentableInt64, Type::MinusZero(), zone()); Type const kDoubleRepresentableUint64 = CreateRange( std::numeric_limits::min(), kMaxDoubleRepresentableUint64); + Type const kFloat16 = Type::Number(); Type const kFloat32 = Type::Number(); Type const kFloat64 = Type::Number(); Type const kBigInt64 = Type::SignedBigInt64(); diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc index 3ea390c5558938..859213ca432cc0 100644 --- a/deps/v8/src/compiler/typed-optimization.cc +++ b/deps/v8/src/compiler/typed-optimization.cc @@ -50,6 +50,8 @@ Reduction TypedOptimization::Reduce(Node* node) { return ReduceCheckNumber(node); case IrOpcode::kCheckString: return ReduceCheckString(node); + case IrOpcode::kCheckStringOrStringWrapper: + return ReduceCheckStringOrStringWrapper(node); case IrOpcode::kCheckEqualsInternalizedString: return ReduceCheckEqualsInternalizedString(node); case IrOpcode::kCheckEqualsSymbol: @@ -265,6 +267,16 @@ Reduction TypedOptimization::ReduceCheckString(Node* node) { return NoChange(); } +Reduction TypedOptimization::ReduceCheckStringOrStringWrapper(Node* node) { + Node* const input = NodeProperties::GetValueInput(node, 0); + Type const input_type = NodeProperties::GetType(input); + if (input_type.Is(Type::StringOrStringWrapper())) { + ReplaceWithValue(node, input); + return Replace(input); + } + return NoChange(); +} + Reduction TypedOptimization::ReduceCheckEqualsInternalizedString(Node* node) { Node* const exp = NodeProperties::GetValueInput(node, 0); Type const exp_type = NodeProperties::GetType(exp); @@ -454,7 +466,7 @@ Reduction TypedOptimization:: } break; case IrOpcode::kStringLessThan: - V8_FALLTHROUGH; + [[fallthrough]]; case IrOpcode::kStringLessThanOrEqual: if (string.length() == 0) { // String.fromCharCode(x) <= "" is always false, diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h index d9b8e3e31127a1..1f3ba402a65eb6 100644 --- a/deps/v8/src/compiler/typed-optimization.h +++ b/deps/v8/src/compiler/typed-optimization.h @@ -44,6 +44,7 @@ class V8_EXPORT_PRIVATE TypedOptimization final Reduction ReduceCheckMaps(Node* node); Reduction ReduceCheckNumber(Node* node); Reduction ReduceCheckString(Node* node); + Reduction ReduceCheckStringOrStringWrapper(Node* node); Reduction ReduceCheckEqualsInternalizedString(Node* node); Reduction ReduceCheckEqualsSymbol(Node* node); Reduction ReduceLoadField(Node* node); diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc index 9c86d40535a09f..9a346d134b9557 100644 --- a/deps/v8/src/compiler/typer.cc +++ b/deps/v8/src/compiler/typer.cc @@ -707,7 +707,7 @@ Type Typer::Visitor::ToNumeric(Type type, Typer* t) { Type Typer::Visitor::ToObject(Type type, Typer* t) { // ES6 section 7.1.13 ToObject ( argument ) if (type.Is(Type::Receiver())) return type; - if (type.Is(Type::Primitive())) return Type::OtherObject(); + if (type.Is(Type::Primitive())) return Type::StringWrapperOrOtherObject(); if (!type.Maybe(Type::OtherUndetectable())) { return Type::DetectableReceiver(); } @@ -2365,6 +2365,11 @@ Type Typer::Visitor::TypeCheckString(Node* 
node) { return Type::Intersect(arg, Type::String(), zone()); } +Type Typer::Visitor::TypeCheckStringOrStringWrapper(Node* node) { + Type arg = Operand(node, 0); + return Type::Intersect(arg, Type::StringOrStringWrapper(), zone()); +} + Type Typer::Visitor::TypeCheckSymbol(Node* node) { Type arg = Operand(node, 0); return Type::Intersect(arg, Type::Symbol(), zone()); diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc index 2e1da6d9902cd3..f534f1614a464a 100644 --- a/deps/v8/src/compiler/types.cc +++ b/deps/v8/src/compiler/types.cc @@ -8,6 +8,7 @@ #include "src/compiler/js-heap-broker.h" #include "src/numbers/conversions-inl.h" +#include "src/objects/elements-kind.h" #include "src/objects/instance-type.h" #include "src/objects/turbofan-types.h" #include "src/utils/ostreams.h" @@ -226,7 +227,16 @@ Type::bitset BitsetType::Lub(MapRefLike map, JSHeapBroker* broker) { return kOtherObject; case JS_ARRAY_TYPE: return kArray; - case JS_PRIMITIVE_WRAPPER_TYPE: + case JS_PRIMITIVE_WRAPPER_TYPE: { + DCHECK(!map.is_callable()); + DCHECK(!map.is_undetectable()); + auto elements_kind = map.elements_kind(); + if (elements_kind == ElementsKind::FAST_STRING_WRAPPER_ELEMENTS || + elements_kind == ElementsKind::SLOW_STRING_WRAPPER_ELEMENTS) { + return kStringWrapper; + } + return kOtherObject; + } case JS_MESSAGE_OBJECT_TYPE: case JS_DATE_TYPE: #ifdef V8_INTL_SUPPORT @@ -918,6 +928,10 @@ Type Type::Constant(JSHeapBroker* broker, ObjectRef ref, Zone* zone) { if (ref.IsString() && !ref.IsInternalizedString()) { return Type::String(); } + if (ref.IsJSPrimitiveWrapper() && + ref.AsJSPrimitiveWrapper().IsStringWrapper(broker)) { + return Type::StringWrapper(); + } if (ref.HoleType() != HoleType::kNone) { return Type::Hole(); } diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h index 12348c9722feb2..25a99f8d1f7a39 100644 --- a/deps/v8/src/compiler/types.h +++ b/deps/v8/src/compiler/types.h @@ -142,7 +142,8 @@ namespace compiler { // turbofan-types.tq uses two 32bit bitfield structs. 
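+// Bits 32 and above (such as the new kStringWrapper bit, 1 << 34, added
+// below) therefore live in the second, high bitfield word.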
#define PROPER_ATOMIC_BITSET_TYPE_HIGH_LIST(V) \ V(Machine, uint64_t{1} << 32) \ - V(Hole, uint64_t{1} << 33) + V(Hole, uint64_t{1} << 33) \ + V(StringWrapper, uint64_t{1} << 34) #define PROPER_BITSET_TYPE_LIST(V) \ V(None, uint64_t{0}) \ @@ -172,6 +173,7 @@ namespace compiler { kOtherBigInt) \ V(Numeric, kNumber | kBigInt) \ V(String, kInternalizedString | kOtherString) \ + V(StringOrStringWrapper, kString | kStringWrapper) \ V(UniqueName, kSymbol | kInternalizedString) \ V(Name, kSymbol | kString) \ V(InternalizedStringOrNull, kInternalizedString | kNull) \ @@ -194,14 +196,17 @@ namespace compiler { V(Proxy, kCallableProxy | kOtherProxy) \ V(ArrayOrOtherObject, kArray | kOtherObject) \ V(ArrayOrProxy, kArray | kProxy) \ + V(StringWrapperOrOtherObject, kStringWrapper | kOtherObject) \ V(Function, kCallableFunction | kClassConstructor) \ V(DetectableCallable, kFunction | kBoundFunction | \ kOtherCallable | kCallableProxy) \ V(Callable, kDetectableCallable | kOtherUndetectable) \ - V(NonCallable, kArray | kOtherObject | kOtherProxy) \ + V(NonCallable, kArray | kStringWrapper | kOtherObject | \ + kOtherProxy) \ V(NonCallableOrNull, kNonCallable | kNull) \ V(DetectableObject, kArray | kFunction | kBoundFunction | \ - kOtherCallable | kOtherObject) \ + kStringWrapper | kOtherCallable | \ + kOtherObject) \ V(DetectableReceiver, kDetectableObject | kProxy | kWasmObject) \ V(DetectableReceiverOrNull, kDetectableReceiver | kNull) \ V(Object, kDetectableObject | kOtherUndetectable) \ diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc index 17fe87449d1a1e..e49766be1981c0 100644 --- a/deps/v8/src/compiler/verifier.cc +++ b/deps/v8/src/compiler/verifier.cc @@ -1533,6 +1533,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { CheckValueInputIs(node, 0, Type::Any()); CheckTypeIs(node, Type::String()); break; + case IrOpcode::kCheckStringOrStringWrapper: + CheckValueInputIs(node, 0, Type::Any()); + CheckTypeIs(node, Type::StringOrStringWrapper()); + break; case IrOpcode::kCheckSymbol: CheckValueInputIs(node, 0, Type::Any()); CheckTypeIs(node, Type::Symbol()); diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc index d28fb58961db71..16f1f1470b782c 100644 --- a/deps/v8/src/compiler/wasm-compiler.cc +++ b/deps/v8/src/compiler/wasm-compiler.cc @@ -8,7 +8,6 @@ #include "src/base/optional.h" #include "src/base/small-vector.h" -#include "src/base/v8-fallthrough.h" #include "src/base/vector.h" #include "src/codegen/assembler.h" #include "src/codegen/compiler.h" @@ -401,10 +400,9 @@ Node* WasmGraphBuilder::RefNull(wasm::ValueType type) { } Node* WasmGraphBuilder::RefFunc(uint32_t function_index) { - Node* functions = - LOAD_INSTANCE_FIELD(WasmInternalFunctions, MachineType::TaggedPointer()); + Node* func_refs = LOAD_INSTANCE_FIELD(FuncRefs, MachineType::TaggedPointer()); Node* maybe_function = - gasm_->LoadFixedArrayElementPtr(functions, function_index); + gasm_->LoadFixedArrayElementPtr(func_refs, function_index); auto done = gasm_->MakeLabel(MachineRepresentation::kTaggedPointer); auto create_funcref = gasm_->MakeDeferredLabel(); // We only care to distinguish between zero and funcref, "IsI31" is close @@ -1377,14 +1375,14 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore( case wasm::kF64: value = gasm_->BitcastFloat64ToInt64(node); isFloat = true; - V8_FALLTHROUGH; + [[fallthrough]]; case wasm::kI64: result = Int64Constant(0); break; case wasm::kF32: value = gasm_->BitcastFloat32ToInt32(node); isFloat = true; - 
V8_FALLTHROUGH; + [[fallthrough]]; case wasm::kI32: result = Int32Constant(0); break; @@ -1495,14 +1493,14 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node, case MachineRepresentation::kFloat64: value = gasm_->BitcastFloat64ToInt64(node); isFloat = true; - V8_FALLTHROUGH; + [[fallthrough]]; case MachineRepresentation::kWord64: result = Int64Constant(0); break; case MachineRepresentation::kFloat32: value = gasm_->BitcastFloat32ToInt32(node); isFloat = true; - V8_FALLTHROUGH; + [[fallthrough]]; case MachineRepresentation::kWord32: case MachineRepresentation::kWord16: result = Int32Constant(0); @@ -2276,13 +2274,13 @@ Node* WasmGraphBuilder::Throw(uint32_t tag_index, const wasm::WasmTag* tag, switch (sig->GetParam(i).kind()) { case wasm::kF32: value = gasm_->BitcastFloat32ToInt32(value); - V8_FALLTHROUGH; + [[fallthrough]]; case wasm::kI32: BuildEncodeException32BitValue(values_array, &index, value); break; case wasm::kF64: value = gasm_->BitcastFloat64ToInt64(value); - V8_FALLTHROUGH; + [[fallthrough]]; case wasm::kI64: { Node* upper32 = gasm_->TruncateInt64ToInt32( Binop(wasm::kExprI64ShrU, value, Int64Constant(32))); @@ -2871,13 +2869,14 @@ Node* WasmGraphBuilder::BuildImportCall( base::Vector rets, wasm::WasmCodePosition position, Node* func_index, IsReturnCall continuation, Node* frame_state) { // Load the imported function refs array from the instance. - Node* imported_function_refs = - LOAD_INSTANCE_FIELD(ImportedFunctionRefs, MachineType::TaggedPointer()); + Node* imported_function_refs = gasm_->LoadProtectedPointerFromObject( + GetInstanceData(), + wasm::ObjectAccess::ToTagged( + WasmTrustedInstanceData::kImportedFunctionRefsOffset)); // Access fixed array at {header_size - tag + func_index * kTaggedSize}. Node* func_index_intptr = gasm_->BuildChangeUint32ToUintPtr(func_index); - Node* ref_node = gasm_->LoadFixedArrayElement( - imported_function_refs, func_index_intptr, MachineType::TaggedPointer()); - Node* first_param = LoadTrustedDataFromMaybeInstanceObject(ref_node); + Node* ref = gasm_->LoadProtectedFixedArrayElement(imported_function_refs, + func_index_intptr); // Load the target from the imported_targets array at the offset of // {func_index}. 
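The hunks above, like the matching change in typed-optimization.cc earlier in this patch, migrate the V8_FALLTHROUGH macro to the standard [[fallthrough]] attribute; the src/base/v8-fallthrough.h include is dropped at the top of wasm-compiler.cc accordingly. As a minimal sketch of the pattern, with a made-up function purely for illustration (nothing below is taken from the patch):

    // Illustrative only: the C++17 attribute marks an intentional fall-through
    // between switch cases, keeping implicit-fallthrough warnings quiet.
    int EncodeKind(int kind, int value) {
      int bits = 0;
      switch (kind) {
        case 0:
          bits = value & 0xff;  // prepare the payload first...
          [[fallthrough]];      // ...then deliberately continue into case 1
        case 1:
          return bits | 0x100;
        default:
          return -1;
      }
    }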
@@ -2888,16 +2887,16 @@ Node* WasmGraphBuilder::BuildImportCall( wasm::ObjectAccess::ToTagged(FixedAddressArray::kHeaderSize))); Node* imported_targets = LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::TaggedPointer()); - Node* target_node = gasm_->LoadImmutableFromObject(MachineType::Pointer(), - imported_targets, offset); - args[0] = target_node; + Node* target = gasm_->LoadImmutableFromObject(MachineType::Pointer(), + imported_targets, offset); + args[0] = target; switch (continuation) { case kCallContinues: - return BuildWasmCall(sig, args, rets, position, first_param, frame_state); + return BuildWasmCall(sig, args, rets, position, ref, frame_state); case kReturnCall: DCHECK(rets.empty()); - return BuildWasmReturnCall(sig, args, position, first_param); + return BuildWasmReturnCall(sig, args, position, ref); } } @@ -3060,11 +3059,10 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index, gasm_->Word32Equal(loaded_sig, Int32Constant(-1)), position); } - Node* target_ref = gasm_->LoadFromObject( - MachineType::TaggedPointer(), dispatch_table, + Node* target_ref = gasm_->LoadProtectedPointerFromObject( + dispatch_table, gasm_->IntAdd(dispatch_table_entry_offset, gasm_->IntPtrConstant(WasmDispatchTable::kRefBias))); - Node* first_param = LoadTrustedDataFromMaybeInstanceObject(target_ref); Node* target = gasm_->LoadFromObject( MachineType::Pointer(), dispatch_table, @@ -3076,9 +3074,9 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index, switch (continuation) { case kCallContinues: - return BuildWasmCall(sig, args, rets, position, first_param); + return BuildWasmCall(sig, args, rets, position, target_ref); case kReturnCall: - return BuildWasmReturnCall(sig, args, position, first_param); + return BuildWasmReturnCall(sig, args, position, target_ref); } } @@ -3097,7 +3095,8 @@ Node* WasmGraphBuilder::BuildLoadCodeEntrypointViaCodePointer(Node* object, Node* table = gasm_->ExternalConstant(ExternalReference::code_pointer_table_address()); - return gasm_->Load(MachineType::Pointer(), table, table_offset); + Node* entry = gasm_->Load(MachineType::Pointer(), table, table_offset); + return gasm_->WordXor(entry, gasm_->IntPtrConstant(kWasmEntrypointTag)); #else // In this case we have to load the Code object, then load its entrypoint. Node* code_object = @@ -3127,33 +3126,38 @@ Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* sig, CheckForNull null_check, IsReturnCall continuation, wasm::WasmCodePosition position) { + Node* func_ref = args[0]; if (null_check == kWithNullCheck && null_check_strategy_ == NullCheckStrategy::kExplicit) { - args[0] = - AssertNotNull(args[0], wasm::kWasmFuncRef /* good enough */, position); + func_ref = + AssertNotNull(func_ref, wasm::kWasmFuncRef /* good enough */, position); } - Node* function = args[0]; - - auto load_target = gasm_->MakeLabel(); auto end_label = gasm_->MakeLabel(MachineType::PointerRepresentation()); - Node* ref_node = - null_check == kWithNullCheck && - null_check_strategy_ == NullCheckStrategy::kTrapHandler - ? 
gasm_->LoadTrapOnNull( - MachineType::TaggedPointer(), function, - gasm_->IntPtrConstant(wasm::ObjectAccess::ToTagged( - WasmInternalFunction::kRefOffset))) - : gasm_->LoadImmutableFromObject( - MachineType::TaggedPointer(), function, - wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset)); - SetSourcePosition(ref_node, position); - - Node* first_param = LoadTrustedDataFromMaybeInstanceObject(ref_node); + Node* internal_function; + if (null_check == kWithNullCheck && + null_check_strategy_ == NullCheckStrategy::kTrapHandler) { + // TODO(14564): Move WasmInternalFunction to trusted space and make + // this a load of a trusted (immutable) pointer. + internal_function = gasm_->LoadTrapOnNull( + MachineType::TaggedPointer(), func_ref, + gasm_->IntPtrConstant( + wasm::ObjectAccess::ToTagged(WasmFuncRef::kInternalOffset))); + SetSourcePosition(internal_function, position); + } else { + internal_function = gasm_->LoadImmutableFromObject( + MachineType::TaggedPointer(), func_ref, + gasm_->IntPtrConstant( + wasm::ObjectAccess::ToTagged(WasmFuncRef::kInternalOffset))); + } + Node* ref = gasm_->LoadTrustedPointerFromObject( + internal_function, + wasm::ObjectAccess::ToTagged(WasmInternalFunction::kIndirectRefOffset), + kUnknownIndirectPointerTag); Node* target = gasm_->BuildLoadExternalPointerFromObject( - function, WasmInternalFunction::kCallTargetOffset, + internal_function, WasmInternalFunction::kCallTargetOffset, kWasmInternalFunctionCallTargetTag, BuildLoadIsolateRoot()); Node* is_null_target = gasm_->WordEqual(target, gasm_->IntPtrConstant(0)); gasm_->GotoIfNot(is_null_target, &end_label, target); @@ -3161,7 +3165,7 @@ Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* sig, // Compute the call target from the (on-heap) wrapper code. The cached // target can only be null for WasmJSFunctions. Node* call_target = BuildLoadCodeEntrypointViaCodePointer( - function, WasmInternalFunction::kCodeOffset); + internal_function, WasmInternalFunction::kCodeOffset); gasm_->Goto(&end_label, call_target); } @@ -3170,45 +3174,27 @@ Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* sig, args[0] = end_label.PhiAt(0); Node* call = continuation == kCallContinues - ? BuildWasmCall(sig, args, rets, position, first_param) - : BuildWasmReturnCall(sig, args, position, first_param); + ? BuildWasmCall(sig, args, rets, position, ref) + : BuildWasmReturnCall(sig, args, position, ref); return call; } -Node* WasmGraphBuilder::LoadTrustedDataFromMaybeInstanceObject( - Node* maybe_instance_object) { - // If the "ref" is a WasmInstanceObject, load the WasmTrustedInstanceData from - // it. - auto done = gasm_->MakeLabel(MachineRepresentation::kTaggedPointer); - Node* instance_type = - gasm_->LoadInstanceType(gasm_->LoadMap(maybe_instance_object)); - gasm_->GotoIfNot( - gasm_->Word32Equal(instance_type, - gasm_->Uint32Constant(WASM_INSTANCE_OBJECT_TYPE)), - &done, maybe_instance_object); - gasm_->Goto(&done, - gasm_->LoadTrustedDataFromInstanceObject(maybe_instance_object)); - gasm_->Bind(&done); - return done.PhiAt(0); -} - -void WasmGraphBuilder::CompareToInternalFunctionAtIndex(Node* func_ref, - uint32_t function_index, - Node** success_control, - Node** failure_control, - bool is_last_case) { +void WasmGraphBuilder::CompareToFuncRefAtIndex(Node* func_ref, + uint32_t function_index, + Node** success_control, + Node** failure_control, + bool is_last_case) { // Since we are comparing to a function reference, it is guaranteed that // instance->wasm_internal_functions() has been initialized. 
- Node* internal_functions = gasm_->LoadImmutable( + Node* func_refs = gasm_->LoadImmutable( MachineType::TaggedPointer(), GetInstanceData(), - wasm::ObjectAccess::ToTagged( - WasmTrustedInstanceData::kWasmInternalFunctionsOffset)); + wasm::ObjectAccess::ToTagged(WasmTrustedInstanceData::kFuncRefsOffset)); // We cannot use an immutable load here, since function references are // initialized lazily: Calling {RefFunc()} between two invocations of this // function may initialize the function, i.e. mutate the object we are // loading. Node* function_ref_at_index = gasm_->LoadFixedArrayElement( - internal_functions, gasm_->IntPtrConstant(function_index), + func_refs, gasm_->IntPtrConstant(function_index), MachineType::AnyTagged()); BranchHint hint = is_last_case ? BranchHint::kTrue : BranchHint::kNone; gasm_->Branch(gasm_->TaggedEqual(function_ref_at_index, func_ref), @@ -6200,9 +6186,11 @@ Node* WasmGraphBuilder::StringNewWtf8Array(unibrow::Utf8Variant variant, position); Node* segment_offset_smi = gasm_->BuildChangeInt32ToSmi(segment_offset); Node* segment_length = NodeProperties::GetValueInput(array, 3); + Node* variant_smi = gasm_->SmiConstant(static_cast(variant)); return gasm_->CallBuiltin(Builtin::kWasmStringFromDataSegment, - Operator::kEliminatable, segment_length, start, - end, segment_index_smi, segment_offset_smi); + Operator::Operator::kNoDeopt | Operator::kNoThrow, + segment_length, start, end, segment_index_smi, + segment_offset_smi, variant_smi); } // Regular path if the shortcut wasn't taken. @@ -7167,7 +7155,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { case wasm::kF64: return BuildChangeFloat64ToNumber(node); case wasm::kRef: - switch (type.heap_representation()) { + switch (type.heap_representation_non_shared()) { case wasm::HeapType::kEq: case wasm::HeapType::kI31: case wasm::HeapType::kStruct: @@ -7188,13 +7176,17 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { UNREACHABLE(); case wasm::HeapType::kFunc: default: - if (type.heap_representation() == wasm::HeapType::kFunc || + if (type.heap_representation_non_shared() == + wasm::HeapType::kFunc || module_->has_signature(type.ref_index())) { - // Typed function. Extract the external function. + // Function reference. Extract the external function. 
auto done = gasm_->MakeLabel(MachineRepresentation::kTaggedPointer); - Node* maybe_external = gasm_->LoadFromObject( + Node* internal = gasm_->LoadFromObject( MachineType::TaggedPointer(), node, + wasm::ObjectAccess::ToTagged(WasmFuncRef::kInternalOffset)); + Node* maybe_external = gasm_->LoadFromObject( + MachineType::TaggedPointer(), internal, wasm::ObjectAccess::ToTagged( WasmInternalFunction::kExternalOffset)); gasm_->GotoIfNot( @@ -7202,7 +7194,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { maybe_external); Node* from_builtin = gasm_->CallBuiltin( Builtin::kWasmInternalFunctionCreateExternal, - Operator::kNoProperties, node, context); + Operator::kNoProperties, internal, context); gasm_->Goto(&done, from_builtin); gasm_->Bind(&done); return done.PhiAt(0); @@ -7211,7 +7203,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { } } case wasm::kRefNull: - switch (type.heap_representation()) { + switch (type.heap_representation_non_shared()) { case wasm::HeapType::kExtern: case wasm::HeapType::kNoExtern: case wasm::HeapType::kExn: @@ -7234,14 +7226,19 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { } case wasm::HeapType::kFunc: default: { - if (type == wasm::kWasmFuncRef || + if (type.heap_representation_non_shared() == + wasm::HeapType::kFunc || module_->has_signature(type.ref_index())) { + // Function reference. Extract the external function. auto done = gasm_->MakeLabel(MachineRepresentation::kTaggedPointer); auto null_label = gasm_->MakeLabel(); gasm_->GotoIf(IsNull(node, type), &null_label); - Node* maybe_external = gasm_->LoadFromObject( + Node* internal = gasm_->LoadFromObject( MachineType::TaggedPointer(), node, + wasm::ObjectAccess::ToTagged(WasmFuncRef::kInternalOffset)); + Node* maybe_external = gasm_->LoadFromObject( + MachineType::TaggedPointer(), internal, wasm::ObjectAccess::ToTagged( WasmInternalFunction::kExternalOffset)); gasm_->GotoIfNot( @@ -7249,7 +7246,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { maybe_external); Node* from_builtin = gasm_->CallBuiltin( Builtin::kWasmInternalFunctionCreateExternal, - Operator::kNoProperties, node, context); + Operator::kNoProperties, internal, context); gasm_->Goto(&done, from_builtin); gasm_->Bind(&null_label); gasm_->Goto(&done, LOAD_ROOT(NullValue, null_value)); @@ -7324,7 +7321,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { switch (type.kind()) { case wasm::kRef: case wasm::kRefNull: { - switch (type.heap_representation()) { + switch (type.heap_representation_non_shared()) { // TODO(14034): Add more fast paths? 
case wasm::HeapType::kExtern: case wasm::HeapType::kNoExtern: @@ -7344,7 +7341,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { gasm_->Unreachable(); gasm_->Bind(&done); - return input; } return input; case wasm::HeapType::kString: @@ -7543,11 +7539,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { args[0] = gasm_->BuildLoadExternalPointerFromObject( internal, WasmInternalFunction::kCallTargetOffset, kWasmInternalFunctionCallTargetTag, BuildLoadIsolateRoot()); - Node* instance_object = gasm_->LoadFromObject( - MachineType::TaggedPointer(), internal, - wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset)); - Node* instance_data = - gasm_->LoadTrustedDataFromInstanceObject(instance_object); + Node* instance_data = gasm_->LoadTrustedPointerFromObject( + internal, + wasm::ObjectAccess::ToTagged( + WasmInternalFunction::kIndirectRefOffset), + kUnknownIndirectPointerTag); BuildWasmCall(sig_, base::VectorOf(args), base::VectorOf(rets), wasm::kNoCodePosition, instance_data, frame_state); } @@ -7792,9 +7788,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* call_target = GetTargetForBuiltinCall(Builtin::kWasmSuspend); // Trap if there is any JS frame on the stack. Node* has_js_frames = gasm_->Load( - MachineType::Int32(), suspender, + MachineType::TaggedSigned(), suspender, wasm::ObjectAccess::ToTagged(WasmSuspenderObject::kHasJsFramesOffset)); - Node* cond = gasm_->Word32Equal(Int32Constant(0), has_js_frames); + Node* cond = gasm_->TaggedEqual(gasm_->SmiConstant(0), has_js_frames); auto suspend = gasm_->MakeLabel(); gasm_->GotoIf(cond, &suspend); // {ThrowWasmError} expects to be called from wasm code, so set the @@ -7903,6 +7899,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { args[pos++] = control(); DCHECK_EQ(pos, args.size()); Node* call = gasm_->Call(call_descriptor, pos, args.begin()); + // For asm.js the error location can differ depending on whether an + // exception was thrown in imported JS code or an exception was thrown in + // the ToNumber builtin that converts the result of the JS code a + // WebAssembly value. The source position allows asm.js to determine the + // correct error location. Source position 1 encodes the call to ToNumber, + // source position 0 encodes the call to the imported JS code. + SetSourcePosition(call, 0); gasm_->Goto(&end, call); gasm_->Bind(&do_switch); @@ -7911,6 +7914,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { args[pos - 1] = control(); call = gasm_->Call(call_descriptor, pos, args.begin()); + // See comment above. + SetSourcePosition(call, 0); BuildSwitchBackFromCentralStack(old_sp, callable_node); gasm_->Goto(&end, call); @@ -7953,37 +7958,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { // ======================================================================= // === JS Functions with matching arity ================================== // ======================================================================= - case wasm::ImportCallKind::kJSFunctionArityMatch: { - base::SmallVector args(wasm_count + 7 - suspend); - int pos = 0; - Node* function_context = - gasm_->LoadContextFromJSFunction(callable_node); - args[pos++] = callable_node; // target callable. - - // Determine receiver at runtime. - args[pos++] = - BuildReceiverNode(callable_node, native_context, undefined_node); - - auto call_descriptor = Linkage::GetJSCallDescriptor( - graph()->zone(), false, wasm_count + 1 - suspend, - CallDescriptor::kNoFlags); - - // Convert wasm numbers to JS values. 
- pos = AddArgumentNodes(base::VectorOf(args), pos, wasm_count, sig_, - native_context, suspend); - - args[pos++] = undefined_node; // new target - args[pos++] = Int32Constant( - JSParameterCount(wasm_count - suspend)); // argument count - args[pos++] = function_context; - - call = - BuildCallOnCentralStack(args, pos, call_descriptor, callable_node); - break; - } - // ======================================================================= - // === JS Functions with mismatching arity =============================== - // ======================================================================= + case wasm::ImportCallKind::kJSFunctionArityMatch: + DCHECK_EQ(expected_arity, wasm_count - suspend); + [[fallthrough]]; case wasm::ImportCallKind::kJSFunctionArityMismatch: { int pushed_count = std::max(expected_arity, wasm_count - suspend); base::SmallVector args(pushed_count + 7); @@ -8051,14 +8028,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { } DCHECK_NOT_NULL(call); - // For asm.js the error location can differ depending on whether an - // exception was thrown in imported JS code or an exception was thrown in - // the ToNumber builtin that converts the result of the JS code a - // WebAssembly value. The source position allows asm.js to determine the - // correct error location. Source position 1 encodes the call to ToNumber, - // source position 0 encodes the call to the imported JS code. - SetSourcePosition(call, 0); - if (suspend) { call = BuildSuspend(call, Param(1), Param(0)); } @@ -8190,23 +8159,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { if (ContainsInt64(sig_)) LowerInt64(wasm::kCalledFromWasm); } - Node* AdaptHandlifiedArgument(Node* node) { -#ifdef V8_ENABLE_DIRECT_LOCAL - // With direct locals, the argument can be passed directly. - return node; -#else - // With indirect locals, the argument has to be stored on the stack and the - // slot addressed is passed. 
- constexpr int kAlign = alignof(uintptr_t); - constexpr int kSize = sizeof(uintptr_t); - Node* stack_slot = gasm_->StackSlot(kSize, kAlign); - gasm_->Store(StoreRepresentation(MachineType::PointerRepresentation(), - kNoWriteBarrier), - stack_slot, 0, node); - return stack_slot; -#endif - } - void BuildJSFastApiCallWrapper(Handle callable) { // Here 'callable_node' must be equal to 'callable' but we cannot pass a // HeapConstant(callable) because WasmCode::Validate() fails with @@ -8260,12 +8212,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* function_template_info = gasm_->Load( MachineType::TaggedPointer(), shared_function_info, wasm::ObjectAccess::ToTagged(SharedFunctionInfo::kFunctionDataOffset)); - Node* call_code = gasm_->Load( - MachineType::TaggedPointer(), function_template_info, - wasm::ObjectAccess::ToTagged(FunctionTemplateInfo::kCallCodeOffset)); Node* api_data_argument = - gasm_->Load(MachineType::TaggedPointer(), call_code, - wasm::ObjectAccess::ToTagged(CallHandlerInfo::kDataOffset)); + gasm_->Load(MachineType::TaggedPointer(), function_template_info, + wasm::ObjectAccess::ToTagged( + FunctionTemplateInfo::kCallbackDataOffset)); FastApiCallFunctionVector fast_api_call_function_vector(mcgraph()->zone()); fast_api_call_function_vector.push_back({c_address, c_signature}); @@ -8281,11 +8231,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { CHECK(!overloads.is_valid()); if (param_index == 0) { - return AdaptHandlifiedArgument(receiver_node); + return gasm_->AdaptLocalArgument(receiver_node); } switch (c_signature->ArgumentInfo(param_index).GetType()) { case CTypeInfo::Type::kV8Value: - return AdaptHandlifiedArgument(Param(param_index)); + return gasm_->AdaptLocalArgument(Param(param_index)); default: return Param(param_index); } @@ -8374,90 +8324,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Return(call); } - void BuildJSToJSWrapper() { - int wasm_count = static_cast(sig_->parameter_count()); - - // Build the start and the parameter nodes. - int param_count = 1 /* closure */ + 1 /* receiver */ + wasm_count + - 1 /* new.target */ + 1 /* #arg */ + 1 /* context */; - Start(param_count); - Node* closure = Param(Linkage::kJSCallClosureParamIndex); - Node* context = Param(Linkage::GetJSCallContextParamIndex(wasm_count + 1)); - - // Throw a TypeError if the signature is incompatible with JavaScript. - if (!wasm::IsJSCompatibleSignature(sig_)) { - BuildCallToRuntimeWithContext(Runtime::kWasmThrowJSTypeError, context, - nullptr, 0); - TerminateThrow(effect(), control()); - return; - } - - // Load the original callable from the closure. - Node* func_data = gasm_->LoadFunctionDataFromJSFunction(closure); - Node* internal = gasm_->LoadFromObject( - MachineType::AnyTagged(), func_data, - wasm::ObjectAccess::ToTagged(WasmFunctionData::kInternalOffset)); - Node* ref = gasm_->LoadFromObject( - MachineType::AnyTagged(), internal, - wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset)); - Node* callable = gasm_->LoadFromObject( - MachineType::AnyTagged(), ref, - wasm::ObjectAccess::ToTagged(WasmApiFunctionRef::kCallableOffset)); - - // Call the underlying closure. 
- base::SmallVector args(wasm_count + 7); - int pos = 0; - args[pos++] = gasm_->GetBuiltinPointerTarget(Builtin::kCall_ReceiverIsAny); - args[pos++] = callable; - args[pos++] = - Int32Constant(JSParameterCount(wasm_count)); // argument count - args[pos++] = UndefinedValue(); // receiver - - auto call_descriptor = Linkage::GetStubCallDescriptor( - graph()->zone(), CallTrampolineDescriptor{}, wasm_count + 1, - CallDescriptor::kNoFlags, Operator::kNoProperties, - StubCallMode::kCallBuiltinPointer); - - // Convert parameter JS values to wasm numbers and back to JS values. - for (int i = 0; i < wasm_count; ++i) { - Node* param = Param(i + 1); // Start from index 1 to skip receiver. - args[pos++] = ToJS(FromJS(param, context, sig_->GetParam(i), nullptr), - sig_->GetParam(i), context); - } - - args[pos++] = context; - args[pos++] = effect(); - args[pos++] = control(); - - DCHECK_EQ(pos, args.size()); - Node* call = gasm_->Call(call_descriptor, pos, args.begin()); - - // Convert return JS values to wasm numbers and back to JS values. - Node* jsval; - if (sig_->return_count() == 0) { - jsval = UndefinedValue(); - } else if (sig_->return_count() == 1) { - jsval = ToJS(FromJS(call, context, sig_->GetReturn(), nullptr), - sig_->GetReturn(), context); - } else { - Node* fixed_array = - BuildMultiReturnFixedArrayFromIterable(sig_, call, context); - int32_t return_count = static_cast(sig_->return_count()); - Node* size = gasm_->NumberConstant(return_count); - jsval = BuildCallAllocateJSArray(size, context); - Node* result_fixed_array = gasm_->LoadJSArrayElements(jsval); - for (unsigned i = 0; i < sig_->return_count(); ++i) { - const auto& type = sig_->GetReturn(i); - Node* elem = gasm_->LoadFixedArrayElementAny(fixed_array, i); - Node* cast = ToJS(FromJS(elem, context, type, nullptr), type, context); - gasm_->StoreFixedArrayElementAny(result_fixed_array, i, cast); - } - } - Return(jsval); - - if (ContainsInt64(sig_)) LowerInt64(wasm::kCalledFromJS); - } - void BuildCWasmEntry() { // +1 offset for first parameter index being -1. Start(CWasmEntryParameters::kNumParameters + 1); @@ -8573,8 +8439,10 @@ std::unique_ptr NewJSToWasmCompilationJob( std::unique_ptr debug_name = WasmExportedFunction::GetDebugName(sig); if (v8_flags.turboshaft_wasm_wrappers) { return Pipeline::NewWasmTurboshaftWrapperCompilationJob( - isolate, sig, is_import, module, CodeKind::JS_TO_WASM_FUNCTION, - std::move(debug_name), WasmAssemblerOptions()); + isolate, sig, + wasm::WrapperCompilationInfo{.code_kind = CodeKind::JS_TO_WASM_FUNCTION, + .is_import = is_import}, + module, std::move(debug_name), WasmAssemblerOptions()); } else { std::unique_ptr zone = std::make_unique( wasm::GetWasmEngine()->allocator(), ZONE_NAME, kCompressGraphZone); @@ -8725,6 +8593,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper( if (v8_flags.wasm_math_intrinsics && kind >= wasm::ImportCallKind::kFirstMathIntrinsic && kind <= wasm::ImportCallKind::kLastMathIntrinsic) { + // TODO(thibaudm): Port to Turboshaft. 
return CompileWasmMathIntrinsic(kind, sig); } @@ -8735,27 +8604,6 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper( start_time = base::TimeTicks::Now(); } - //---------------------------------------------------------------------------- - // Create the Graph - //---------------------------------------------------------------------------- - Zone zone(wasm::GetWasmEngine()->allocator(), ZONE_NAME, kCompressGraphZone); - Graph* graph = zone.New(&zone); - CommonOperatorBuilder* common = zone.New(&zone); - MachineOperatorBuilder* machine = zone.New( - &zone, MachineType::PointerRepresentation(), - InstructionSelector::SupportedMachineOperatorFlags(), - InstructionSelector::AlignmentRequirements()); - MachineGraph* mcgraph = zone.New(graph, common, machine); - - SourcePositionTable* source_position_table = - source_positions ? zone.New(graph) : nullptr; - - WasmWrapperGraphBuilder builder( - &zone, mcgraph, sig, env->module, - WasmGraphBuilder::kWasmApiFunctionRefMode, nullptr, source_position_table, - StubCallMode::kCallWasmRuntimeStub, env->enabled_features); - builder.BuildWasmToJSWrapper(kind, expected_arity, suspend, env->module); - // Build a name in the form "wasm-to-js--". constexpr size_t kMaxNameLen = 128; char func_name[kMaxNameLen]; @@ -8764,16 +8612,51 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper( PrintSignature(base::VectorOf(func_name, kMaxNameLen) + name_prefix_len, sig, '-'); - // Schedule and compile to machine code. - CallDescriptor* incoming = - GetWasmCallDescriptor(&zone, sig, WasmCallKind::kWasmImportWrapper); - if (machine->Is32()) { - incoming = GetI32WasmCallDescriptor(&zone, incoming); - } - wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub( - incoming, mcgraph, CodeKind::WASM_TO_JS_FUNCTION, func_name, - WasmStubAssemblerOptions(), source_position_table); + auto compile_with_turboshaft = [&]() { + return Pipeline::GenerateCodeForWasmNativeStubFromTurboshaft( + env->module, sig, + wasm::WrapperCompilationInfo{ + .code_kind = CodeKind::WASM_TO_JS_FUNCTION, + .import_info = {kind, expected_arity, suspend}}, + func_name, WasmStubAssemblerOptions(), nullptr); + }; + auto compile_with_turbofan = [&]() { + //-------------------------------------------------------------------------- + // Create the Graph + //-------------------------------------------------------------------------- + Zone zone(wasm::GetWasmEngine()->allocator(), ZONE_NAME, + kCompressGraphZone); + Graph* graph = zone.New(&zone); + CommonOperatorBuilder* common = zone.New(&zone); + MachineOperatorBuilder* machine = zone.New( + &zone, MachineType::PointerRepresentation(), + InstructionSelector::SupportedMachineOperatorFlags(), + InstructionSelector::AlignmentRequirements()); + MachineGraph* mcgraph = zone.New(graph, common, machine); + + SourcePositionTable* source_position_table = + source_positions ? zone.New(graph) : nullptr; + + WasmWrapperGraphBuilder builder(&zone, mcgraph, sig, env->module, + WasmGraphBuilder::kWasmApiFunctionRefMode, + nullptr, source_position_table, + StubCallMode::kCallWasmRuntimeStub, + env->enabled_features); + builder.BuildWasmToJSWrapper(kind, expected_arity, suspend, env->module); + + // Schedule and compile to machine code. 
+ CallDescriptor* incoming = + GetWasmCallDescriptor(&zone, sig, WasmCallKind::kWasmImportWrapper); + if (machine->Is32()) { + incoming = GetI32WasmCallDescriptor(&zone, incoming); + } + return Pipeline::GenerateCodeForWasmNativeStub( + incoming, mcgraph, CodeKind::WASM_TO_JS_FUNCTION, func_name, + WasmStubAssemblerOptions(), source_position_table); + }; + auto result = v8_flags.turboshaft_wasm_wrappers ? compile_with_turboshaft() + : compile_with_turbofan(); if (V8_UNLIKELY(v8_flags.trace_wasm_compilation_times)) { base::TimeDelta time = base::TimeTicks::Now() - start_time; int codesize = result.code_desc.body_size(); @@ -8819,8 +8702,10 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module, wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count, result.tagged_parameter_slots, result.protected_instructions_data.as_vector(), - result.source_positions.as_vector(), wasm::WasmCode::kWasmToCapiWrapper, - wasm::ExecutionTier::kNone, wasm::kNotForDebugging); + result.source_positions.as_vector(), + result.inlining_positions.as_vector(), + wasm::WasmCode::kWasmToCapiWrapper, wasm::ExecutionTier::kNone, + wasm::kNotForDebugging); published_code = native_module->PublishCode(std::move(wasm_code)); } return published_code; @@ -8868,7 +8753,8 @@ wasm::WasmCode* CompileWasmJSFastCallWrapper(wasm::NativeModule* native_module, wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count, result.tagged_parameter_slots, result.protected_instructions_data.as_vector(), - result.source_positions.as_vector(), wasm::WasmCode::kWasmToJsWrapper, + result.source_positions.as_vector(), + result.inlining_positions.as_vector(), wasm::WasmCode::kWasmToJsWrapper, wasm::ExecutionTier::kNone, wasm::kNotForDebugging); return native_module->PublishCode(std::move(wasm_code)); } @@ -8879,25 +8765,6 @@ MaybeHandle CompileWasmToJSWrapper(Isolate* isolate, wasm::ImportCallKind kind, int expected_arity, wasm::Suspend suspend) { - std::unique_ptr zone = std::make_unique( - isolate->allocator(), ZONE_NAME, kCompressGraphZone); - - // Create the Graph - Graph* graph = zone->New(zone.get()); - CommonOperatorBuilder* common = zone->New(zone.get()); - MachineOperatorBuilder* machine = zone->New( - zone.get(), MachineType::PointerRepresentation(), - InstructionSelector::SupportedMachineOperatorFlags(), - InstructionSelector::AlignmentRequirements()); - MachineGraph* mcgraph = zone->New(graph, common, machine); - - WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, nullptr, - WasmGraphBuilder::kWasmApiFunctionRefMode, - nullptr, nullptr, - StubCallMode::kCallBuiltinPointer, - wasm::WasmFeatures::FromIsolate(isolate)); - builder.BuildWasmToJSWrapper(kind, expected_arity, suspend, nullptr); - // Build a name in the form "wasm-to-js--". constexpr size_t kMaxNameLen = 128; constexpr size_t kNamePrefixLen = 11; @@ -8906,74 +8773,68 @@ MaybeHandle CompileWasmToJSWrapper(Isolate* isolate, PrintSignature( base::VectorOf(name_buffer.get(), kMaxNameLen) + kNamePrefixLen, sig); - // Generate the call descriptor. - CallDescriptor* incoming = - GetWasmCallDescriptor(zone.get(), sig, WasmCallKind::kWasmImportWrapper); - if (machine->Is32()) { - incoming = GetI32WasmCallDescriptor(zone.get(), incoming); - } - - // Run the compilation job synchronously. 
- std::unique_ptr job( - Pipeline::NewWasmHeapStubCompilationJob( - isolate, incoming, std::move(zone), graph, - CodeKind::WASM_TO_JS_FUNCTION, std::move(name_buffer), - AssemblerOptions::Default(isolate))); - - // Compile the wrapper - if (job->ExecuteJob(isolate->counters()->runtime_call_stats()) == - CompilationJob::FAILED || - job->FinalizeJob(isolate) == CompilationJob::FAILED) { - return {}; - } - return job->compilation_info()->code(); -} - -MaybeHandle CompileJSToJSWrapper(Isolate* isolate, - const wasm::FunctionSig* sig, - const wasm::WasmModule* module) { - std::unique_ptr zone = std::make_unique( - isolate->allocator(), ZONE_NAME, kCompressGraphZone); - Graph* graph = zone->New(zone.get()); - CommonOperatorBuilder* common = zone->New(zone.get()); - MachineOperatorBuilder* machine = zone->New( - zone.get(), MachineType::PointerRepresentation(), - InstructionSelector::SupportedMachineOperatorFlags(), - InstructionSelector::AlignmentRequirements()); - MachineGraph* mcgraph = zone->New(graph, common, machine); - - WasmWrapperGraphBuilder builder( - zone.get(), mcgraph, sig, module, - WasmGraphBuilder::kNoSpecialParameterMode, nullptr /* isolate */, - nullptr /* source position table */, StubCallMode::kCallBuiltinPointer, - wasm::WasmFeatures::FromIsolate(isolate)); - builder.BuildJSToJSWrapper(); - - int wasm_count = static_cast(sig->parameter_count()); - CallDescriptor* incoming = Linkage::GetJSCallDescriptor( - zone.get(), false, wasm_count + 1, CallDescriptor::kNoFlags); + auto compile_with_turboshaft = [&]() { + std::unique_ptr job = + Pipeline::NewWasmTurboshaftWrapperCompilationJob( + isolate, sig, + wasm::WrapperCompilationInfo{ + .code_kind = CodeKind::WASM_TO_JS_FUNCTION, + .import_info = {kind, expected_arity, suspend}}, + nullptr, std::move(name_buffer), WasmAssemblerOptions()); + + // Compile the wrapper + if (job->ExecuteJob(isolate->counters()->runtime_call_stats()) == + CompilationJob::FAILED || + job->FinalizeJob(isolate) == CompilationJob::FAILED) { + return Handle(); + } + return job->compilation_info()->code(); + }; + auto compile_with_turbofan = [&]() { + std::unique_ptr zone = std::make_unique( + isolate->allocator(), ZONE_NAME, kCompressGraphZone); - // Build a name in the form "js-to-js::". - constexpr size_t kMaxNameLen = 128; - constexpr size_t kNamePrefixLen = 9; - auto name_buffer = std::unique_ptr(new char[kMaxNameLen]); - memcpy(name_buffer.get(), "js-to-js:", kNamePrefixLen); - PrintSignature( - base::VectorOf(name_buffer.get(), kMaxNameLen) + kNamePrefixLen, sig); + // Create the Graph + Graph* graph = zone->New(zone.get()); + CommonOperatorBuilder* common = + zone->New(zone.get()); + MachineOperatorBuilder* machine = zone->New( + zone.get(), MachineType::PointerRepresentation(), + InstructionSelector::SupportedMachineOperatorFlags(), + InstructionSelector::AlignmentRequirements()); + MachineGraph* mcgraph = zone->New(graph, common, machine); - // Run the compilation job synchronously. - std::unique_ptr job( - Pipeline::NewWasmHeapStubCompilationJob( - isolate, incoming, std::move(zone), graph, - CodeKind::JS_TO_JS_FUNCTION, std::move(name_buffer), - AssemblerOptions::Default(isolate))); + WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, nullptr, + WasmGraphBuilder::kWasmApiFunctionRefMode, + nullptr, nullptr, + StubCallMode::kCallBuiltinPointer, + wasm::WasmFeatures::FromIsolate(isolate)); + builder.BuildWasmToJSWrapper(kind, expected_arity, suspend, nullptr); + + // Generate the call descriptor. 
+ CallDescriptor* incoming = GetWasmCallDescriptor( + zone.get(), sig, WasmCallKind::kWasmImportWrapper); + if (machine->Is32()) { + incoming = GetI32WasmCallDescriptor(zone.get(), incoming); + } - if (job->ExecuteJob(isolate->counters()->runtime_call_stats()) == - CompilationJob::FAILED || - job->FinalizeJob(isolate) == CompilationJob::FAILED) { - return {}; - } - return job->compilation_info()->code(); + // Run the compilation job synchronously. + std::unique_ptr job( + Pipeline::NewWasmHeapStubCompilationJob( + isolate, incoming, std::move(zone), graph, + CodeKind::WASM_TO_JS_FUNCTION, std::move(name_buffer), + AssemblerOptions::Default(isolate))); + + // Compile the wrapper + if (job->ExecuteJob(isolate->counters()->runtime_call_stats()) == + CompilationJob::FAILED || + job->FinalizeJob(isolate) == CompilationJob::FAILED) { + return Handle(); + } + return job->compilation_info()->code(); + }; + return v8_flags.turboshaft_wasm_wrappers ? compile_with_turboshaft() + : compile_with_turbofan(); } Handle CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig, diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h index 4e3ff578d762e8..fcff46c449768a 100644 --- a/deps/v8/src/compiler/wasm-compiler.h +++ b/deps/v8/src/compiler/wasm-compiler.h @@ -86,7 +86,8 @@ wasm::WasmCode* CompileWasmJSFastCallWrapper(wasm::NativeModule*, const wasm::FunctionSig*, Handle callable); -// Returns an TurbofanCompilationJob object for a JS to Wasm wrapper. +// Returns an TurbofanCompilationJob or TurboshaftCompilationJob object +// (depending on the --turboshaft-wasm-wrappers flag) for a JS to Wasm wrapper. std::unique_ptr NewJSToWasmCompilationJob( Isolate* isolate, const wasm::FunctionSig* sig, const wasm::WasmModule* module, bool is_import, @@ -98,12 +99,6 @@ MaybeHandle CompileWasmToJSWrapper(Isolate* isolate, int expected_arity, wasm::Suspend suspend); -// Compiles a stub with JS linkage that serves as an adapter for function -// objects constructed via {WebAssembly.Function}. It performs a round-trip -// simulating a JS-to-Wasm-to-JS coercion of parameter and return values. -MaybeHandle CompileJSToJSWrapper(Isolate*, const wasm::FunctionSig*, - const wasm::WasmModule* module); - enum CWasmEntryParameters { kCodeEntry, kObjectRef, @@ -316,10 +311,9 @@ class WasmGraphBuilder { Node* ReturnCallRef(const wasm::FunctionSig* sig, base::Vector args, CheckForNull null_check, wasm::WasmCodePosition position); - void CompareToInternalFunctionAtIndex(Node* func_ref, uint32_t function_index, - Node** success_control, - Node** failure_control, - bool is_last_case); + void CompareToFuncRefAtIndex(Node* func_ref, uint32_t function_index, + Node** success_control, Node** failure_control, + bool is_last_case); // BrOnNull returns the control for the null and non-null case. std::tuple BrOnNull(Node* ref_object, wasm::ValueType type); @@ -704,14 +698,6 @@ class WasmGraphBuilder { IsReturnCall continuation, wasm::WasmCodePosition position); - // Load the trusted data if the given object is a WasmInstanceObject. - // Otherwise return the value unmodified. - // This is used when calling via WasmInternalFunction where the "ref" is - // either an instance object or a WasmApiFunctionRef. - // TODO(14499): Refactor WasmInternalFunction to avoid this conditional - // indirect load. 
- Node* LoadTrustedDataFromMaybeInstanceObject(Node* maybe_instance_object); - Node* BuildF32CopySign(Node* left, Node* right); Node* BuildF64CopySign(Node* left, Node* right); diff --git a/deps/v8/src/compiler/wasm-gc-lowering.cc b/deps/v8/src/compiler/wasm-gc-lowering.cc index 7b61489ec5d331..1bcb07abd638da 100644 --- a/deps/v8/src/compiler/wasm-gc-lowering.cc +++ b/deps/v8/src/compiler/wasm-gc-lowering.cc @@ -100,20 +100,22 @@ Node* WasmGCLowering::Null(wasm::ValueType type) { wasm::IsSubtypeOf(type, wasm::kWasmExnRef, module_) ? RootIndex::kNullValue : RootIndex::kWasmNull; - return gasm_.LoadImmutable(MachineType::TaggedPointer(), - gasm_.LoadRootRegister(), + return gasm_.LoadImmutable(MachineType::Pointer(), gasm_.LoadRootRegister(), IsolateData::root_slot_offset(index)); } Node* WasmGCLowering::IsNull(Node* object, wasm::ValueType type) { - Tagged_t static_null = - wasm::GetWasmEngine()->compressed_wasm_null_value_or_zero(); - Node* null_value = +#if V8_STATIC_ROOTS_BOOL + // TODO(14616): Extend this for shared types. + const bool is_wasm_null = !wasm::IsSubtypeOf(type, wasm::kWasmExternRef, module_) && - !wasm::IsSubtypeOf(type, wasm::kWasmExnRef, module_) && - static_null != 0 - ? gasm_.UintPtrConstant(static_null) - : Null(type); + !wasm::IsSubtypeOf(type, wasm::kWasmExnRef, module_); + Node* null_value = + gasm_.UintPtrConstant(is_wasm_null ? StaticReadOnlyRoot::kWasmNull + : StaticReadOnlyRoot::kNullValue); +#else + Node* null_value = Null(type); +#endif return gasm_.TaggedEqual(object, null_value); } diff --git a/deps/v8/src/compiler/wasm-graph-assembler.cc b/deps/v8/src/compiler/wasm-graph-assembler.cc index e9a850e542f9d1..e32b3b10356b11 100644 --- a/deps/v8/src/compiler/wasm-graph-assembler.cc +++ b/deps/v8/src/compiler/wasm-graph-assembler.cc @@ -145,14 +145,14 @@ Node* WasmGraphAssembler::LoadFromObject(MachineType type, Node* base, } Node* WasmGraphAssembler::LoadProtectedPointerFromObject(Node* object, - int offset) { + Node* offset) { #if V8_ENABLE_SANDBOX static_assert(COMPRESS_POINTERS_BOOL); - Node* tagged = - LoadFromObject(MachineType::Int32(), object, IntPtrConstant(offset)); + Node* tagged = LoadFromObject(MachineType::Int32(), object, offset); Node* trusted_cage_base = Load(MachineType::Pointer(), LoadRootRegister(), IsolateData::trusted_cage_base_offset()); - return WordOr(trusted_cage_base, BuildChangeUint32ToUintPtr(tagged)); + return BitcastWordToTagged( + WordOr(trusted_cage_base, BuildChangeUint32ToUintPtr(tagged))); #else return LoadFromObject(MachineType::AnyTagged(), object, offset); #endif // V8_ENABLE_SANDBOX @@ -220,11 +220,9 @@ Node* WasmGraphAssembler::BuildDecodeTrustedPointer(Node* handle, IsolateData::trusted_pointer_table_offset() + Internals::kTrustedPointerTableBasePointerOffset); Node* decoded_ptr = Load(MachineType::Pointer(), table, offset); - // Mask out the tag. - // TODO(saelo): Enable this once we tag pointers in the trusted table. - // decoded_ptr = WordAnd(decoded_ptr, IntPtrConstant(~tag)); - // Always set the tagged bit, used as a marking bit in that table. - decoded_ptr = WordOr(decoded_ptr, IntPtrConstant(kHeapObjectTag)); + // Untag the pointer and remove the marking bit in one operation. + decoded_ptr = WordAnd(decoded_ptr, + IntPtrConstant(~(tag | kTrustedPointerTableMarkBit))); // We have to change the type of the result value to Tagged, so if the value // gets spilled on the stack, it will get processed by the GC. 
decoded_ptr = BitcastWordToTagged(decoded_ptr); @@ -332,6 +330,20 @@ Node* WasmGraphAssembler::LoadFixedArrayElement(Node* array, int index, type, array, wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index)); } +Node* WasmGraphAssembler::LoadProtectedFixedArrayElement(Node* array, + int index) { + return LoadProtectedPointerFromObject( + array, wasm::ObjectAccess::ElementOffsetInProtectedFixedArray(index)); +} + +Node* WasmGraphAssembler::LoadProtectedFixedArrayElement(Node* array, + Node* index_intptr) { + Node* offset = IntAdd(WordShl(index_intptr, IntPtrConstant(kTaggedSizeLog2)), + IntPtrConstant(wasm::ObjectAccess::ToTagged( + ProtectedFixedArray::kHeaderSize))); + return LoadProtectedPointerFromObject(array, offset); +} + Node* WasmGraphAssembler::LoadByteArrayElement(Node* byte_array, Node* index_intptr, MachineType type) { @@ -368,6 +380,31 @@ Node* WasmGraphAssembler::LoadImmutableTrustedPointerFromObject( #endif } +Node* WasmGraphAssembler::LoadTrustedPointerFromObject(Node* object, + int field_offset, + IndirectPointerTag tag) { + Node* offset = IntPtrConstant(field_offset); +#ifdef V8_ENABLE_SANDBOX + Node* handle = LoadFromObject(MachineType::Uint32(), object, offset); + return BuildDecodeTrustedPointer(handle, tag); +#else + return LoadFromObject(MachineType::TaggedPointer(), object, offset); +#endif +} + +std::pair +WasmGraphAssembler::LoadTrustedPointerFromObjectTrapOnNull( + Node* object, int field_offset, IndirectPointerTag tag) { + Node* offset = IntPtrConstant(field_offset); +#ifdef V8_ENABLE_SANDBOX + Node* handle = LoadTrapOnNull(MachineType::Uint32(), object, offset); + return {handle, BuildDecodeTrustedPointer(handle, tag)}; +#else + Node* value = LoadTrapOnNull(MachineType::TaggedPointer(), object, offset); + return {value, value}; +#endif +} + Node* WasmGraphAssembler::StoreFixedArrayElement(Node* array, int index, Node* value, ObjectAccess access) { diff --git a/deps/v8/src/compiler/wasm-graph-assembler.h b/deps/v8/src/compiler/wasm-graph-assembler.h index 36c1829ff2cc9c..d7e4075feb2210 100644 --- a/deps/v8/src/compiler/wasm-graph-assembler.h +++ b/deps/v8/src/compiler/wasm-graph-assembler.h @@ -122,7 +122,10 @@ class WasmGraphAssembler : public GraphAssembler { return LoadFromObject(type, base, IntPtrConstant(offset)); } - Node* LoadProtectedPointerFromObject(Node* object, int offset); + Node* LoadProtectedPointerFromObject(Node* object, Node* offset); + Node* LoadProtectedPointerFromObject(Node* object, int offset) { + return LoadProtectedPointerFromObject(object, IntPtrConstant(offset)); + } Node* LoadImmutableFromObject(MachineType type, Node* base, Node* offset); @@ -166,6 +169,12 @@ class WasmGraphAssembler : public GraphAssembler { Node* LoadImmutableTrustedPointerFromObject(Node* object, int offset, IndirectPointerTag tag); + Node* LoadTrustedPointerFromObject(Node* object, int offset, + IndirectPointerTag tag); + // Returns the load node (where the source position for the trap needs to be + // set by the caller) and the result. 
+ std::pair LoadTrustedPointerFromObjectTrapOnNull( + Node* object, int offset, IndirectPointerTag tag); Node* BuildDecodeTrustedPointer(Node* handle, IndirectPointerTag tag); Node* IsSmi(Node* object); @@ -205,6 +214,9 @@ class WasmGraphAssembler : public GraphAssembler { return LoadFixedArrayElement(array, index, MachineType::AnyTagged()); } + Node* LoadProtectedFixedArrayElement(Node* array, int index); + Node* LoadProtectedFixedArrayElement(Node* array, Node* index_intptr); + Node* LoadByteArrayElement(Node* byte_array, Node* index_intptr, MachineType type); diff --git a/deps/v8/src/compiler/wasm-inlining.cc b/deps/v8/src/compiler/wasm-inlining.cc index 0a0d71af592566..a350c9df4612a0 100644 --- a/deps/v8/src/compiler/wasm-inlining.cc +++ b/deps/v8/src/compiler/wasm-inlining.cc @@ -97,7 +97,8 @@ Reduction WasmInliner::ReduceCall(Node* call) { int call_count = GetCallCount(call); int wire_byte_size = static_cast(function_bytes.size()); - int min_count_for_inlining = wire_byte_size / 2; + int min_count_for_inlining = + v8_flags.wasm_inlining_ignore_call_counts ? 0 : wire_byte_size / 2; // If liftoff ran and collected call counts, only inline calls that have been // invoked often, except for truly tiny functions. diff --git a/deps/v8/src/d8/d8-platforms.cc b/deps/v8/src/d8/d8-platforms.cc index 387f9556b23c54..42bb0754562790 100644 --- a/deps/v8/src/d8/d8-platforms.cc +++ b/deps/v8/src/d8/d8-platforms.cc @@ -213,32 +213,37 @@ class DelayedTasksPlatform final : public Platform { DelayedTasksPlatform* platform) : task_runner_(task_runner), platform_(platform) {} - void PostTask(std::unique_ptr task) final { + bool IdleTasksEnabled() final { return task_runner_->IdleTasksEnabled(); } + + bool NonNestableTasksEnabled() const final { + return task_runner_->NonNestableTasksEnabled(); + } + + private: + void PostTaskImpl(std::unique_ptr task, + const SourceLocation& location) final { task_runner_->PostTask(platform_->MakeDelayedTask(std::move(task))); } - void PostNonNestableTask(std::unique_ptr task) final { + void PostNonNestableTaskImpl(std::unique_ptr task, + const SourceLocation& location) final { task_runner_->PostNonNestableTask( platform_->MakeDelayedTask(std::move(task))); } - void PostDelayedTask(std::unique_ptr task, - double delay_in_seconds) final { + void PostDelayedTaskImpl(std::unique_ptr task, + double delay_in_seconds, + const SourceLocation& location) final { task_runner_->PostDelayedTask(platform_->MakeDelayedTask(std::move(task)), delay_in_seconds); } - void PostIdleTask(std::unique_ptr task) final { + void PostIdleTaskImpl(std::unique_ptr task, + const SourceLocation& location) final { task_runner_->PostIdleTask( platform_->MakeDelayedIdleTask(std::move(task))); } - bool IdleTasksEnabled() final { return task_runner_->IdleTasksEnabled(); } - - bool NonNestableTasksEnabled() const final { - return task_runner_->NonNestableTasksEnabled(); - } - private: friend class DelayedTaskRunnerDeleter; std::shared_ptr task_runner_; diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc index 66cb05825bb117..715ee06c0d075f 100644 --- a/deps/v8/src/d8/d8.cc +++ b/deps/v8/src/d8/d8.cc @@ -1435,6 +1435,14 @@ void Shell::DoHostImportModuleDynamically(void* import_data) { } if (!global_result.IsEmpty()) { + // This method is invoked from a microtask, where in general we may have an + // non-trivial stack. Emptying the message queue below may trigger the + // execution of a stackless GC. We need to override the embedder stack + // state, to force scanning the stack, if this happens. 
+ i::Heap* heap = reinterpret_cast(isolate)->heap(); + i::EmbedderStackStateScope scope( + heap, i::EmbedderStackStateOrigin::kExplicitInvocation, + StackState::kMayContainHeapPointers); EmptyMessageQueues(isolate); } else { DCHECK(try_catch.HasCaught()); @@ -2185,7 +2193,7 @@ void Shell::RealmEval(const v8::FunctionCallbackInfo& info) { } // Realm.shared is an accessor for a single shared value across realms. -void Shell::RealmSharedGet(Local property, +void Shell::RealmSharedGet(Local property, const PropertyCallbackInfo& info) { DCHECK(i::ValidateCallbackInfo(info)); Isolate* isolate = info.GetIsolate(); @@ -2194,7 +2202,7 @@ void Shell::RealmSharedGet(Local property, info.GetReturnValue().Set(data->realm_shared_); } -void Shell::RealmSharedSet(Local property, Local value, +void Shell::RealmSharedSet(Local property, Local value, const PropertyCallbackInfo& info) { DCHECK(i::ValidateCallbackInfo(info)); Isolate* isolate = info.GetIsolate(); @@ -2286,11 +2294,11 @@ void Shell::TestVerifySourcePositions( handle(function->shared()->GetBytecodeArray(i_isolate), i_isolate); i::interpreter::BytecodeArrayIterator bytecode_iterator(bytecodes); bool has_baseline = function->shared()->HasBaselineCode(); - i::Handle bytecode_offsets; + i::Handle bytecode_offsets; std::unique_ptr offset_iterator; if (has_baseline) { bytecode_offsets = handle( - i::ByteArray::cast( + i::TrustedByteArray::cast( function->shared()->GetCode(i_isolate)->bytecode_offset_table()), i_isolate); offset_iterator = std::make_unique( @@ -3227,24 +3235,33 @@ void Shell::NodeTypeCallback(const v8::FunctionCallbackInfo& info) { info.GetReturnValue().Set(v8::Number::New(isolate, 1)); } -Local NewDOMFunctionTemplate(Isolate* isolate, - uint16_t instance_type) { +enum class JSApiInstanceType : uint16_t { + kGenericApiObject = 0, // FunctionTemplateInfo::kNoJSApiObjectType. 
+ kEventTarget, + kNode, + kElement, + kHTMLElement, + kHTMLDivElement, +}; + +Local NewDOMFunctionTemplate( + Isolate* isolate, JSApiInstanceType instance_type) { return FunctionTemplate::New( isolate, nullptr, Local(), Local(), 0, ConstructorBehavior::kAllow, SideEffectType::kHasSideEffect, nullptr, - instance_type); + static_cast(instance_type)); } Local Shell::CreateEventTargetTemplate(Isolate* isolate) { Local event_target = - NewDOMFunctionTemplate(isolate, i::Internals::kFirstJSApiObjectType + 1); + NewDOMFunctionTemplate(isolate, JSApiInstanceType::kEventTarget); return event_target; } Local Shell::CreateNodeTemplates( Isolate* isolate, Local event_target) { Local node = - NewDOMFunctionTemplate(isolate, i::Internals::kFirstJSApiObjectType + 2); + NewDOMFunctionTemplate(isolate, JSApiInstanceType::kNode); node->Inherit(event_target); PerIsolateData* data = PerIsolateData::Get(isolate); @@ -3255,23 +3272,23 @@ Local Shell::CreateNodeTemplates( Local nodeType = FunctionTemplate::New( isolate, NodeTypeCallback, Local(), signature, 0, ConstructorBehavior::kThrow, SideEffectType::kHasSideEffect, nullptr, - i::Internals::kFirstJSApiObjectType, - i::Internals::kFirstJSApiObjectType + 3, - i::Internals::kFirstJSApiObjectType + 5); + static_cast(JSApiInstanceType::kGenericApiObject), + static_cast(JSApiInstanceType::kElement), + static_cast(JSApiInstanceType::kHTMLDivElement)); nodeType->SetAcceptAnyReceiver(false); proto_template->SetAccessorProperty( String::NewFromUtf8Literal(isolate, "nodeType"), nodeType); Local element = - NewDOMFunctionTemplate(isolate, i::Internals::kFirstJSApiObjectType + 3); + NewDOMFunctionTemplate(isolate, JSApiInstanceType::kElement); element->Inherit(node); Local html_element = - NewDOMFunctionTemplate(isolate, i::Internals::kFirstJSApiObjectType + 4); + NewDOMFunctionTemplate(isolate, JSApiInstanceType::kHTMLElement); html_element->Inherit(element); Local div_element = - NewDOMFunctionTemplate(isolate, i::Internals::kFirstJSApiObjectType + 5); + NewDOMFunctionTemplate(isolate, JSApiInstanceType::kHTMLDivElement); div_element->Inherit(html_element); return div_element; @@ -4235,6 +4252,14 @@ class InspectorClient : public v8_inspector::V8InspectorClient { context->Global()->Get(context, callback_name).ToLocalChecked(); if (!callback->IsFunction()) return; + // Running the message loop below may trigger the execution of a stackless + // GC. We need to override the embedder stack state, to force scanning the + // stack, if this happens. + i::Heap* heap = reinterpret_cast(isolate_)->heap(); + i::EmbedderStackStateScope stack_scanning_scope( + heap, i::EmbedderStackStateOrigin::kExplicitInvocation, + v8::StackState::kMayContainHeapPointers); + v8::TryCatch try_catch(isolate_); try_catch.SetVerbose(true); is_paused = true; @@ -5062,16 +5087,6 @@ bool Shell::SetOptions(int argc, char* argv[]) { } else if (strcmp(argv[i], "--expose-fast-api") == 0) { options.expose_fast_api = true; argv[i] = nullptr; -#if V8_ENABLE_SANDBOX - } else if (strcmp(argv[i], "--enable-sandbox-crash-filter") == 0) { - options.enable_sandbox_crash_filter = true; - // Enable the "soft" abort mode in addition to the crash filter. This is - // mostly so that we get better error output for (safe) fatal errors. - // TODO(saelo): consider renaming --enable-sandbox-crash-filter to - // --sandbox-fuzzing and make it a V8 flag that implies --soft-abort. 
- i::v8_flags.soft_abort = true; - argv[i] = nullptr; -#endif // V8_ENABLE_SANDBOX } else { #ifdef V8_TARGET_OS_WIN PreProcessUnicodeFilenameArg(argv, i); @@ -5876,13 +5891,19 @@ int Shell::Main(int argc, char* argv[]) { } #ifdef V8_ENABLE_SANDBOX - if (options.enable_sandbox_crash_filter) { - // Note: this must happen before the Wasm trap handler is installed, so - // that the Wasm trap handler is invoked first (and can handle Wasm OOB - // accesses), then forwards all "real" crashes to the sandbox crash filter. - i::SandboxTesting::InstallSandboxCrashFilter(); + // Enable sandbox testing mode if requested. + // + // This will install the sandbox crash filter to ignore all crashes that do + // not represent sandbox violations. + // + // Note: this must happen before the Wasm trap handler is installed, so that + // the wasm trap handler is invoked first (and can handle Wasm OOB accesses), + // then forwards all "real" crashes to the sandbox crash filter. + if (i::v8_flags.sandbox_fuzzing) { + i::SandboxTesting::Mode mode = i::SandboxTesting::Mode::kForFuzzing; + i::SandboxTesting::Enable(mode); } -#endif +#endif // V8_ENABLE_SANDBOX #if V8_ENABLE_WEBASSEMBLY if (V8_TRAP_HANDLER_SUPPORTED && options.wasm_trap_handler) { diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h index c95afb79ce0032..b1388f129b881d 100644 --- a/deps/v8/src/d8/d8.h +++ b/deps/v8/src/d8/d8.h @@ -479,10 +479,6 @@ class ShellOptions { DisallowReassignment wasm_trap_handler = {"wasm-trap-handler", true}; #endif // V8_ENABLE_WEBASSEMBLY DisallowReassignment expose_fast_api = {"expose-fast-api", false}; -#if V8_ENABLE_SANDBOX - DisallowReassignment enable_sandbox_crash_filter = { - "enable-sandbox-crash-filter", false}; -#endif // V8_ENABLE_SANDBOX DisallowReassignment max_serializer_memory = {"max-serializer-memory", 1 * i::MB}; }; @@ -558,9 +554,9 @@ class Shell : public i::AllStatic { static void RealmDispose(const v8::FunctionCallbackInfo& info); static void RealmSwitch(const v8::FunctionCallbackInfo& info); static void RealmEval(const v8::FunctionCallbackInfo& info); - static void RealmSharedGet(Local property, + static void RealmSharedGet(Local property, const PropertyCallbackInfo& info); - static void RealmSharedSet(Local property, Local value, + static void RealmSharedSet(Local property, Local value, const PropertyCallbackInfo& info); static void LogGetAndStop(const v8::FunctionCallbackInfo& info); diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc index 2ac00b1fdf44df..8e0acf3b817945 100644 --- a/deps/v8/src/debug/debug-evaluate.cc +++ b/deps/v8/src/debug/debug-evaluate.cc @@ -648,6 +648,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) { case Builtin::kDataViewPrototypeGetUint16: case Builtin::kDataViewPrototypeGetInt32: case Builtin::kDataViewPrototypeGetUint32: + case Builtin::kDataViewPrototypeGetFloat16: case Builtin::kDataViewPrototypeGetFloat32: case Builtin::kDataViewPrototypeGetFloat64: case Builtin::kDataViewPrototypeGetBigInt64: @@ -723,6 +724,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) { case Builtin::kMathCosh: case Builtin::kMathExp: case Builtin::kMathFloor: + case Builtin::kMathF16round: case Builtin::kMathFround: case Builtin::kMathHypot: case Builtin::kMathImul: diff --git a/deps/v8/src/debug/debug-interface.cc b/deps/v8/src/debug/debug-interface.cc index 938ea891f6abba..49b769a63b1680 100644 --- a/deps/v8/src/debug/debug-interface.cc +++ b/deps/v8/src/debug/debug-interface.cc @@ -792,7 +792,7 @@ Platform* 
GetCurrentPlatform() { return i::V8::GetCurrentPlatform(); } void ForceGarbageCollection(Isolate* isolate, StackState embedder_stack_state) { i::EmbedderStackStateScope stack_scope( reinterpret_cast(isolate)->heap(), - i::EmbedderStackStateScope::kImplicitThroughTask, embedder_stack_state); + i::EmbedderStackStateOrigin::kImplicitThroughTask, embedder_stack_state); isolate->LowMemoryNotification(); } diff --git a/deps/v8/src/debug/debug-wasm-objects.cc b/deps/v8/src/debug/debug-wasm-objects.cc index 5108304a8db26d..ffe48c22641656 100644 --- a/deps/v8/src/debug/debug-wasm-objects.cc +++ b/deps/v8/src/debug/debug-wasm-objects.cc @@ -278,10 +278,11 @@ struct FunctionsProxy : NamedDebugProxy { uint32_t index) { Handle trusted_data{ instance->trusted_data(isolate), isolate}; - Handle internal = - WasmTrustedInstanceData::GetOrCreateWasmInternalFunction( - isolate, trusted_data, index); - return WasmInternalFunction::GetOrCreateExternal(internal); + Handle func_ref = WasmTrustedInstanceData::GetOrCreateFuncRef( + isolate, trusted_data, index); + Handle internal_function{func_ref->internal(), + isolate}; + return WasmInternalFunction::GetOrCreateExternal(internal_function); } static Handle GetName(Isolate* isolate, @@ -950,18 +951,20 @@ Handle WasmValueObject::New( isolate); t = GetRefTypeName(isolate, type, module->native_module()); v = ArrayProxy::Create(isolate, Handle::cast(ref), module); - } else if (IsWasmInternalFunction(*ref)) { - auto internal_fct = Handle::cast(ref); + } else if (IsWasmFuncRef(*ref)) { + Handle internal_fct{ + WasmFuncRef::cast(*ref)->internal(), isolate}; v = WasmInternalFunction::GetOrCreateExternal(internal_fct); // If the module is not provided by the caller, retrieve it from the // instance object. If the function was created in JavaScript using // `new WebAssembly.Function(...)`, a module for name resolution is not // available. if (module_object.is_null() && - IsWasmInstanceObject(internal_fct->ref())) { - module_object = handle( - WasmInstanceObject::cast(internal_fct->ref())->module_object(), - isolate); + IsWasmTrustedInstanceData(internal_fct->ref(isolate))) { + module_object = + handle(WasmTrustedInstanceData::cast(internal_fct->ref(isolate)) + ->module_object(), + isolate); } t = GetRefTypeName(isolate, value.type(), module_object); } else if (IsWasmNull(*ref)) { diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc index c1ee01a898a496..2beeb216848c9b 100644 --- a/deps/v8/src/debug/debug.cc +++ b/deps/v8/src/debug/debug.cc @@ -735,7 +735,7 @@ void Debug::Break(JavaScriptFrame* frame, Handle break_target) { case StepOver: // StepOver should not break in a deeper frame than target frame. if (current_frame_count > target_frame_count) return; - V8_FALLTHROUGH; + [[fallthrough]]; case StepInto: { // StepInto and StepOver should enter "generator stepping" mode, except // for the implicit initial yield in generators, where it should simply @@ -1544,7 +1544,7 @@ void Debug::PrepareStep(StepAction step_action) { } case StepOver: thread_local_.target_frame_count_ = current_frame_count; - V8_FALLTHROUGH; + [[fallthrough]]; case StepInto: FloodWithOneShot(shared); break; @@ -1684,7 +1684,7 @@ void Debug::DiscardBaselineCode(Tagged shared) { // TODO(v8:11429): Avoid this heap walk somehow. 
HeapObjectIterator iterator(isolate_->heap()); auto trampoline = BUILTIN_CODE(isolate_, InterpreterEntryTrampoline); - shared->FlushBaselineCode(isolate_); + shared->FlushBaselineCode(); for (Tagged obj = iterator.Next(); !obj.is_null(); obj = iterator.Next()) { if (IsJSFunction(obj)) { @@ -1713,7 +1713,7 @@ void Debug::DiscardAllBaselineCode() { } else if (IsSharedFunctionInfo(obj)) { Tagged shared = SharedFunctionInfo::cast(obj); if (shared->HasBaselineCode()) { - shared->FlushBaselineCode(isolate_); + shared->FlushBaselineCode(); } } } @@ -2132,7 +2132,7 @@ MaybeHandle Debug::GetTopLevelWithRecompile( DCHECK_LE(kFunctionLiteralIdTopLevel, script->shared_function_info_count()); DCHECK_LE(script->shared_function_info_count(), script->shared_function_infos()->length()); - MaybeObject maybeToplevel = script->shared_function_infos()->get(0); + Tagged maybeToplevel = script->shared_function_infos()->get(0); Tagged heap_object; const bool topLevelInfoExists = maybeToplevel.GetHeapObject(&heap_object) && !IsUndefined(heap_object); @@ -3109,36 +3109,35 @@ bool Debug::PerformSideEffectCheckForAccessor( } void Debug::IgnoreSideEffectsOnNextCallTo( - Handle call_handler_info) { - DCHECK(call_handler_info->IsSideEffectCallHandlerInfo()); + Handle function) { + DCHECK(function->has_side_effects()); // There must be only one such call handler info. - CHECK(ignore_side_effects_for_call_handler_info_.is_null()); - ignore_side_effects_for_call_handler_info_ = call_handler_info; + CHECK(ignore_side_effects_for_function_template_info_.is_null()); + ignore_side_effects_for_function_template_info_ = function; } bool Debug::PerformSideEffectCheckForCallback( - Handle call_handler_info) { + Handle function) { RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger); DCHECK_EQ(isolate_->debug_execution_mode(), DebugInfo::kSideEffects); - // If an empty |call_handler_info| handle is passed here then it means that + // If an empty |function| handle is passed here then it means that // the callback IS side-effectful (see CallApiCallbackWithSideEffects // builtin). - if (!call_handler_info.is_null() && - call_handler_info->IsSideEffectFreeCallHandlerInfo()) { + if (!function.is_null() && !function->has_side_effects()) { return true; } - if (!ignore_side_effects_for_call_handler_info_.is_null()) { - // If the |ignore_side_effects_for_call_handler_info_| is set then the next - // API callback call must be made to this function. - CHECK(ignore_side_effects_for_call_handler_info_.is_identical_to( - call_handler_info)); - ignore_side_effects_for_call_handler_info_ = {}; + if (!ignore_side_effects_for_function_template_info_.is_null()) { + // If the |ignore_side_effects_for_function_template_info_| is set then + // the next API callback call must be made to this function. + CHECK(ignore_side_effects_for_function_template_info_.is_identical_to( + function)); + ignore_side_effects_for_function_template_info_ = {}; return true; } if (v8_flags.trace_side_effect_free_debug_evaluate) { - PrintF("[debug-evaluate] API CallHandlerInfo may cause side effect.\n"); + PrintF("[debug-evaluate] FunctionTemplateInfo may cause side effect.\n"); } side_effect_check_failed_ = true; diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h index 0bcab289fe2ce0..68b33168b40ed2 100644 --- a/deps/v8/src/debug/debug.h +++ b/deps/v8/src/debug/debug.h @@ -401,7 +401,7 @@ class V8_EXPORT_PRIVATE Debug { // Make a one-time exception for a next call to given side-effectful API // function. 
- void IgnoreSideEffectsOnNextCallTo(Handle call_handler_info); + void IgnoreSideEffectsOnNextCallTo(Handle function); bool PerformSideEffectCheck(Handle function, Handle receiver); @@ -409,8 +409,7 @@ class V8_EXPORT_PRIVATE Debug { bool PerformSideEffectCheckForAccessor(Handle accessor_info, Handle receiver, AccessorComponent component); - bool PerformSideEffectCheckForCallback( - Handle call_handler_info); + bool PerformSideEffectCheckForCallback(Handle function); bool PerformSideEffectCheckForInterceptor( Handle interceptor_info); @@ -674,9 +673,9 @@ class V8_EXPORT_PRIVATE Debug { // This is a part of machinery for allowing to ignore side effects for one // call to this API function. See Function::NewInstanceWithSideEffectType(). - // Since the call_handler_info is allowlisted right before the call to + // Since the FunctionTemplateInfo is allowlisted right before the call to // constructor there must be never more than one such object at a time. - Handle ignore_side_effects_for_call_handler_info_; + Handle ignore_side_effects_for_function_template_info_; Isolate* isolate_; diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc index 9318e642136bac..e31d7850d4670f 100644 --- a/deps/v8/src/debug/liveedit.cc +++ b/deps/v8/src/debug/liveedit.cc @@ -729,7 +729,8 @@ void TranslateSourcePositionTable(Isolate* isolate, Handle code, Zone zone(isolate->allocator(), ZONE_NAME); SourcePositionTableBuilder builder(&zone); - Handle source_position_table(code->SourcePositionTable(), isolate); + Handle source_position_table(code->SourcePositionTable(), + isolate); for (SourcePositionTableIterator iterator(*source_position_table); !iterator.done(); iterator.Advance()) { SourcePosition position = iterator.source_position(); @@ -739,7 +740,7 @@ void TranslateSourcePositionTable(Isolate* isolate, Handle code, iterator.is_statement()); } - Handle new_source_position_table( + Handle new_source_position_table( builder.ToSourcePositionTable(isolate)); code->set_source_position_table(*new_source_position_table, kReleaseStore); LOG_CODE_EVENT(isolate, @@ -922,7 +923,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle