From fd4f80ce54d7f7b7503e0999f6a9d293d493846d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C3=ABl=20Zasso?= Date: Tue, 12 Apr 2022 11:10:15 +0200 Subject: [PATCH] deps: update V8 to 10.1.124.6 PR-URL: https://github.com/nodejs/node/pull/42657 Reviewed-By: Darshan Sen Reviewed-By: Richard Lau Reviewed-By: Jiawen Geng Reviewed-By: Michael Dawson --- deps/v8/.bazelrc | 13 +- deps/v8/.gitignore | 2 +- deps/v8/.style.yapf | 2 + deps/v8/.vpython3 | 25 + deps/v8/AUTHORS | 8 + deps/v8/BUILD.bazel | 401 +- deps/v8/BUILD.gn | 383 +- deps/v8/COMMON_OWNERS | 8 +- deps/v8/DEPS | 44 +- deps/v8/PRESUBMIT.py | 45 +- deps/v8/WATCHLISTS | 23 +- deps/v8/WORKSPACE | 56 +- .../trace_event/common/trace_event_common.h | 4 +- deps/v8/bazel/BUILD.icu | 54 +- deps/v8/bazel/BUILD.trace_event_common | 10 + deps/v8/bazel/BUILD.zlib | 23 +- deps/v8/bazel/OWNERS | 1 - deps/v8/bazel/config/BUILD.bazel | 199 +- deps/v8/bazel/config/v8-target-cpu.bzl | 2 +- deps/v8/bazel/defs.bzl | 87 +- deps/v8/bazel/generate-inspector-files.cmd | 24 - deps/v8/bazel/generate-inspector-files.sh | 19 - deps/v8/bazel/requirements.in | 1 + deps/v8/bazel/requirements.txt | 81 + deps/v8/bazel/v8-non-pointer-compression.bzl | 6 +- deps/v8/gni/OWNERS | 4 + deps/v8/gni/release_branch_toggle.gni | 7 + deps/v8/gni/snapshot_toolchain.gni | 4 + deps/v8/gni/v8.cmx | 8 + deps/v8/gni/v8.gni | 13 +- deps/v8/include/OWNERS | 7 +- deps/v8/include/cppgc/README.md | 121 +- deps/v8/include/cppgc/default-platform.h | 3 +- deps/v8/include/cppgc/explicit-management.h | 26 +- deps/v8/include/cppgc/garbage-collected.h | 3 +- deps/v8/include/cppgc/heap-consistency.h | 13 + deps/v8/include/cppgc/heap.h | 9 +- .../cppgc/internal/prefinalizer-handler.h | 30 - .../v8/include/cppgc/internal/write-barrier.h | 54 +- deps/v8/include/cppgc/member.h | 7 +- deps/v8/include/cppgc/persistent.h | 61 +- deps/v8/include/cppgc/platform.h | 4 +- deps/v8/include/cppgc/prefinalizer.h | 45 +- deps/v8/include/cppgc/testing.h | 11 +- 
deps/v8/include/js_protocol.pdl | 16 +- deps/v8/include/libplatform/libplatform.h | 11 - deps/v8/include/v8-array-buffer.h | 4 +- deps/v8/include/v8-callbacks.h | 14 + deps/v8/include/v8-context.h | 17 +- deps/v8/include/v8-cppgc.h | 146 +- deps/v8/include/v8-debug.h | 25 +- deps/v8/include/v8-embedder-heap.h | 24 +- deps/v8/include/v8-embedder-state-scope.h | 5 +- deps/v8/include/v8-exception.h | 7 - deps/v8/include/v8-fast-api-calls.h | 5 - deps/v8/include/v8-initialization.h | 78 +- deps/v8/include/v8-inspector.h | 29 + deps/v8/include/v8-internal.h | 318 +- deps/v8/include/v8-isolate.h | 19 +- deps/v8/include/v8-locker.h | 1 + deps/v8/include/v8-message.h | 45 - deps/v8/include/v8-metrics.h | 5 + deps/v8/include/v8-object.h | 8 +- deps/v8/include/v8-platform.h | 160 +- deps/v8/include/v8-primitive.h | 6 +- deps/v8/include/v8-script.h | 47 +- deps/v8/include/v8-snapshot.h | 2 - deps/v8/include/v8-traced-handle.h | 95 +- deps/v8/include/v8-value-serializer-version.h | 2 +- deps/v8/include/v8-value-serializer.h | 34 +- deps/v8/include/v8-version.h | 8 +- deps/v8/include/v8-weak-callback-info.h | 26 +- deps/v8/include/v8.h | 1 - deps/v8/include/v8config.h | 38 +- deps/v8/infra/mb/PRESUBMIT.py | 4 + deps/v8/infra/mb/gn_isolate_map.pyl | 2 +- deps/v8/infra/mb/mb_config.pyl | 33 +- deps/v8/infra/testing/PRESUBMIT.py | 31 +- deps/v8/infra/testing/builders.pyl | 45 +- deps/v8/samples/cppgc/hello-world.cc | 24 +- deps/v8/samples/hello-world.cc | 6 + deps/v8/samples/process.cc | 6 + deps/v8/samples/shell.cc | 6 + deps/v8/src/DEPS | 4 + deps/v8/src/api/OWNERS | 3 + deps/v8/src/api/api-inl.h | 17 +- deps/v8/src/api/api-macros.h | 13 +- deps/v8/src/api/api.cc | 527 +- deps/v8/src/api/api.h | 16 +- deps/v8/src/asmjs/asm-js.cc | 31 +- deps/v8/src/asmjs/asm-parser.cc | 5 +- deps/v8/src/asmjs/asm-parser.h | 1 - deps/v8/src/ast/OWNERS | 1 - deps/v8/src/ast/ast-value-factory.cc | 16 +- deps/v8/src/ast/ast-value-factory.h | 33 +- deps/v8/src/ast/ast.cc | 244 +- 
deps/v8/src/ast/ast.h | 294 +- deps/v8/src/ast/scopes.cc | 215 +- deps/v8/src/ast/scopes.h | 65 +- deps/v8/src/base/atomic-utils.h | 15 + deps/v8/src/base/atomicops.h | 17 +- deps/v8/src/base/bit-field.h | 2 +- deps/v8/src/base/bounded-page-allocator.cc | 13 +- deps/v8/src/base/bounded-page-allocator.h | 1 - deps/v8/src/base/build_config.h | 11 +- deps/v8/src/base/cpu.cc | 35 +- deps/v8/src/base/cpu.h | 6 + deps/v8/src/base/debug/stack_trace_posix.cc | 2 +- .../base/emulated-virtual-address-subspace.cc | 98 +- .../base/emulated-virtual-address-subspace.h | 22 +- deps/v8/src/base/immediate-crash.h | 6 +- deps/v8/src/base/macros.h | 2 +- deps/v8/src/base/page-allocator.cc | 10 +- .../src/base/platform/condition-variable.cc | 6 +- deps/v8/src/base/platform/elapsed-timer.h | 2 +- deps/v8/src/base/platform/mutex.cc | 6 +- deps/v8/src/base/platform/mutex.h | 2 +- deps/v8/src/base/platform/platform-aix.cc | 38 + deps/v8/src/base/platform/platform-cygwin.cc | 12 +- deps/v8/src/base/platform/platform-darwin.cc | 107 + deps/v8/src/base/platform/platform-fuchsia.cc | 262 +- deps/v8/src/base/platform/platform-linux.cc | 2 +- deps/v8/src/base/platform/platform-macos.cc | 162 +- deps/v8/src/base/platform/platform-openbsd.cc | 2 +- deps/v8/src/base/platform/platform-posix.cc | 177 +- deps/v8/src/base/platform/platform-posix.h | 2 + .../src/base/platform/platform-starboard.cc | 12 +- deps/v8/src/base/platform/platform-win32.cc | 152 +- deps/v8/src/base/platform/platform.h | 43 +- deps/v8/src/base/platform/semaphore.cc | 6 +- deps/v8/src/base/platform/semaphore.h | 4 +- deps/v8/src/base/platform/time.cc | 85 +- deps/v8/src/base/platform/time.h | 5 - .../{utils => base}/pointer-with-payload.h | 64 +- deps/v8/src/base/safe_conversions_impl.h | 3 +- .../src/base/sanitizer/lsan-page-allocator.cc | 18 +- .../sanitizer/lsan-virtual-address-space.cc | 32 +- .../sanitizer/lsan-virtual-address-space.h | 19 +- deps/v8/src/base/sys-info.cc | 2 +- deps/v8/src/base/threaded-list.h | 13 + 
.../src/base/utils/random-number-generator.cc | 5 +- .../virtual-address-space-page-allocator.cc | 6 +- deps/v8/src/base/virtual-address-space.cc | 170 +- deps/v8/src/base/virtual-address-space.h | 45 +- deps/v8/src/base/vlq-base64.h | 3 +- .../baseline/arm/baseline-assembler-arm-inl.h | 11 +- .../arm64/baseline-assembler-arm64-inl.h | 12 +- deps/v8/src/baseline/baseline-assembler.h | 2 + .../src/baseline/baseline-batch-compiler.cc | 56 +- deps/v8/src/baseline/baseline-compiler.cc | 68 +- .../ia32/baseline-assembler-ia32-inl.h | 14 +- .../loong64/baseline-assembler-loong64-inl.h | 14 +- .../mips/baseline-assembler-mips-inl.h | 13 +- .../mips64/baseline-assembler-mips64-inl.h | 13 +- .../baseline/ppc/baseline-assembler-ppc-inl.h | 99 +- .../riscv64/baseline-assembler-riscv64-inl.h | 11 +- .../s390/baseline-assembler-s390-inl.h | 194 +- .../s390/baseline-compiler-s390-inl.h | 77 +- .../baseline/x64/baseline-assembler-x64-inl.h | 14 +- deps/v8/src/bigint/bigint.h | 39 +- deps/v8/src/bigint/tostring.cc | 1 + deps/v8/src/builtins/accessors.cc | 73 +- deps/v8/src/builtins/arm/builtins-arm.cc | 162 +- deps/v8/src/builtins/arm64/builtins-arm64.cc | 281 +- deps/v8/src/builtins/array-join.tq | 54 +- deps/v8/src/builtins/base.tq | 93 +- deps/v8/src/builtins/builtins-array-gen.cc | 38 +- deps/v8/src/builtins/builtins-array-gen.h | 3 +- deps/v8/src/builtins/builtins-array.cc | 314 +- .../builtins/builtins-async-function-gen.cc | 71 +- deps/v8/src/builtins/builtins-async-gen.cc | 247 +- deps/v8/src/builtins/builtins-async-gen.h | 20 - .../builtins/builtins-async-generator-gen.cc | 33 +- deps/v8/src/builtins/builtins-bigint.cc | 2 +- deps/v8/src/builtins/builtins-callsite.cc | 28 +- .../src/builtins/builtins-collections-gen.cc | 45 +- deps/v8/src/builtins/builtins-console.cc | 161 +- .../src/builtins/builtins-constructor-gen.cc | 2 +- deps/v8/src/builtins/builtins-dataview.cc | 3 +- deps/v8/src/builtins/builtins-date.cc | 4 +- deps/v8/src/builtins/builtins-definitions.h | 105 +- 
deps/v8/src/builtins/builtins-error.cc | 4 +- deps/v8/src/builtins/builtins-function.cc | 19 +- .../v8/src/builtins/builtins-generator-gen.cc | 2 +- deps/v8/src/builtins/builtins-handler-gen.cc | 8 +- deps/v8/src/builtins/builtins-ic-gen.cc | 24 +- deps/v8/src/builtins/builtins-internal-gen.cc | 114 +- deps/v8/src/builtins/builtins-intl.cc | 163 +- deps/v8/src/builtins/builtins-iterator-gen.cc | 11 +- deps/v8/src/builtins/builtins-iterator-gen.h | 4 +- deps/v8/src/builtins/builtins-lazy-gen.cc | 30 +- deps/v8/src/builtins/builtins-object-gen.cc | 2 +- deps/v8/src/builtins/builtins-object.cc | 5 +- .../v8/src/builtins/builtins-shadow-realms.cc | 248 + .../src/builtins/builtins-shadowrealm-gen.cc | 186 + .../builtins-sharedarraybuffer-gen.cc | 49 +- deps/v8/src/builtins/builtins-struct.cc | 123 + deps/v8/src/builtins/builtins-temporal-gen.cc | 98 + deps/v8/src/builtins/builtins-temporal.cc | 735 ++- .../src/builtins/builtins-typed-array-gen.cc | 12 +- .../src/builtins/builtins-typed-array-gen.h | 6 +- deps/v8/src/builtins/builtins-typed-array.cc | 16 +- deps/v8/src/builtins/builtins.cc | 83 +- deps/v8/src/builtins/builtins.h | 58 +- deps/v8/src/builtins/convert.tq | 57 + deps/v8/src/builtins/data-view.tq | 11 +- deps/v8/src/builtins/frame-arguments.tq | 4 +- deps/v8/src/builtins/function.tq | 6 +- deps/v8/src/builtins/ia32/builtins-ia32.cc | 185 +- deps/v8/src/builtins/internal.tq | 9 - .../src/builtins/loong64/builtins-loong64.cc | 177 +- deps/v8/src/builtins/mips/builtins-mips.cc | 163 +- .../v8/src/builtins/mips64/builtins-mips64.cc | 175 +- deps/v8/src/builtins/number.tq | 10 +- deps/v8/src/builtins/object.tq | 5 +- deps/v8/src/builtins/ppc/builtins-ppc.cc | 166 +- deps/v8/src/builtins/promise-all.tq | 8 +- deps/v8/src/builtins/promise-any.tq | 4 +- deps/v8/src/builtins/promise-finally.tq | 1 + deps/v8/src/builtins/promise-misc.tq | 85 +- deps/v8/src/builtins/promise-then.tq | 16 + .../src/builtins/riscv64/builtins-riscv64.cc | 183 +- 
deps/v8/src/builtins/s390/builtins-s390.cc | 146 +- .../src/builtins/setup-builtins-internal.cc | 31 +- deps/v8/src/builtins/string-repeat.tq | 2 +- deps/v8/src/builtins/torque-internal.tq | 4 + .../builtins/typed-array-createtypedarray.tq | 34 +- deps/v8/src/builtins/typed-array-set.tq | 149 +- deps/v8/src/builtins/typed-array-sort.tq | 39 +- deps/v8/src/builtins/typed-array-subarray.tq | 31 +- deps/v8/src/builtins/typed-array.tq | 2 + deps/v8/src/builtins/wasm.tq | 55 +- deps/v8/src/builtins/x64/builtins-x64.cc | 1066 ++-- deps/v8/src/codegen/OWNERS | 5 +- deps/v8/src/codegen/arm/assembler-arm.cc | 18 +- deps/v8/src/codegen/arm/assembler-arm.h | 17 +- .../arm/interface-descriptors-arm-inl.h | 74 +- .../v8/src/codegen/arm/macro-assembler-arm.cc | 98 +- deps/v8/src/codegen/arm/macro-assembler-arm.h | 37 +- deps/v8/src/codegen/arm/register-arm.h | 46 +- deps/v8/src/codegen/arm/reglist-arm.h | 56 + .../src/codegen/arm64/assembler-arm64-inl.h | 15 +- deps/v8/src/codegen/arm64/assembler-arm64.cc | 20 +- deps/v8/src/codegen/arm64/assembler-arm64.h | 8 +- deps/v8/src/codegen/arm64/cpu-arm64.cc | 4 +- .../arm64/interface-descriptors-arm64-inl.h | 73 +- .../codegen/arm64/macro-assembler-arm64.cc | 124 +- .../src/codegen/arm64/macro-assembler-arm64.h | 85 +- deps/v8/src/codegen/arm64/register-arm64.h | 165 +- deps/v8/src/codegen/arm64/reglist-arm64.h | 176 + deps/v8/src/codegen/assembler.cc | 11 +- deps/v8/src/codegen/assembler.h | 12 +- deps/v8/src/codegen/callable.h | 6 +- deps/v8/src/codegen/code-factory.cc | 18 +- deps/v8/src/codegen/code-factory.h | 8 +- deps/v8/src/codegen/code-stub-assembler.cc | 732 ++- deps/v8/src/codegen/code-stub-assembler.h | 213 +- deps/v8/src/codegen/compiler.cc | 408 +- deps/v8/src/codegen/compiler.h | 37 +- deps/v8/src/codegen/cpu-features.h | 2 + .../v8/src/codegen/external-reference-table.h | 2 - deps/v8/src/codegen/external-reference.cc | 42 +- deps/v8/src/codegen/external-reference.h | 52 +- deps/v8/src/codegen/ia32/assembler-ia32.cc | 8 
+- .../ia32/interface-descriptors-ia32-inl.h | 57 +- .../src/codegen/ia32/macro-assembler-ia32.cc | 25 +- .../src/codegen/ia32/macro-assembler-ia32.h | 11 - deps/v8/src/codegen/ia32/register-ia32.h | 18 +- deps/v8/src/codegen/ia32/reglist-ia32.h | 30 + .../src/codegen/interface-descriptors-inl.h | 46 +- deps/v8/src/codegen/interface-descriptors.cc | 10 +- deps/v8/src/codegen/interface-descriptors.h | 352 +- .../src/codegen/loong64/assembler-loong64.cc | 14 +- .../src/codegen/loong64/assembler-loong64.h | 6 +- .../src/codegen/loong64/constants-loong64.h | 4 +- .../interface-descriptors-loong64-inl.h | 74 +- .../loong64/macro-assembler-loong64.cc | 208 +- .../codegen/loong64/macro-assembler-loong64.h | 6 +- .../v8/src/codegen/loong64/register-loong64.h | 73 +- deps/v8/src/codegen/loong64/reglist-loong64.h | 50 + deps/v8/src/codegen/machine-type.cc | 4 +- deps/v8/src/codegen/machine-type.h | 48 +- deps/v8/src/codegen/mips/assembler-mips.cc | 15 +- deps/v8/src/codegen/mips/assembler-mips.h | 6 +- .../mips/interface-descriptors-mips-inl.h | 64 +- .../src/codegen/mips/macro-assembler-mips.cc | 165 +- .../src/codegen/mips/macro-assembler-mips.h | 11 +- deps/v8/src/codegen/mips/register-mips.h | 100 +- deps/v8/src/codegen/mips/reglist-mips.h | 48 + .../v8/src/codegen/mips64/assembler-mips64.cc | 14 +- deps/v8/src/codegen/mips64/assembler-mips64.h | 6 +- .../mips64/interface-descriptors-mips64-inl.h | 74 +- .../codegen/mips64/macro-assembler-mips64.cc | 178 +- .../codegen/mips64/macro-assembler-mips64.h | 15 +- deps/v8/src/codegen/mips64/register-mips64.h | 100 +- deps/v8/src/codegen/mips64/reglist-mips64.h | 48 + .../src/codegen/optimized-compilation-info.cc | 16 +- .../src/codegen/optimized-compilation-info.h | 6 +- deps/v8/src/codegen/ppc/assembler-ppc.cc | 10 +- deps/v8/src/codegen/ppc/assembler-ppc.h | 5 +- deps/v8/src/codegen/ppc/constants-ppc.h | 12 +- .../ppc/interface-descriptors-ppc-inl.h | 72 +- .../v8/src/codegen/ppc/macro-assembler-ppc.cc | 141 +- 
deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 14 +- deps/v8/src/codegen/ppc/register-ppc.h | 91 +- deps/v8/src/codegen/ppc/reglist-ppc.h | 63 + deps/v8/src/codegen/register-arch.h | 40 +- deps/v8/src/codegen/register-base.h | 85 + deps/v8/src/codegen/register-configuration.cc | 100 +- deps/v8/src/codegen/register-configuration.h | 23 +- deps/v8/src/codegen/register.h | 95 +- deps/v8/src/codegen/reglist-base.h | 232 + deps/v8/src/codegen/reglist.h | 56 +- deps/v8/src/codegen/reloc-info.cc | 2 +- .../src/codegen/riscv64/assembler-riscv64.cc | 82 +- .../src/codegen/riscv64/assembler-riscv64.h | 95 +- .../src/codegen/riscv64/constants-riscv64.h | 48 +- .../interface-descriptors-riscv64-inl.h | 75 +- .../riscv64/macro-assembler-riscv64.cc | 216 +- .../codegen/riscv64/macro-assembler-riscv64.h | 6 +- .../v8/src/codegen/riscv64/register-riscv64.h | 102 +- deps/v8/src/codegen/riscv64/reglist-riscv64.h | 64 + deps/v8/src/codegen/s390/assembler-s390.cc | 11 +- deps/v8/src/codegen/s390/assembler-s390.h | 5 +- .../s390/interface-descriptors-s390-inl.h | 71 +- .../src/codegen/s390/macro-assembler-s390.cc | 998 +++- .../src/codegen/s390/macro-assembler-s390.h | 488 +- deps/v8/src/codegen/s390/register-s390.h | 59 +- deps/v8/src/codegen/s390/reglist-s390.h | 58 + deps/v8/src/codegen/safepoint-table.cc | 75 +- deps/v8/src/codegen/safepoint-table.h | 127 +- .../macro-assembler-shared-ia32-x64.cc | 2 +- deps/v8/src/codegen/signature.h | 9 +- deps/v8/src/codegen/source-position.cc | 28 +- deps/v8/src/codegen/source-position.h | 1 + deps/v8/src/codegen/tnode.h | 15 +- deps/v8/src/codegen/turbo-assembler.cc | 13 +- deps/v8/src/codegen/turbo-assembler.h | 3 +- deps/v8/src/codegen/x64/assembler-x64-inl.h | 11 +- deps/v8/src/codegen/x64/assembler-x64.cc | 50 +- deps/v8/src/codegen/x64/assembler-x64.h | 15 +- .../x64/interface-descriptors-x64-inl.h | 60 +- .../v8/src/codegen/x64/macro-assembler-x64.cc | 115 +- deps/v8/src/codegen/x64/macro-assembler-x64.h | 34 +- 
deps/v8/src/codegen/x64/register-x64.h | 28 +- deps/v8/src/codegen/x64/reglist-x64.h | 37 + deps/v8/src/common/allow-deprecated.h | 37 + deps/v8/src/common/globals.h | 183 +- deps/v8/src/common/message-template.h | 15 +- deps/v8/src/common/operation.h | 59 + deps/v8/src/common/ptr-compr-inl.h | 6 - deps/v8/src/common/ptr-compr.h | 47 +- .../lazy-compile-dispatcher.cc | 23 +- .../lazy-compile-dispatcher.h | 10 +- .../optimizing-compile-dispatcher.cc | 9 +- deps/v8/src/compiler/OWNERS | 10 +- deps/v8/src/compiler/access-builder.cc | 26 +- deps/v8/src/compiler/access-info.cc | 37 +- .../backend/arm/code-generator-arm.cc | 75 +- .../backend/arm/instruction-selector-arm.cc | 40 +- .../backend/arm64/code-generator-arm64.cc | 60 +- .../backend/arm64/instruction-codes-arm64.h | 4 +- .../arm64/instruction-scheduler-arm64.cc | 4 +- .../arm64/instruction-selector-arm64.cc | 8 +- .../compiler/backend/code-generator-impl.h | 4 +- .../v8/src/compiler/backend/code-generator.cc | 11 +- deps/v8/src/compiler/backend/code-generator.h | 4 +- deps/v8/src/compiler/backend/gap-resolver.cc | 11 +- .../backend/ia32/code-generator-ia32.cc | 59 +- .../backend/ia32/instruction-selector-ia32.cc | 4 +- .../compiler/backend/instruction-selector.cc | 32 +- deps/v8/src/compiler/backend/instruction.cc | 43 +- deps/v8/src/compiler/backend/instruction.h | 28 +- .../backend/loong64/code-generator-loong64.cc | 54 +- .../loong64/instruction-selector-loong64.cc | 4 +- .../backend/mid-tier-register-allocator.cc | 625 +- .../backend/mips/code-generator-mips.cc | 42 +- .../backend/mips/instruction-selector-mips.cc | 8 +- .../backend/mips64/code-generator-mips64.cc | 46 +- .../mips64/instruction-selector-mips64.cc | 8 +- .../v8/src/compiler/backend/move-optimizer.cc | 4 +- .../backend/ppc/code-generator-ppc.cc | 312 +- .../backend/ppc/instruction-selector-ppc.cc | 4 +- .../compiler/backend/register-allocation.h | 10 +- .../compiler/backend/register-allocator.cc | 243 +- 
.../src/compiler/backend/register-allocator.h | 5 + .../backend/riscv64/code-generator-riscv64.cc | 306 +- .../riscv64/instruction-codes-riscv64.h | 10 +- .../riscv64/instruction-scheduler-riscv64.cc | 4 + .../riscv64/instruction-selector-riscv64.cc | 187 +- .../backend/s390/code-generator-s390.cc | 1210 ++-- .../backend/s390/instruction-selector-s390.cc | 16 +- .../backend/x64/code-generator-x64.cc | 157 +- .../backend/x64/instruction-codes-x64.h | 4 +- .../backend/x64/instruction-scheduler-x64.cc | 4 +- .../backend/x64/instruction-selector-x64.cc | 106 +- deps/v8/src/compiler/branch-elimination.cc | 6 +- deps/v8/src/compiler/bytecode-analysis.cc | 476 +- deps/v8/src/compiler/bytecode-analysis.h | 10 + .../v8/src/compiler/bytecode-graph-builder.cc | 155 +- deps/v8/src/compiler/bytecode-liveness-map.cc | 42 +- deps/v8/src/compiler/bytecode-liveness-map.h | 109 +- deps/v8/src/compiler/c-linkage.cc | 104 +- deps/v8/src/compiler/code-assembler.cc | 8 +- deps/v8/src/compiler/code-assembler.h | 16 +- .../src/compiler/compilation-dependencies.cc | 6 +- deps/v8/src/compiler/csa-load-elimination.cc | 142 +- deps/v8/src/compiler/csa-load-elimination.h | 60 +- .../src/compiler/effect-control-linearizer.cc | 202 +- .../src/compiler/effect-control-linearizer.h | 13 +- .../src/compiler/escape-analysis-reducer.cc | 19 +- .../v8/src/compiler/escape-analysis-reducer.h | 3 +- deps/v8/src/compiler/fast-api-calls.cc | 40 + deps/v8/src/compiler/fast-api-calls.h | 2 + deps/v8/src/compiler/frame-states.cc | 7 +- deps/v8/src/compiler/graph-assembler.cc | 433 +- deps/v8/src/compiler/graph-assembler.h | 52 +- deps/v8/src/compiler/heap-refs.cc | 868 +-- deps/v8/src/compiler/heap-refs.h | 30 +- deps/v8/src/compiler/int64-lowering.cc | 14 + deps/v8/src/compiler/js-call-reducer.cc | 422 +- deps/v8/src/compiler/js-create-lowering.cc | 8 +- deps/v8/src/compiler/js-generic-lowering.cc | 43 +- deps/v8/src/compiler/js-heap-broker.cc | 119 +- deps/v8/src/compiler/js-heap-broker.h | 18 +- 
deps/v8/src/compiler/js-heap-copy-reducer.cc | 226 - deps/v8/src/compiler/js-heap-copy-reducer.h | 38 - deps/v8/src/compiler/js-inlining-heuristic.cc | 20 +- deps/v8/src/compiler/js-inlining-heuristic.h | 13 +- deps/v8/src/compiler/js-inlining.cc | 30 +- deps/v8/src/compiler/js-intrinsic-lowering.cc | 31 +- deps/v8/src/compiler/js-intrinsic-lowering.h | 1 + .../js-native-context-specialization.cc | 125 +- .../js-native-context-specialization.h | 14 +- deps/v8/src/compiler/js-operator.cc | 98 +- deps/v8/src/compiler/js-operator.h | 87 +- deps/v8/src/compiler/js-type-hint-lowering.cc | 10 +- deps/v8/src/compiler/js-typed-lowering.cc | 20 +- deps/v8/src/compiler/linkage.cc | 10 +- deps/v8/src/compiler/linkage.h | 18 +- deps/v8/src/compiler/load-elimination.cc | 6 +- deps/v8/src/compiler/loop-analysis.cc | 77 +- deps/v8/src/compiler/loop-analysis.h | 24 +- deps/v8/src/compiler/loop-unrolling.cc | 34 +- deps/v8/src/compiler/loop-unrolling.h | 2 +- .../v8/src/compiler/machine-graph-verifier.cc | 6 +- .../src/compiler/machine-operator-reducer.cc | 68 +- deps/v8/src/compiler/machine-operator.cc | 4 +- deps/v8/src/compiler/memory-lowering.cc | 74 +- deps/v8/src/compiler/memory-optimizer.cc | 17 +- deps/v8/src/compiler/node-properties.cc | 2 +- deps/v8/src/compiler/opcodes.h | 44 +- deps/v8/src/compiler/operation-typer.cc | 8 + deps/v8/src/compiler/operator-properties.cc | 20 +- deps/v8/src/compiler/persistent-map.h | 2 +- deps/v8/src/compiler/pipeline.cc | 367 +- .../v8/src/compiler/property-access-builder.h | 2 - deps/v8/src/compiler/representation-change.cc | 36 +- deps/v8/src/compiler/representation-change.h | 18 +- deps/v8/src/compiler/select-lowering.cc | 2 +- .../compiler/simplified-lowering-verifier.cc | 251 + .../compiler/simplified-lowering-verifier.h | 93 + deps/v8/src/compiler/simplified-lowering.cc | 119 +- deps/v8/src/compiler/simplified-operator.cc | 56 +- deps/v8/src/compiler/simplified-operator.h | 88 +- deps/v8/src/compiler/state-values-utils.cc | 40 +- 
deps/v8/src/compiler/state-values-utils.h | 15 +- .../src/compiler/store-store-elimination.cc | 140 +- deps/v8/src/compiler/typer.cc | 37 +- deps/v8/src/compiler/types.cc | 14 +- deps/v8/src/compiler/types.h | 34 +- deps/v8/src/compiler/verifier.cc | 30 +- deps/v8/src/compiler/wasm-compiler.cc | 1270 ++-- deps/v8/src/compiler/wasm-compiler.h | 79 +- deps/v8/src/compiler/wasm-escape-analysis.cc | 7 +- deps/v8/src/compiler/wasm-inlining.cc | 114 +- deps/v8/src/compiler/wasm-inlining.h | 10 +- deps/v8/src/compiler/wasm-loop-peeling.cc | 133 + deps/v8/src/compiler/wasm-loop-peeling.h | 33 + deps/v8/src/compiler/zone-stats.h | 1 - deps/v8/src/d8/async-hooks-wrapper.cc | 166 +- deps/v8/src/d8/async-hooks-wrapper.h | 29 +- deps/v8/src/d8/cov.h | 1 + deps/v8/src/d8/d8-console.cc | 14 +- deps/v8/src/d8/d8-test.cc | 78 +- deps/v8/src/d8/d8.cc | 351 +- deps/v8/src/d8/d8.h | 13 +- deps/v8/src/debug/debug-coverage.cc | 12 +- deps/v8/src/debug/debug-evaluate.cc | 28 +- deps/v8/src/debug/debug-frames.cc | 4 +- deps/v8/src/debug/debug-interface.cc | 176 +- deps/v8/src/debug/debug-interface.h | 41 +- deps/v8/src/debug/debug-property-iterator.cc | 10 +- deps/v8/src/debug/debug-scopes.cc | 19 +- deps/v8/src/debug/debug-scopes.h | 2 - .../src/debug/debug-stack-trace-iterator.cc | 6 +- deps/v8/src/debug/debug-wasm-objects.cc | 89 +- deps/v8/src/debug/debug.cc | 235 +- deps/v8/src/debug/debug.h | 27 +- deps/v8/src/debug/interface-types.h | 7 +- deps/v8/src/debug/liveedit.cc | 6 +- deps/v8/src/debug/wasm/gdb-server/packet.h | 2 +- deps/v8/src/debug/wasm/gdb-server/transport.h | 3 +- .../wasm/gdb-server/wasm-module-debug.cc | 20 +- deps/v8/src/deoptimizer/deoptimizer.cc | 53 +- deps/v8/src/deoptimizer/deoptimizer.h | 4 +- deps/v8/src/deoptimizer/frame-description.h | 2 +- deps/v8/src/deoptimizer/translated-state.cc | 3 +- deps/v8/src/deoptimizer/translation-array.h | 2 +- deps/v8/src/diagnostics/disassembler.cc | 2 +- deps/v8/src/diagnostics/eh-frame.h | 2 +- 
deps/v8/src/diagnostics/objects-debug.cc | 136 +- deps/v8/src/diagnostics/objects-printer.cc | 125 +- deps/v8/src/diagnostics/perf-jit.cc | 110 +- deps/v8/src/diagnostics/perf-jit.h | 1 + .../src/diagnostics/riscv64/disasm-riscv64.cc | 73 + .../src/diagnostics/unwinding-info-win64.cc | 30 - deps/v8/src/diagnostics/x64/disasm-x64.cc | 4 + deps/v8/src/execution/OWNERS | 2 +- deps/v8/src/execution/arguments-inl.h | 21 +- deps/v8/src/execution/arguments.cc | 13 - deps/v8/src/execution/arguments.h | 20 +- .../src/execution/arm/frame-constants-arm.h | 26 +- .../execution/arm64/frame-constants-arm64.h | 29 +- .../v8/src/execution/arm64/simulator-arm64.cc | 2 +- deps/v8/src/execution/clobber-registers.cc | 63 + deps/v8/src/execution/clobber-registers.h | 18 + deps/v8/src/execution/execution.cc | 24 +- deps/v8/src/execution/frame-constants.h | 14 +- deps/v8/src/execution/frames-inl.h | 3 +- deps/v8/src/execution/frames.cc | 147 +- deps/v8/src/execution/frames.h | 31 +- deps/v8/src/execution/futex-emulation.cc | 35 +- deps/v8/src/execution/futex-emulation.h | 10 +- .../src/execution/ia32/frame-constants-ia32.h | 25 +- deps/v8/src/execution/isolate-data.h | 51 +- deps/v8/src/execution/isolate-utils-inl.h | 6 +- deps/v8/src/execution/isolate.cc | 1041 ++-- deps/v8/src/execution/isolate.h | 117 +- deps/v8/src/execution/local-isolate.cc | 17 +- deps/v8/src/execution/local-isolate.h | 14 +- .../loong64/frame-constants-loong64.h | 25 +- .../execution/loong64/simulator-loong64.cc | 32 +- deps/v8/src/execution/messages.cc | 104 +- deps/v8/src/execution/messages.h | 9 +- deps/v8/src/execution/microtask-queue.cc | 4 +- .../src/execution/mips/frame-constants-mips.h | 23 +- .../execution/mips64/frame-constants-mips64.h | 23 +- .../src/execution/ppc/frame-constants-ppc.h | 31 +- .../riscv64/frame-constants-riscv64.h | 28 +- .../execution/riscv64/simulator-riscv64.cc | 344 +- deps/v8/src/execution/runtime-profiler.cc | 261 - .../src/execution/s390/frame-constants-s390.h | 29 +- 
deps/v8/src/execution/s390/simulator-s390.cc | 13 +- deps/v8/src/execution/simulator-base.cc | 5 - deps/v8/src/execution/simulator-base.h | 4 +- deps/v8/src/execution/stack-guard.cc | 1 - deps/v8/src/execution/thread-local-top.cc | 1 + deps/v8/src/execution/thread-local-top.h | 5 +- deps/v8/src/execution/tiering-manager.cc | 412 ++ .../{runtime-profiler.h => tiering-manager.h} | 50 +- deps/v8/src/execution/v8threads.cc | 3 + deps/v8/src/execution/v8threads.h | 2 +- deps/v8/src/execution/vm-state-inl.h | 2 +- .../src/execution/x64/frame-constants-x64.h | 25 +- deps/v8/src/extensions/gc-extension.cc | 27 +- .../v8/src/extensions/statistics-extension.cc | 45 +- deps/v8/src/flags/flag-definitions.h | 195 +- deps/v8/src/flags/flags.h | 2 - deps/v8/src/handles/global-handles.cc | 99 +- deps/v8/src/handles/global-handles.h | 4 + deps/v8/src/handles/handles-inl.h | 1 + deps/v8/src/handles/handles.cc | 48 +- deps/v8/src/handles/handles.h | 51 +- deps/v8/src/handles/local-handles-inl.h | 11 +- deps/v8/src/heap/OWNERS | 3 +- deps/v8/src/heap/allocation-observer.h | 131 +- deps/v8/src/heap/allocation-result.h | 74 + deps/v8/src/heap/base/active-system-pages.cc | 71 + deps/v8/src/heap/base/active-system-pages.h | 51 + .../base/asm/mips64/push_registers_asm.cc | 3 +- deps/v8/src/heap/base/stack.cc | 25 +- deps/v8/src/heap/base/stack.h | 11 +- deps/v8/src/heap/basic-memory-chunk.h | 16 +- deps/v8/src/heap/code-object-registry.h | 1 - deps/v8/src/heap/code-range.cc | 20 +- deps/v8/src/heap/concurrent-allocator-inl.h | 10 +- deps/v8/src/heap/concurrent-allocator.cc | 16 +- deps/v8/src/heap/concurrent-marking.cc | 26 +- deps/v8/src/heap/cppgc-js/cpp-heap.cc | 300 +- deps/v8/src/heap/cppgc-js/cpp-heap.h | 25 +- .../src/heap/cppgc-js/cpp-marking-state-inl.h | 47 + deps/v8/src/heap/cppgc-js/cpp-marking-state.h | 67 + deps/v8/src/heap/cppgc-js/cpp-snapshot.cc | 3 +- .../cppgc-js/unified-heap-marking-visitor.cc | 2 +- .../cppgc-js/unified-heap-marking-visitor.h | 26 +- 
deps/v8/src/heap/cppgc/default-platform.cc | 2 +- deps/v8/src/heap/cppgc/explicit-management.cc | 30 +- deps/v8/src/heap/cppgc/garbage-collector.h | 9 + deps/v8/src/heap/cppgc/gc-invoker.cc | 12 + deps/v8/src/heap/cppgc/gc-invoker.h | 2 + deps/v8/src/heap/cppgc/globals.h | 21 + deps/v8/src/heap/cppgc/heap-base.cc | 5 +- deps/v8/src/heap/cppgc/heap-base.h | 12 +- deps/v8/src/heap/cppgc/heap-object-header.h | 19 +- deps/v8/src/heap/cppgc/heap-space.cc | 2 + deps/v8/src/heap/cppgc/heap-space.h | 1 + deps/v8/src/heap/cppgc/heap.cc | 7 +- deps/v8/src/heap/cppgc/heap.h | 3 + deps/v8/src/heap/cppgc/marker.cc | 77 +- deps/v8/src/heap/cppgc/marker.h | 40 +- deps/v8/src/heap/cppgc/marking-state.h | 196 +- deps/v8/src/heap/cppgc/marking-visitor.cc | 2 +- deps/v8/src/heap/cppgc/marking-visitor.h | 6 +- deps/v8/src/heap/cppgc/page-memory.h | 3 +- deps/v8/src/heap/cppgc/platform.cc | 8 +- .../v8/src/heap/cppgc/prefinalizer-handler.cc | 17 +- deps/v8/src/heap/cppgc/prefinalizer-handler.h | 13 +- deps/v8/src/heap/cppgc/remembered-set.cc | 135 + deps/v8/src/heap/cppgc/remembered-set.h | 68 + deps/v8/src/heap/cppgc/sweeper.cc | 24 +- deps/v8/src/heap/cppgc/sweeper.h | 1 + deps/v8/src/heap/cppgc/testing.cc | 8 + deps/v8/src/heap/cppgc/write-barrier.cc | 18 +- deps/v8/src/heap/embedder-tracing-inl.h | 46 + deps/v8/src/heap/embedder-tracing.cc | 87 +- deps/v8/src/heap/embedder-tracing.h | 52 +- ...cator-inl.h => evacuation-allocator-inl.h} | 36 +- ...cal-allocator.h => evacuation-allocator.h} | 12 +- deps/v8/src/heap/factory-base.cc | 34 +- deps/v8/src/heap/factory-base.h | 3 +- deps/v8/src/heap/factory-inl.h | 4 + deps/v8/src/heap/factory.cc | 294 +- deps/v8/src/heap/factory.h | 72 +- deps/v8/src/heap/gc-tracer.cc | 542 +- deps/v8/src/heap/gc-tracer.h | 136 +- deps/v8/src/heap/heap-allocator-inl.h | 250 + deps/v8/src/heap/heap-allocator.cc | 163 + deps/v8/src/heap/heap-allocator.h | 119 + deps/v8/src/heap/heap-inl.h | 234 +- deps/v8/src/heap/heap-layout-tracer.cc | 3 +- 
deps/v8/src/heap/heap-write-barrier-inl.h | 17 +- deps/v8/src/heap/heap-write-barrier.cc | 28 +- deps/v8/src/heap/heap-write-barrier.h | 6 + deps/v8/src/heap/heap.cc | 1069 ++-- deps/v8/src/heap/heap.h | 245 +- deps/v8/src/heap/incremental-marking-job.cc | 5 +- deps/v8/src/heap/incremental-marking.cc | 171 +- deps/v8/src/heap/incremental-marking.h | 4 +- deps/v8/src/heap/invalidated-slots-inl.h | 13 +- deps/v8/src/heap/invalidated-slots.cc | 11 +- deps/v8/src/heap/invalidated-slots.h | 8 +- deps/v8/src/heap/large-spaces.cc | 73 +- deps/v8/src/heap/large-spaces.h | 4 +- deps/v8/src/heap/local-factory.cc | 5 + deps/v8/src/heap/local-factory.h | 17 +- deps/v8/src/heap/local-heap-inl.h | 3 +- deps/v8/src/heap/local-heap.cc | 2 +- deps/v8/src/heap/mark-compact-inl.h | 11 +- deps/v8/src/heap/mark-compact.cc | 722 ++- deps/v8/src/heap/mark-compact.h | 37 +- deps/v8/src/heap/marking-barrier.cc | 19 +- deps/v8/src/heap/marking-visitor-inl.h | 72 +- deps/v8/src/heap/marking-visitor.h | 67 +- deps/v8/src/heap/marking-worklist-inl.h | 38 +- deps/v8/src/heap/marking-worklist.cc | 30 +- deps/v8/src/heap/marking-worklist.h | 44 +- deps/v8/src/heap/memory-allocator.cc | 140 +- deps/v8/src/heap/memory-allocator.h | 176 +- deps/v8/src/heap/memory-chunk-layout.h | 7 +- deps/v8/src/heap/memory-chunk.cc | 28 +- deps/v8/src/heap/memory-chunk.h | 5 +- deps/v8/src/heap/new-spaces-inl.h | 15 +- deps/v8/src/heap/new-spaces.cc | 119 +- deps/v8/src/heap/new-spaces.h | 34 +- deps/v8/src/heap/object-stats.cc | 86 +- deps/v8/src/heap/objects-visiting-inl.h | 13 + deps/v8/src/heap/objects-visiting.h | 2 + deps/v8/src/heap/paged-spaces-inl.h | 23 +- deps/v8/src/heap/paged-spaces.cc | 100 +- deps/v8/src/heap/paged-spaces.h | 41 +- deps/v8/src/heap/read-only-spaces.cc | 11 +- deps/v8/src/heap/reference-summarizer.cc | 116 + deps/v8/src/heap/reference-summarizer.h | 55 + deps/v8/src/heap/remembered-set-inl.h | 16 +- deps/v8/src/heap/remembered-set.h | 13 - deps/v8/src/heap/safepoint.cc | 34 +- 
deps/v8/src/heap/safepoint.h | 12 +- deps/v8/src/heap/scavenger-inl.h | 24 +- deps/v8/src/heap/scavenger.cc | 70 +- deps/v8/src/heap/scavenger.h | 12 +- deps/v8/src/heap/setup-heap-internal.cc | 40 +- deps/v8/src/heap/slot-set.cc | 2 +- deps/v8/src/heap/slot-set.h | 53 +- deps/v8/src/heap/spaces-inl.h | 33 +- deps/v8/src/heap/spaces.cc | 43 +- deps/v8/src/heap/spaces.h | 46 +- deps/v8/src/heap/sweeper.cc | 70 +- deps/v8/src/heap/sweeper.h | 13 +- deps/v8/src/heap/third-party/heap-api-stub.cc | 12 + deps/v8/src/ic/OWNERS | 1 - deps/v8/src/ic/accessor-assembler.cc | 205 +- deps/v8/src/ic/accessor-assembler.h | 43 +- deps/v8/src/ic/binary-op-assembler.cc | 98 +- deps/v8/src/ic/binary-op-assembler.h | 79 +- deps/v8/src/ic/handler-configuration-inl.h | 18 +- deps/v8/src/ic/handler-configuration.h | 6 + deps/v8/src/ic/ic.cc | 160 +- deps/v8/src/ic/ic.h | 16 +- deps/v8/src/ic/keyed-store-generic.cc | 95 +- deps/v8/src/ic/keyed-store-generic.h | 4 +- deps/v8/src/ic/stub-cache.h | 6 +- deps/v8/src/init/OWNERS | 3 +- deps/v8/src/init/bootstrapper.cc | 384 +- deps/v8/src/init/heap-symbols.h | 37 +- deps/v8/src/init/isolate-allocator.cc | 46 +- deps/v8/src/init/v8.cc | 142 +- deps/v8/src/init/v8.h | 9 +- deps/v8/src/inspector/BUILD.gn | 2 + deps/v8/src/inspector/injected-script.cc | 43 +- deps/v8/src/inspector/injected-script.h | 1 + deps/v8/src/inspector/inspected-context.cc | 2 +- deps/v8/src/inspector/inspected-context.h | 4 +- .../inspector/inspector_protocol_config.json | 1 - deps/v8/src/inspector/string-16.cc | 10 - deps/v8/src/inspector/string-16.h | 11 +- deps/v8/src/inspector/string-util.cc | 4 - deps/v8/src/inspector/string-util.h | 6 - .../v8/src/inspector/v8-console-agent-impl.cc | 2 - deps/v8/src/inspector/v8-console-message.cc | 24 +- deps/v8/src/inspector/v8-console.cc | 90 +- deps/v8/src/inspector/v8-console.h | 13 +- .../src/inspector/v8-debugger-agent-impl.cc | 171 +- .../v8/src/inspector/v8-debugger-agent-impl.h | 19 +- 
deps/v8/src/inspector/v8-debugger-id.cc | 28 +- deps/v8/src/inspector/v8-debugger-id.h | 17 +- deps/v8/src/inspector/v8-debugger-script.cc | 64 +- deps/v8/src/inspector/v8-debugger-script.h | 4 +- deps/v8/src/inspector/v8-debugger.cc | 173 +- deps/v8/src/inspector/v8-debugger.h | 13 +- deps/v8/src/inspector/v8-inspector-impl.cc | 46 +- deps/v8/src/inspector/v8-inspector-impl.h | 8 +- .../inspector/v8-inspector-session-impl.cc | 4 +- .../src/inspector/v8-profiler-agent-impl.cc | 33 +- .../v8/src/inspector/v8-runtime-agent-impl.cc | 69 +- deps/v8/src/inspector/v8-runtime-agent-impl.h | 5 +- deps/v8/src/inspector/v8-stack-trace-impl.cc | 51 +- deps/v8/src/inspector/v8-stack-trace-impl.h | 17 +- deps/v8/src/inspector/value-mirror.cc | 10 +- deps/v8/src/inspector/value-mirror.h | 3 +- .../src/interpreter/bytecode-array-builder.cc | 50 +- .../src/interpreter/bytecode-array-builder.h | 64 +- .../interpreter/bytecode-array-iterator.cc | 16 +- .../src/interpreter/bytecode-array-iterator.h | 11 +- .../bytecode-array-random-iterator.cc | 2 +- .../bytecode-array-random-iterator.h | 5 +- .../src/interpreter/bytecode-array-writer.cc | 1 + deps/v8/src/interpreter/bytecode-decoder.cc | 17 +- deps/v8/src/interpreter/bytecode-decoder.h | 3 +- deps/v8/src/interpreter/bytecode-generator.cc | 274 +- deps/v8/src/interpreter/bytecode-generator.h | 12 +- .../bytecode-register-optimizer.cc | 2 +- deps/v8/src/interpreter/bytecode-register.cc | 9 +- deps/v8/src/interpreter/bytecode-register.h | 8 +- deps/v8/src/interpreter/bytecodes.cc | 4 +- deps/v8/src/interpreter/bytecodes.h | 28 +- .../src/interpreter/interpreter-assembler.cc | 33 +- .../src/interpreter/interpreter-generator.cc | 93 +- .../src/interpreter/interpreter-generator.h | 4 - .../interpreter-intrinsics-generator.cc | 15 + .../src/interpreter/interpreter-intrinsics.h | 6 +- deps/v8/src/interpreter/interpreter.cc | 17 +- deps/v8/src/interpreter/interpreter.h | 6 +- deps/v8/src/json/json-parser.cc | 28 +- 
deps/v8/src/json/json-parser.h | 3 +- deps/v8/src/json/json-stringifier.cc | 8 +- deps/v8/src/libplatform/default-platform.cc | 9 +- deps/v8/src/libplatform/tracing/recorder.h | 6 +- .../libplatform/tracing/tracing-controller.cc | 2 +- deps/v8/src/libsampler/sampler.cc | 4 +- deps/v8/src/logging/code-events.h | 7 +- deps/v8/src/logging/counters-definitions.h | 35 +- deps/v8/src/logging/counters-scopes.h | 73 +- deps/v8/src/logging/counters.cc | 22 +- deps/v8/src/logging/counters.h | 45 +- deps/v8/src/logging/log.cc | 36 +- deps/v8/src/logging/log.h | 26 +- .../v8/src/logging/runtime-call-stats-scope.h | 10 +- deps/v8/src/logging/runtime-call-stats.cc | 6 +- deps/v8/src/logging/runtime-call-stats.h | 19 +- deps/v8/src/maglev/DEPS | 6 + deps/v8/src/maglev/OWNERS | 3 + deps/v8/src/maglev/maglev-basic-block.h | 107 + deps/v8/src/maglev/maglev-code-gen-state.h | 135 + deps/v8/src/maglev/maglev-code-generator.cc | 378 ++ deps/v8/src/maglev/maglev-code-generator.h | 27 + deps/v8/src/maglev/maglev-compilation-info.cc | 123 + deps/v8/src/maglev/maglev-compilation-info.h | 137 + deps/v8/src/maglev/maglev-compilation-unit.cc | 45 + deps/v8/src/maglev/maglev-compilation-unit.h | 57 + deps/v8/src/maglev/maglev-compiler.cc | 209 + deps/v8/src/maglev/maglev-compiler.h | 53 + .../maglev/maglev-concurrent-dispatcher.cc | 194 + .../src/maglev/maglev-concurrent-dispatcher.h | 92 + deps/v8/src/maglev/maglev-graph-builder.cc | 616 ++ deps/v8/src/maglev/maglev-graph-builder.h | 383 ++ deps/v8/src/maglev/maglev-graph-labeller.h | 65 + deps/v8/src/maglev/maglev-graph-printer.cc | 446 ++ deps/v8/src/maglev/maglev-graph-printer.h | 85 + deps/v8/src/maglev/maglev-graph-processor.h | 423 ++ deps/v8/src/maglev/maglev-graph.h | 60 + .../maglev/maglev-interpreter-frame-state.h | 400 ++ deps/v8/src/maglev/maglev-ir.cc | 922 +++ deps/v8/src/maglev/maglev-ir.h | 1461 +++++ deps/v8/src/maglev/maglev-regalloc-data.h | 83 + deps/v8/src/maglev/maglev-regalloc.cc | 875 +++ 
deps/v8/src/maglev/maglev-regalloc.h | 112 + .../src/maglev/maglev-register-frame-array.h | 113 + deps/v8/src/maglev/maglev-vreg-allocator.h | 57 + deps/v8/src/maglev/maglev.cc | 24 + deps/v8/src/maglev/maglev.h | 28 + deps/v8/src/numbers/conversions-inl.h | 13 +- deps/v8/src/numbers/integer-literal-inl.h | 43 + deps/v8/src/numbers/integer-literal.h | 106 + deps/v8/src/objects/all-objects-inl.h | 4 +- deps/v8/src/objects/backing-store.cc | 90 +- deps/v8/src/objects/backing-store.h | 3 +- deps/v8/src/objects/bigint.cc | 2 +- ...-frame-info-inl.h => call-site-info-inl.h} | 31 +- ...{stack-frame-info.cc => call-site-info.cc} | 134 +- .../{stack-frame-info.h => call-site-info.h} | 61 +- ...{stack-frame-info.tq => call-site-info.tq} | 6 +- deps/v8/src/objects/code-inl.h | 261 +- deps/v8/src/objects/code-kind.cc | 2 - deps/v8/src/objects/code-kind.h | 94 +- deps/v8/src/objects/code.cc | 12 +- deps/v8/src/objects/code.h | 138 +- .../v8/src/objects/compilation-cache-table.cc | 2 +- deps/v8/src/objects/contexts-inl.h | 16 +- deps/v8/src/objects/contexts.cc | 68 +- deps/v8/src/objects/contexts.h | 38 +- deps/v8/src/objects/debug-objects-inl.h | 33 + deps/v8/src/objects/debug-objects.cc | 62 + deps/v8/src/objects/debug-objects.h | 48 + deps/v8/src/objects/debug-objects.tq | 60 + deps/v8/src/objects/descriptor-array-inl.h | 3 +- deps/v8/src/objects/elements-kind.h | 5 + deps/v8/src/objects/elements.cc | 94 +- deps/v8/src/objects/embedder-data-slot-inl.h | 112 +- deps/v8/src/objects/embedder-data-slot.h | 96 +- deps/v8/src/objects/feedback-cell-inl.h | 7 +- deps/v8/src/objects/feedback-vector-inl.h | 50 +- deps/v8/src/objects/feedback-vector.cc | 201 +- deps/v8/src/objects/feedback-vector.h | 109 +- deps/v8/src/objects/feedback-vector.tq | 8 +- deps/v8/src/objects/fixed-array-inl.h | 5 +- deps/v8/src/objects/fixed-array.h | 32 +- deps/v8/src/objects/foreign-inl.h | 7 +- deps/v8/src/objects/hash-table-inl.h | 74 + deps/v8/src/objects/hash-table.h | 81 +- 
deps/v8/src/objects/heap-object.h | 26 +- deps/v8/src/objects/internal-index.h | 4 + deps/v8/src/objects/intl-objects.cc | 462 +- deps/v8/src/objects/intl-objects.h | 80 +- deps/v8/src/objects/intl-objects.tq | 2 +- deps/v8/src/objects/js-array-buffer-inl.h | 39 +- deps/v8/src/objects/js-array-buffer.cc | 2 +- deps/v8/src/objects/js-array-buffer.h | 8 +- deps/v8/src/objects/js-array-buffer.tq | 27 +- deps/v8/src/objects/js-date-time-format.cc | 274 +- deps/v8/src/objects/js-date-time-format.h | 19 +- deps/v8/src/objects/js-function-inl.h | 95 +- deps/v8/src/objects/js-function.cc | 180 +- deps/v8/src/objects/js-function.h | 61 +- deps/v8/src/objects/js-function.tq | 15 +- deps/v8/src/objects/js-list-format.cc | 22 +- deps/v8/src/objects/js-list-format.h | 4 +- deps/v8/src/objects/js-locale.cc | 2 +- deps/v8/src/objects/js-number-format-inl.h | 3 + deps/v8/src/objects/js-number-format.cc | 939 ++- deps/v8/src/objects/js-number-format.h | 41 +- deps/v8/src/objects/js-number-format.tq | 2 + deps/v8/src/objects/js-objects-inl.h | 132 +- deps/v8/src/objects/js-objects.cc | 146 +- deps/v8/src/objects/js-objects.h | 100 +- deps/v8/src/objects/js-objects.tq | 14 +- deps/v8/src/objects/js-plural-rules-inl.h | 3 + deps/v8/src/objects/js-plural-rules.cc | 51 +- deps/v8/src/objects/js-plural-rules.h | 6 + deps/v8/src/objects/js-plural-rules.tq | 2 + deps/v8/src/objects/js-promise.h | 3 +- deps/v8/src/objects/js-promise.tq | 2 +- deps/v8/src/objects/js-proxy.h | 4 +- deps/v8/src/objects/js-regexp.cc | 7 +- .../v8/src/objects/js-relative-time-format.cc | 43 +- deps/v8/src/objects/js-shadow-realms-inl.h | 28 + deps/v8/src/objects/js-shadow-realms.h | 39 + deps/v8/src/objects/js-shadow-realms.tq | 5 + deps/v8/src/objects/js-struct-inl.h | 30 + deps/v8/src/objects/js-struct.h | 35 + deps/v8/src/objects/js-struct.tq | 7 + deps/v8/src/objects/js-temporal-objects-inl.h | 26 +- deps/v8/src/objects/js-temporal-objects.cc | 5196 +++++++++++++++++ deps/v8/src/objects/js-temporal-objects.h 
| 240 + deps/v8/src/objects/js-temporal-objects.tq | 6 +- deps/v8/src/objects/lookup.cc | 34 +- deps/v8/src/objects/lookup.h | 2 + deps/v8/src/objects/map-inl.h | 7 +- deps/v8/src/objects/map-updater.cc | 68 +- deps/v8/src/objects/map.cc | 173 +- deps/v8/src/objects/map.h | 2 + deps/v8/src/objects/name-inl.h | 25 +- deps/v8/src/objects/name.h | 59 +- deps/v8/src/objects/name.tq | 35 +- deps/v8/src/objects/object-list-macros.h | 372 +- deps/v8/src/objects/object-macros-undef.h | 13 +- deps/v8/src/objects/object-macros.h | 91 +- .../objects/objects-body-descriptors-inl.h | 92 +- deps/v8/src/objects/objects-definitions.h | 2 + deps/v8/src/objects/objects-inl.h | 219 +- deps/v8/src/objects/objects.cc | 349 +- deps/v8/src/objects/objects.h | 97 +- deps/v8/src/objects/oddball-inl.h | 5 + deps/v8/src/objects/oddball.h | 2 + deps/v8/src/objects/option-utils.h | 71 + deps/v8/src/objects/ordered-hash-table.cc | 7 + deps/v8/src/objects/ordered-hash-table.h | 3 + .../src/objects/osr-optimized-code-cache.cc | 77 +- .../v8/src/objects/osr-optimized-code-cache.h | 25 +- deps/v8/src/objects/property-array-inl.h | 26 + deps/v8/src/objects/property-array.h | 4 + deps/v8/src/objects/property-cell.h | 8 +- deps/v8/src/objects/property-descriptor.cc | 2 +- deps/v8/src/objects/property-details.h | 5 + deps/v8/src/objects/scope-info-inl.h | 110 + deps/v8/src/objects/scope-info.cc | 168 +- deps/v8/src/objects/scope-info.h | 48 +- deps/v8/src/objects/scope-info.tq | 64 +- deps/v8/src/objects/script.h | 7 + .../v8/src/objects/shared-function-info-inl.h | 32 +- deps/v8/src/objects/shared-function-info.cc | 21 +- deps/v8/src/objects/shared-function-info.h | 12 +- deps/v8/src/objects/shared-function-info.tq | 23 +- deps/v8/src/objects/source-text-module.cc | 3 +- deps/v8/src/objects/string-inl.h | 113 +- deps/v8/src/objects/string-table-inl.h | 2 +- deps/v8/src/objects/string-table.cc | 92 +- deps/v8/src/objects/string.cc | 27 +- deps/v8/src/objects/string.h | 40 +- 
.../v8/src/objects/swiss-hash-table-helpers.h | 1 + deps/v8/src/objects/symbol-table.cc | 22 + deps/v8/src/objects/tagged-field-inl.h | 34 + deps/v8/src/objects/tagged-field.h | 9 +- deps/v8/src/objects/template-objects.cc | 51 +- deps/v8/src/objects/transitions-inl.h | 76 +- deps/v8/src/objects/transitions.cc | 223 +- deps/v8/src/objects/transitions.h | 81 +- deps/v8/src/objects/turbofan-types.tq | 65 +- deps/v8/src/objects/value-serializer.cc | 369 +- deps/v8/src/objects/value-serializer.h | 32 +- deps/v8/src/objects/visitors.h | 5 + deps/v8/src/parsing/OWNERS | 1 - deps/v8/src/parsing/expression-scope.h | 2 + deps/v8/src/parsing/func-name-inferrer.cc | 2 +- deps/v8/src/parsing/func-name-inferrer.h | 22 +- deps/v8/src/parsing/parse-info.cc | 16 +- deps/v8/src/parsing/parse-info.h | 23 +- deps/v8/src/parsing/parser-base.h | 80 +- deps/v8/src/parsing/parser.cc | 152 +- deps/v8/src/parsing/parser.h | 16 +- deps/v8/src/parsing/preparse-data.cc | 4 +- deps/v8/src/parsing/preparser.h | 3 +- .../src/parsing/scanner-character-streams.cc | 13 +- deps/v8/src/parsing/scanner.cc | 16 +- deps/v8/src/parsing/scanner.h | 1 - deps/v8/src/profiler/cpu-profiler.cc | 13 +- deps/v8/src/profiler/heap-profiler.cc | 5 +- .../src/profiler/heap-snapshot-generator.cc | 356 +- .../v8/src/profiler/heap-snapshot-generator.h | 91 +- deps/v8/src/profiler/profile-generator.cc | 11 +- deps/v8/src/profiler/profiler-listener.cc | 5 +- deps/v8/src/profiler/profiler-listener.h | 3 +- deps/v8/src/profiler/strings-storage.cc | 2 +- deps/v8/src/profiler/tick-sample.cc | 16 +- .../regexp/arm/regexp-macro-assembler-arm.cc | 9 +- .../loong64/regexp-macro-assembler-loong64.cc | 11 +- .../mips/regexp-macro-assembler-mips.cc | 9 +- .../mips64/regexp-macro-assembler-mips64.cc | 11 +- .../regexp/ppc/regexp-macro-assembler-ppc.cc | 17 +- .../regexp/ppc/regexp-macro-assembler-ppc.h | 3 +- deps/v8/src/regexp/regexp-compiler-tonode.cc | 101 +- deps/v8/src/regexp/regexp-compiler.cc | 6 + 
deps/v8/src/regexp/regexp-compiler.h | 13 + deps/v8/src/regexp/regexp-interpreter.cc | 4 + deps/v8/src/regexp/regexp-parser.cc | 5 - deps/v8/src/regexp/regexp-utils.cc | 3 +- deps/v8/src/regexp/regexp.cc | 5 +- .../riscv64/regexp-macro-assembler-riscv64.cc | 74 +- .../riscv64/regexp-macro-assembler-riscv64.h | 3 +- .../s390/regexp-macro-assembler-s390.cc | 14 +- .../regexp/s390/regexp-macro-assembler-s390.h | 3 +- deps/v8/src/roots/OWNERS | 2 +- deps/v8/src/roots/roots.h | 61 +- deps/v8/src/runtime/runtime-array.cc | 35 +- deps/v8/src/runtime/runtime-atomics.cc | 63 +- deps/v8/src/runtime/runtime-bigint.cc | 52 +- deps/v8/src/runtime/runtime-classes.cc | 42 +- deps/v8/src/runtime/runtime-collections.cc | 22 +- deps/v8/src/runtime/runtime-compiler.cc | 106 +- deps/v8/src/runtime/runtime-debug.cc | 128 +- deps/v8/src/runtime/runtime-forin.cc | 6 +- deps/v8/src/runtime/runtime-function.cc | 16 +- deps/v8/src/runtime/runtime-futex.cc | 10 +- deps/v8/src/runtime/runtime-generator.cc | 8 +- deps/v8/src/runtime/runtime-internal.cc | 192 +- deps/v8/src/runtime/runtime-intl.cc | 12 +- deps/v8/src/runtime/runtime-literals.cc | 36 +- deps/v8/src/runtime/runtime-module.cc | 7 +- deps/v8/src/runtime/runtime-numbers.cc | 15 +- deps/v8/src/runtime/runtime-object.cc | 440 +- deps/v8/src/runtime/runtime-operators.cc | 40 +- deps/v8/src/runtime/runtime-promise.cc | 153 +- deps/v8/src/runtime/runtime-proxy.cc | 40 +- deps/v8/src/runtime/runtime-regexp.cc | 82 +- deps/v8/src/runtime/runtime-scopes.cc | 68 +- deps/v8/src/runtime/runtime-strings.cc | 64 +- deps/v8/src/runtime/runtime-symbol.cc | 10 +- deps/v8/src/runtime/runtime-test-wasm.cc | 60 +- deps/v8/src/runtime/runtime-test.cc | 442 +- deps/v8/src/runtime/runtime-trace.cc | 25 +- deps/v8/src/runtime/runtime-typedarray.cc | 35 +- deps/v8/src/runtime/runtime-utils.h | 97 - deps/v8/src/runtime/runtime-wasm.cc | 259 +- deps/v8/src/runtime/runtime-weak-refs.cc | 10 +- deps/v8/src/runtime/runtime.h | 304 +- deps/v8/src/{security => 
sandbox}/OWNERS | 0 .../external-pointer-inl.h | 79 +- .../src/sandbox/external-pointer-table-inl.h | 149 + deps/v8/src/sandbox/external-pointer-table.cc | 97 + deps/v8/src/sandbox/external-pointer-table.h | 205 + .../{security => sandbox}/external-pointer.h | 18 +- deps/v8/src/sandbox/sandbox.cc | 332 ++ deps/v8/src/sandbox/sandbox.h | 195 + deps/v8/src/sandbox/sandboxed-pointer-inl.h | 49 + deps/v8/src/sandbox/sandboxed-pointer.h | 23 + deps/v8/src/security/caged-pointer-inl.h | 53 - deps/v8/src/security/caged-pointer.h | 23 - .../v8/src/security/external-pointer-table.cc | 24 - deps/v8/src/security/external-pointer-table.h | 74 - deps/v8/src/security/vm-cage.cc | 322 - deps/v8/src/security/vm-cage.h | 205 - deps/v8/src/snapshot/OWNERS | 1 - deps/v8/src/snapshot/context-deserializer.cc | 2 +- deps/v8/src/snapshot/deserializer.cc | 300 +- deps/v8/src/snapshot/deserializer.h | 16 +- .../v8/src/snapshot/embedded/embedded-data.cc | 12 +- .../snapshot/embedded/embedded-file-writer.cc | 2 +- .../platform-embedded-file-writer-base.cc | 2 +- deps/v8/src/snapshot/mksnapshot.cc | 6 +- .../v8/src/snapshot/read-only-deserializer.cc | 2 +- deps/v8/src/snapshot/read-only-serializer.cc | 2 +- .../src/snapshot/serializer-deserializer.cc | 14 +- .../v8/src/snapshot/serializer-deserializer.h | 6 +- deps/v8/src/snapshot/serializer.cc | 82 +- deps/v8/src/snapshot/serializer.h | 19 +- .../src/snapshot/shared-heap-deserializer.cc | 2 +- .../v8/src/snapshot/shared-heap-serializer.cc | 52 +- deps/v8/src/snapshot/shared-heap-serializer.h | 4 + deps/v8/src/snapshot/snapshot.cc | 37 +- deps/v8/src/snapshot/snapshot.h | 20 +- deps/v8/src/snapshot/startup-deserializer.cc | 6 +- deps/v8/src/snapshot/startup-serializer.cc | 5 +- deps/v8/src/strings/string-builder-inl.h | 2 + deps/v8/src/strings/string-hasher-inl.h | 27 +- deps/v8/src/strings/string-stream.cc | 2 +- deps/v8/src/strings/uri.cc | 9 +- deps/v8/src/temporal/temporal-parser.cc | 12 +- deps/v8/src/temporal/temporal-parser.h | 6 +- 
deps/v8/src/torque/OWNERS | 2 +- deps/v8/src/torque/ast.h | 26 +- deps/v8/src/torque/constants.h | 2 + deps/v8/src/torque/declaration-visitor.h | 18 +- deps/v8/src/torque/declarations.cc | 6 +- deps/v8/src/torque/declarations.h | 6 +- deps/v8/src/torque/earley-parser.cc | 1 + deps/v8/src/torque/earley-parser.h | 2 + deps/v8/src/torque/implementation-visitor.cc | 86 +- deps/v8/src/torque/implementation-visitor.h | 3 +- deps/v8/src/torque/kythe-data.h | 2 - deps/v8/src/torque/runtime-macro-shims.h | 13 + deps/v8/src/torque/torque-parser.cc | 175 +- deps/v8/src/torque/type-oracle.h | 4 + deps/v8/src/torque/type-visitor.cc | 10 +- deps/v8/src/torque/type-visitor.h | 2 - deps/v8/src/torque/types.cc | 6 +- deps/v8/src/torque/types.h | 1 - deps/v8/src/tracing/trace-categories.h | 5 +- .../src/trap-handler/handler-inside-posix.cc | 4 +- .../src/trap-handler/handler-inside-posix.h | 2 +- .../trap-handler/handler-outside-simulator.cc | 6 +- deps/v8/src/trap-handler/handler-shared.cc | 4 +- deps/v8/src/trap-handler/trap-handler.h | 6 +- deps/v8/src/utils/allocation.cc | 45 +- deps/v8/src/utils/allocation.h | 56 +- deps/v8/src/utils/bit-vector.h | 160 +- deps/v8/src/wasm/OWNERS | 2 - .../wasm/baseline/arm/liftoff-assembler-arm.h | 50 +- .../baseline/arm64/liftoff-assembler-arm64.h | 63 +- .../baseline/ia32/liftoff-assembler-ia32.h | 52 +- .../wasm/baseline/liftoff-assembler-defs.h | 94 +- .../v8/src/wasm/baseline/liftoff-assembler.cc | 25 +- deps/v8/src/wasm/baseline/liftoff-assembler.h | 19 +- deps/v8/src/wasm/baseline/liftoff-compiler.cc | 432 +- deps/v8/src/wasm/baseline/liftoff-register.h | 32 +- .../loong64/liftoff-assembler-loong64.h | 412 +- .../baseline/mips/liftoff-assembler-mips.h | 24 +- .../mips64/liftoff-assembler-mips64.h | 512 +- .../wasm/baseline/ppc/liftoff-assembler-ppc.h | 27 +- .../riscv64/liftoff-assembler-riscv64.h | 87 +- .../baseline/s390/liftoff-assembler-s390.h | 973 ++- .../wasm/baseline/x64/liftoff-assembler-x64.h | 59 +- deps/v8/src/wasm/c-api.cc | 
78 +- deps/v8/src/wasm/code-space-access.cc | 56 +- deps/v8/src/wasm/code-space-access.h | 31 +- deps/v8/src/wasm/compilation-environment.h | 6 +- deps/v8/src/wasm/decoder.h | 9 + deps/v8/src/wasm/function-body-decoder-impl.h | 906 +-- deps/v8/src/wasm/function-body-decoder.cc | 15 +- deps/v8/src/wasm/function-compiler.cc | 23 +- deps/v8/src/wasm/graph-builder-interface.cc | 201 +- deps/v8/src/wasm/graph-builder-interface.h | 10 +- deps/v8/src/wasm/init-expr-interface.cc | 144 +- deps/v8/src/wasm/init-expr-interface.h | 18 +- deps/v8/src/wasm/local-decl-encoder.cc | 5 - deps/v8/src/wasm/memory-protection-key.cc | 199 +- deps/v8/src/wasm/memory-protection-key.h | 4 + deps/v8/src/wasm/module-compiler.cc | 128 +- deps/v8/src/wasm/module-compiler.h | 4 +- deps/v8/src/wasm/module-decoder.cc | 598 +- deps/v8/src/wasm/module-decoder.h | 2 +- deps/v8/src/wasm/module-instantiate.cc | 480 +- deps/v8/src/wasm/module-instantiate.h | 6 +- deps/v8/src/wasm/stacks.h | 21 +- deps/v8/src/wasm/streaming-decoder.cc | 2 + deps/v8/src/wasm/streaming-decoder.h | 1 - deps/v8/src/wasm/value-type.h | 132 +- deps/v8/src/wasm/wasm-code-manager.cc | 252 +- deps/v8/src/wasm/wasm-code-manager.h | 25 +- deps/v8/src/wasm/wasm-constants.h | 27 +- deps/v8/src/wasm/wasm-debug.cc | 47 +- deps/v8/src/wasm/wasm-engine.cc | 42 +- deps/v8/src/wasm/wasm-external-refs.cc | 31 +- deps/v8/src/wasm/wasm-feature-flags.h | 14 +- deps/v8/src/wasm/wasm-features.cc | 5 +- deps/v8/src/wasm/wasm-features.h | 10 +- deps/v8/src/wasm/wasm-import-wrapper-cache.cc | 10 +- deps/v8/src/wasm/wasm-import-wrapper-cache.h | 14 +- deps/v8/src/wasm/wasm-init-expr.cc | 12 +- deps/v8/src/wasm/wasm-init-expr.h | 29 +- deps/v8/src/wasm/wasm-js.cc | 196 +- deps/v8/src/wasm/wasm-limits.h | 4 +- deps/v8/src/wasm/wasm-linkage.h | 2 +- deps/v8/src/wasm/wasm-module-builder.cc | 179 +- deps/v8/src/wasm/wasm-module-builder.h | 64 +- deps/v8/src/wasm/wasm-module.cc | 23 +- deps/v8/src/wasm/wasm-module.h | 249 +- 
deps/v8/src/wasm/wasm-objects-inl.h | 74 +- deps/v8/src/wasm/wasm-objects.cc | 294 +- deps/v8/src/wasm/wasm-objects.h | 63 +- deps/v8/src/wasm/wasm-objects.tq | 19 +- deps/v8/src/wasm/wasm-opcodes-inl.h | 16 +- deps/v8/src/wasm/wasm-opcodes.h | 409 +- deps/v8/src/wasm/wasm-serialization.cc | 207 +- deps/v8/src/wasm/wasm-subtyping.cc | 380 +- deps/v8/src/wasm/wasm-subtyping.h | 74 +- deps/v8/src/wasm/wasm-value.h | 1 - deps/v8/src/web-snapshot/web-snapshot.cc | 1633 ++++-- deps/v8/src/web-snapshot/web-snapshot.h | 168 +- deps/v8/src/zone/accounting-allocator.cc | 2 +- deps/v8/src/zone/zone.cc | 26 +- deps/v8/src/zone/zone.h | 24 +- deps/v8/test/benchmarks/benchmarks.status | 2 + deps/v8/test/cctest/BUILD.gn | 7 +- deps/v8/test/cctest/cctest.cc | 49 +- deps/v8/test/cctest/cctest.h | 108 +- deps/v8/test/cctest/cctest.status | 50 +- deps/v8/test/cctest/compiler/call-tester.h | 4 + deps/v8/test/cctest/compiler/codegen-tester.h | 2 + .../test/cctest/compiler/function-tester.cc | 2 +- .../cctest/compiler/test-code-generator.cc | 18 +- .../test-concurrent-shared-function-info.cc | 7 +- .../test/cctest/compiler/test-gap-resolver.cc | 20 +- .../cctest/compiler/test-js-typed-lowering.cc | 3 - .../compiler/test-representation-change.cc | 14 +- .../test-run-bytecode-graph-builder.cc | 2 +- .../test/cctest/compiler/test-run-machops.cc | 4 +- .../cctest/compiler/test-run-native-calls.cc | 39 +- .../cctest/compiler/test-run-tail-calls.cc | 13 +- deps/v8/test/cctest/disasm-regex-helper.cc | 8 +- deps/v8/test/cctest/heap/heap-utils.cc | 14 +- deps/v8/test/cctest/heap/test-alloc.cc | 2 +- deps/v8/test/cctest/heap/test-compaction.cc | 15 +- .../cctest/heap/test-concurrent-allocation.cc | 2 +- .../cctest/heap/test-concurrent-marking.cc | 9 +- .../test/cctest/heap/test-embedder-tracing.cc | 28 +- deps/v8/test/cctest/heap/test-heap.cc | 202 +- .../cctest/heap/test-incremental-marking.cc | 7 +- deps/v8/test/cctest/heap/test-lab.cc | 30 +- deps/v8/test/cctest/heap/test-mark-compact.cc | 25 
+- deps/v8/test/cctest/heap/test-spaces.cc | 52 +- .../test/cctest/heap/test-weak-references.cc | 13 +- .../bytecode-expectations-printer.cc | 2 +- .../ArrayLiterals.golden | 8 +- .../AsyncGenerators.golden | 26 +- .../bytecode_expectations/AsyncModules.golden | 56 +- .../CallAndSpread.golden | 34 +- .../ClassAndSuperClass.golden | 24 +- .../CompoundExpressions.golden | 8 +- .../CountOperators.golden | 18 +- .../CreateArguments.golden | 4 +- .../CreateRestParameter.golden | 6 +- .../DestructuringAssignment.golden | 61 +- .../bytecode_expectations/ForAwaitOf.golden | 108 +- .../bytecode_expectations/ForIn.golden | 10 +- .../bytecode_expectations/ForOf.golden | 36 +- .../bytecode_expectations/ForOfLoop.golden | 92 +- .../bytecode_expectations/Generators.golden | 20 +- .../bytecode_expectations/IIFE.golden | 34 +- .../bytecode_expectations/LoadGlobal.golden | 256 +- .../bytecode_expectations/Modules.golden | 4 +- .../bytecode_expectations/NewAndSpread.golden | 28 +- .../ObjectLiterals.golden | 20 +- .../PrivateAccessorAccess.golden | 32 +- .../PrivateClassFieldAccess.golden | 12 +- .../PrivateClassFields.golden | 10 +- .../PrivateMethodAccess.golden | 165 +- .../bytecode_expectations/PropertyCall.golden | 272 +- .../PropertyLoadStore.golden | 16 +- .../PropertyLoads.golden | 528 +- .../PropertyStores.golden | 1052 ++-- .../PublicClassFields.golden | 10 +- .../RegExpLiterals.golden | 2 +- .../StandardForLoop.golden | 32 +- .../StaticClassFields.golden | 10 +- .../StaticPrivateMethodAccess.golden | 92 +- .../bytecode_expectations/StoreGlobal.golden | 512 +- .../SuperCallAndSpread.golden | 24 +- .../TopLevelObjectLiterals.golden | 2 +- .../cctest/interpreter/interpreter-tester.h | 2 +- .../interpreter/test-bytecode-generator.cc | 33 +- .../cctest/interpreter/test-interpreter.cc | 34 +- deps/v8/test/cctest/test-api-accessors.cc | 39 + deps/v8/test/cctest/test-api-interceptors.cc | 86 +- deps/v8/test/cctest/test-api-stack-traces.cc | 55 + deps/v8/test/cctest/test-api.cc | 
280 +- deps/v8/test/cctest/test-array-list.cc | 3 +- deps/v8/test/cctest/test-assembler-arm.cc | 59 +- deps/v8/test/cctest/test-assembler-arm64.cc | 126 +- deps/v8/test/cctest/test-assembler-ia32.cc | 113 +- deps/v8/test/cctest/test-assembler-loong64.cc | 8 +- deps/v8/test/cctest/test-assembler-ppc.cc | 16 +- deps/v8/test/cctest/test-assembler-riscv64.cc | 484 +- deps/v8/test/cctest/test-assembler-x64.cc | 5 +- deps/v8/test/cctest/test-bit-vector.cc | 25 +- .../test/cctest/test-code-stub-assembler.cc | 28 +- deps/v8/test/cctest/test-compiler.cc | 5 +- .../test/cctest/test-concurrent-prototype.cc | 5 +- .../test-concurrent-script-context-table.cc | 16 +- .../test-concurrent-transition-array.cc | 87 +- deps/v8/test/cctest/test-cpu-profiler.cc | 18 +- deps/v8/test/cctest/test-debug.cc | 70 +- deps/v8/test/cctest/test-descriptor-array.cc | 43 +- deps/v8/test/cctest/test-disasm-loong64.cc | 8 +- deps/v8/test/cctest/test-disasm-riscv64.cc | 50 +- deps/v8/test/cctest/test-disasm-x64.cc | 5 +- deps/v8/test/cctest/test-feedback-vector.cc | 19 +- .../test/cctest/test-field-type-tracking.cc | 28 +- deps/v8/test/cctest/test-global-handles.cc | 6 + deps/v8/test/cctest/test-heap-profiler.cc | 57 +- deps/v8/test/cctest/test-helper-riscv64.h | 1 - deps/v8/test/cctest/test-icache.cc | 28 +- .../cctest/test-inobject-slack-tracking.cc | 3 +- deps/v8/test/cctest/test-log.cc | 30 +- .../test/cctest/test-macro-assembler-arm.cc | 8 +- .../cctest/test-macro-assembler-loong64.cc | 4 +- .../test/cctest/test-macro-assembler-x64.cc | 5 +- deps/v8/test/cctest/test-parsing.cc | 19 +- deps/v8/test/cctest/test-profile-generator.cc | 19 +- deps/v8/test/cctest/test-serialize.cc | 62 +- deps/v8/test/cctest/test-shared-strings.cc | 215 +- deps/v8/test/cctest/test-strings.cc | 43 +- .../cctest/test-swiss-name-dictionary-csa.cc | 9 +- .../test-swiss-name-dictionary-infra.cc | 13 +- deps/v8/test/cctest/test-temporal-parser.cc | 112 +- .../v8/test/cctest/test-thread-termination.cc | 10 + 
deps/v8/test/cctest/test-transitions.cc | 68 +- deps/v8/test/cctest/test-transitions.h | 7 +- .../test/cctest/test-unwinder-code-pages.cc | 8 +- deps/v8/test/cctest/test-utils-arm64.cc | 140 +- deps/v8/test/cctest/test-utils-arm64.h | 7 +- deps/v8/test/cctest/test-web-snapshots.cc | 214 +- deps/v8/test/cctest/wasm/test-gc.cc | 389 +- .../cctest/wasm/test-jump-table-assembler.cc | 23 +- .../cctest/wasm/test-liftoff-inspection.cc | 4 +- .../test/cctest/wasm/test-run-wasm-atomics.cc | 6 +- .../cctest/wasm/test-run-wasm-atomics64.cc | 16 +- .../cctest/wasm/test-run-wasm-bulk-memory.cc | 31 +- .../cctest/wasm/test-run-wasm-exceptions.cc | 1 - .../cctest/wasm/test-run-wasm-interpreter.cc | 9 +- .../test/cctest/wasm/test-run-wasm-module.cc | 2 + .../cctest/wasm/test-run-wasm-relaxed-simd.cc | 18 +- .../cctest/wasm/test-run-wasm-wrappers.cc | 11 +- deps/v8/test/cctest/wasm/test-run-wasm.cc | 5 - .../cctest/wasm/test-streaming-compilation.cc | 3 + .../wasm/test-wasm-import-wrapper-cache.cc | 34 +- deps/v8/test/cctest/wasm/test-wasm-metrics.cc | 1 + .../cctest/wasm/test-wasm-serialization.cc | 5 +- deps/v8/test/cctest/wasm/test-wasm-stack.cc | 14 +- .../cctest/wasm/test-wasm-trap-position.cc | 15 +- deps/v8/test/cctest/wasm/wasm-atomics-utils.h | 12 +- deps/v8/test/cctest/wasm/wasm-run-utils.cc | 25 +- deps/v8/test/common/wasm/test-signatures.h | 42 +- deps/v8/test/common/wasm/wasm-interpreter.cc | 79 +- deps/v8/test/common/wasm/wasm-macro-gen.h | 18 +- .../v8/test/common/wasm/wasm-module-runner.cc | 2 - .../debug-evaluate-no-side-effect-builtins.js | 19 + ...g-evaluate-no-side-effect-runtime-check.js | 8 +- deps/v8/test/debugger/debugger.status | 7 - deps/v8/test/fuzzer/fuzzer-support.cc | 10 +- deps/v8/test/fuzzer/fuzzer.status | 1 + deps/v8/test/fuzzer/inspector/regress-1297964 | 411 ++ deps/v8/test/fuzzer/wasm-compile.cc | 292 +- deps/v8/test/fuzzer/wasm-fuzzer-common.cc | 274 +- deps/v8/test/fuzzer/wasm_corpus.tar.gz.sha1 | 2 +- .../async-instrumentation-expected.txt | 
4 +- .../debugger/async-instrumentation.js | 8 +- .../async-promise-late-then-expected.txt | 6 +- .../debugger/async-promise-late-then.js | 7 +- .../debugger/async-set-timeout-expected.txt | 8 +- .../inspector/debugger/async-set-timeout.js | 7 +- .../async-stack-created-frame-expected.txt | 30 +- .../debugger/async-stack-created-frame.js | 8 +- .../async-stack-for-promise-expected.txt | 42 +- .../debugger/async-stack-for-promise.js | 7 +- .../async-stack-load-more-expected.txt | 2 + .../debugger/async-stack-load-more.js | 7 +- .../debugger/call-frame-url-expected.txt | 6 +- .../class-private-methods-expected.txt | 38 +- ...-private-methods-nested-super-expected.txt | 66 + .../class-private-methods-nested-super.js | 79 + .../debugger/class-private-methods.js | 10 +- ...o-location-target-call-frames-expected.txt | 6 +- ...continue-to-location-target-call-frames.js | 16 +- .../debugger/destroy-in-break-program2.js | 4 +- .../debugger/external-stack-trace.js | 10 +- .../debugger/framework-break-expected.txt | 4 +- .../inspector/debugger/framework-break.js | 16 +- .../framework-nested-scheduled-break.js | 16 +- .../debugger/framework-precise-ranges.js | 7 +- .../debugger/framework-stepping-expected.txt | 62 +- .../inspector/debugger/framework-stepping.js | 14 +- ...possible-breakpoints-after-gc-expected.txt | 6 +- .../get-possible-breakpoints-after-gc.js | 3 - .../debugger/other-pause-reasons-expected.txt | 34 +- .../inspector/debugger/other-pause-reasons.js | 79 +- .../debugger/pause-at-negative-offset.js | 2 +- ...e-inside-blackboxed-optimized-expected.txt | 2 + .../pause-inside-blackboxed-optimized.js | 7 +- .../resource-name-to-url-expected.txt | 6 +- .../set-breakpoint-after-gc-expected.txt | 4 +- ...breakpoint-on-instrumentation-expected.txt | 26 + .../set-breakpoint-on-instrumentation.js | 87 + ...et-instrumentation-breakpoint-expected.txt | 37 + .../set-instrumentation-breakpoint.js | 95 + .../debugger/step-into-break-on-async-call.js | 2 + 
...step-into-external-async-task-expected.txt | 2 + .../debugger/step-into-external-async-task.js | 15 +- .../step-into-next-script-expected.txt | 10 +- .../debugger/step-into-next-script.js | 14 +- .../step-out-async-await-expected.txt | 32 +- .../debugger/step-out-async-await.js | 94 +- .../debugger/wasm-externref-global.js | 2 - .../debugger/wasm-gc-breakpoints-expected.txt | 6 +- .../inspector/debugger/wasm-gc-breakpoints.js | 10 +- .../wasm-gc-in-debug-break-expected.txt | 2 +- .../debugger/wasm-gc-in-debug-break.js | 2 +- ...sm-instrumentation-breakpoint-expected.txt | 66 + .../wasm-instrumentation-breakpoint.js | 115 +- ...breakpoint-on-instrumentation-expected.txt | 49 + .../wasm-set-breakpoint-on-instrumentation.js | 106 + .../inspector/debugger/wasm-set-breakpoint.js | 7 +- .../debugger/wasm-step-after-trap.js | 3 +- .../heap-snapshot-js-weak-refs-expected.txt | 7 + .../heap-snapshot-js-weak-refs.js | 129 + deps/v8/test/inspector/inspector-test.cc | 35 +- deps/v8/test/inspector/inspector.status | 9 +- deps/v8/test/inspector/isolate-data.cc | 19 + deps/v8/test/inspector/protocol-test.js | 8 +- .../regress-crbug-1220203-expected.txt | 8 + .../regress/regress-crbug-1220203.js | 42 + .../regress-crbug-1281031-expected.txt | 2 + .../regress/regress-crbug-1281031.js | 9 + .../regress-crbug-1283049-expected.txt | 5 + .../regress/regress-crbug-1283049.js | 29 + .../regress-crbug-1290861-expected.txt | 4 + .../regress/regress-crbug-1290861.js | 39 + .../client-console-api-message-expected.txt | 4 +- .../runtime/console-context-expected.txt | 38 +- .../test/inspector/runtime/console-context.js | 9 +- .../runtime/console-formatter-expected.txt | 700 +++ .../inspector/runtime/console-formatter.js | 144 + ...console-message-before-enable-expected.txt | 93 + .../runtime/console-message-before-enable.js | 37 + .../runtime/error-stack-expected.txt | 420 ++ .../error-stack-trace-limit-expected.txt | 820 +++ .../runtime/error-stack-trace-limit.js | 61 + 
deps/v8/test/inspector/runtime/error-stack.js | 39 + .../inspector/runtime/es6-module-expected.txt | 2 +- .../get-exception-details-expected.txt | 112 + .../runtime/get-exception-details.js | 49 + .../set-max-call-stack-size-expected.txt | 146 +- .../runtime/set-max-call-stack-size.js | 106 +- ...gger-stepping-and-breakpoints-expected.txt | 10 +- .../debugger-stepping-and-breakpoints.js | 14 +- .../test/intl/enumeration/calendar-sorted.js | 2 - .../enumeration/callendar-syntax-valid.js | 2 - .../test/intl/enumeration/collation-sorted.js | 2 - .../enumeration/collation-syntax-valid.js | 2 - .../test/intl/enumeration/currency-sorted.js | 2 - .../intl/enumeration/currency-syntax-valid.js | 2 - .../numberingSystem-no-algorithm.js | 2 - .../enumeration/numberingSystem-sorted.js | 2 - .../numberingSystem-syntax-valid.js | 2 - .../supported-values-of-invalid-key.js | 2 - .../enumeration/supported-values-of-name.js | 2 - .../supported-values-of-property.js | 2 - .../supported-values-of-valid-key.js | 2 - .../test/intl/enumeration/timeZone-sorted.js | 2 - deps/v8/test/intl/enumeration/unit-sorted.js | 2 - deps/v8/test/intl/intl.status | 3 +- deps/v8/test/intl/locale/locale-calendars.js | 2 - deps/v8/test/intl/locale/locale-collations.js | 2 - .../intl/locale/locale-info-check-property.js | 2 - .../locale/locale-info-check-return-types.js | 2 - deps/v8/test/intl/locale/locale-info-ext.js | 2 - .../intl/locale/locale-info-no-undefined.js | 2 - .../locale/locale-info-timezones-sorted.js | 2 - .../intl/locale/locale-numberingSystems.js | 2 - .../intl/number-format/format-range-v3.js | 158 + .../rounding-increment-resolved-match-v3.js | 13 + .../number-format/rounding-increment-v3.js | 23 + .../rounding-increment-value-v3.js | 23 + .../number-format/rounding-mode-table-v3.js | 30 + .../intl/number-format/rounding-mode-v3.js | 60 + .../intl/number-format/sign-display-v3.js | 29 + ...ailing-zero-display-resolved-options-v3.js | 19 + .../number-format/trailing-zero-display-v3.js | 
24 + .../intl/number-format/use-grouping-v3.js | 114 + .../v8/test/intl/plural-rules/select-range.js | 7 + deps/v8/test/intl/testcfg.py | 6 +- deps/v8/test/js-perf-test/Array/includes.js | 67 + deps/v8/test/js-perf-test/Array/index-of.js | 67 + deps/v8/test/js-perf-test/Array/run.js | 27 +- ...daKeyedProperty.js => GetKeyedProperty.js} | 0 ...daNamedProperty.js => GetNamedProperty.js} | 0 deps/v8/test/js-perf-test/JSTests2.json | 21 +- deps/v8/test/js-perf-test/JSTests3.json | 12 +- deps/v8/test/message/README.md | 4 - .../test/message/asm-assignment-undeclared.js | 2 +- .../test/message/asm-function-mismatch-def.js | 2 +- .../test/message/asm-function-mismatch-use.js | 2 +- .../v8/test/message/asm-function-redefined.js | 2 +- .../v8/test/message/asm-function-undefined.js | 2 +- .../asm-function-variable-collision.js | 2 +- .../message/asm-import-wrong-annotation.js | 2 +- .../test/message/asm-import-wrong-object.js | 2 +- .../v8/test/message/asm-linking-bogus-heap.js | 2 +- .../test/message/asm-linking-bogus-stdlib.js | 2 +- .../test/message/asm-linking-missing-heap.js | 2 +- .../asm-missing-parameter-annotation.js | 2 +- .../message/asm-missing-return-annotation.js | 2 +- .../v8/test/message/asm-table-mismatch-def.js | 2 +- .../v8/test/message/asm-table-mismatch-use.js | 2 +- deps/v8/test/message/asm-table-redefined.js | 2 +- deps/v8/test/message/asm-table-undefined.js | 2 +- .../message/asm-table-variable-collision.js | 2 +- deps/v8/test/message/fail/console.js | 2 - deps/v8/test/message/fail/console.out | 6 +- .../fail/data-view-invalid-length-1.js | 6 + .../fail/data-view-invalid-length-1.out | 6 + .../fail/data-view-invalid-length-2.js | 6 + .../fail/data-view-invalid-length-2.out | 6 + .../fail/data-view-invalid-length-3.js | 6 + .../fail/data-view-invalid-length-3.out | 6 + deps/v8/test/message/fail/settimeout.js | 2 - deps/v8/test/message/fail/settimeout.out | 4 +- deps/v8/test/message/message.status | 7 +- .../message/mjsunit/fail/assert_not_same.out | 2 
+- .../test/message/mjsunit/fail/assert_true.out | 2 +- .../success/assert-promise-result-rejects.js | 2 - .../assert-promise-result-resolves-empty.js | 2 - .../success/assert-promise-result-resolves.js | 2 - deps/v8/test/message/testcfg.py | 3 +- .../test/message/wasm-finish-compilation.js | 2 - .../test/message/wasm-function-name-async.js | 2 +- .../message/wasm-function-name-streaming.js | 2 +- .../wasm-module-and-function-name-async.js | 2 +- ...wasm-module-and-function-name-streaming.js | 2 +- .../v8/test/message/wasm-module-name-async.js | 2 +- .../message/wasm-module-name-streaming.js | 2 +- deps/v8/test/message/wasm-no-name-async.js | 2 +- .../v8/test/message/wasm-no-name-streaming.js | 2 +- deps/v8/test/message/wasm-trace-liftoff.js | 2 +- .../test/message/wasm-trace-memory-liftoff.js | 2 +- deps/v8/test/message/wasm-trace-memory.js | 2 +- deps/v8/test/message/wasm-trace-turbofan.js | 2 +- .../weakref-finalizationregistry-error.js | 1 - deps/v8/test/mjsunit/BUILD.gn | 7 +- deps/v8/test/mjsunit/asm/asm-validation.js | 28 +- .../mjsunit/baseline/batch-compilation.js | 2 +- .../compiler/abstract-equal-receiver.js | 20 +- .../compiler/bound-functions-serialize.js | 4 +- .../call-with-arraylike-or-spread-2.js | 2 +- .../call-with-arraylike-or-spread-3.js | 4 +- .../call-with-arraylike-or-spread-4.js | 2 +- .../call-with-arraylike-or-spread-5.js | 4 +- .../call-with-arraylike-or-spread-6.js | 4 +- .../call-with-arraylike-or-spread-7.js | 6 +- .../compiler/call-with-arraylike-or-spread.js | 66 +- .../test/mjsunit/compiler/catch-block-load.js | 4 +- .../mjsunit/compiler/concurrent-inlining-1.js | 2 +- .../mjsunit/compiler/concurrent-inlining-2.js | 2 +- .../compiler/constant-fold-add-static.js | 2 +- .../compiler/constant-fold-cow-array.js | 16 +- .../compiler/construct-bound-function.js | 2 +- .../test/mjsunit/compiler/construct-object.js | 2 +- .../mjsunit/compiler/construct-receiver.js | 2 +- .../test/mjsunit/compiler/deopt-pretenure.js | 2 +- 
.../compiler/diamond-followedby-branch.js | 2 +- .../mjsunit/compiler/fast-api-calls-wasm.js | 141 + .../compiler/inlined-call-polymorphic.js | 2 +- .../compiler/is-being-interpreted-1.js | 2 +- .../compiler/is-being-interpreted-2.js | 2 +- .../mjsunit/compiler/js-create-arguments.js | 2 +- deps/v8/test/mjsunit/compiler/js-create.js | 2 +- .../compiler/load-elimination-const-field.js | 26 +- .../compiler/opt-higher-order-functions.js | 36 +- .../compiler/promise-resolve-stable-maps.js | 4 +- .../test/mjsunit/compiler/regress-1225607.js | 2 +- .../test/mjsunit/compiler/regress-1226988.js | 21 - .../test/mjsunit/compiler/regress-1227324.js | 2 +- .../test/mjsunit/compiler/regress-9945-1.js | 4 +- .../test/mjsunit/compiler/regress-9945-2.js | 6 +- .../compiler/regress-store-store-elim.js | 29 + .../mjsunit/compiler/serializer-accessors.js | 2 +- .../test/mjsunit/compiler/serializer-apply.js | 2 +- .../test/mjsunit/compiler/serializer-call.js | 2 +- .../compiler/serializer-dead-after-jump.js | 2 +- .../compiler/serializer-dead-after-return.js | 2 +- .../serializer-feedback-propagation-1.js | 2 +- .../serializer-feedback-propagation-2.js | 2 +- .../serializer-transition-propagation.js | 2 +- .../store-data-property-in-literal-private.js | 5 +- ...c-map-check-deprecated-maps-polymorphic.js | 35 - .../test-dynamic-map-check-deprecated-maps.js | 36 - ...test-dynamic-map-check-deprecated-maps2.js | 46 - ...test-dynamic-map-check-deprecated-maps3.js | 41 - .../test-dynamic-map-checks-poly-mono.js | 36 - .../test-dynamic-map-checks-wrong-handler.js | 33 - .../test-dynamic-map-checks-wrong-handler1.js | 32 - .../compiler/test-dynamic-map-checks.js | 45 - .../concurrent-initial-prototype-change-1.js | 2 +- .../v8/test/mjsunit/const-field-tracking-2.js | 227 - .../mjsunit/d8/d8-multiple-module-exec.js | 8 + deps/v8/test/mjsunit/d8/d8-worker.js | 17 +- deps/v8/test/mjsunit/debugPrint.js | 27 +- .../mjsunit/es6/classes-super-in-heritage.js | 49 + 
.../es6/super-ic-opt-dynamic-map-checks.js | 42 - .../harmony/array-prototype-groupby.js | 183 + .../harmony/array-prototype-groupbytomap.js | 172 + .../harmony/optional-chaining-this-private.js | 5 + .../harmony/private-brand-nested-super.js | 131 + .../harmony/private-name-surrogate-pair.js | 17 + .../mjsunit/harmony/shadowrealm-evaluate.js | 64 + .../harmony/shadowrealm-wrapped-function.js | 30 + ...nalizationregistry-independent-lifetime.js | 2 +- .../test/mjsunit/interrupt-budget-override.js | 2 +- deps/v8/test/mjsunit/maglev/00.js | 18 + deps/v8/test/mjsunit/maglev/01.js | 20 + deps/v8/test/mjsunit/maglev/02.js | 20 + deps/v8/test/mjsunit/maglev/03.js | 21 + deps/v8/test/mjsunit/maglev/04.js | 16 + deps/v8/test/mjsunit/maglev/05.js | 21 + deps/v8/test/mjsunit/maglev/06.js | 25 + deps/v8/test/mjsunit/maglev/07.js | 19 + deps/v8/test/mjsunit/maglev/08.js | 19 + deps/v8/test/mjsunit/maglev/09.js | 21 + deps/v8/test/mjsunit/maglev/10.js | 26 + deps/v8/test/mjsunit/maglev/11.js | 39 + deps/v8/test/mjsunit/maglev/12.js | 27 + deps/v8/test/mjsunit/maglev/13.js | 17 + deps/v8/test/mjsunit/maglev/14.js | 31 + deps/v8/test/mjsunit/maglev/15.js | 17 + deps/v8/test/mjsunit/maglev/16.js | 30 + deps/v8/test/mjsunit/maglev/17.js | 27 + deps/v8/test/mjsunit/maglev/18.js | 26 + deps/v8/test/mjsunit/mjsunit.js | 173 +- deps/v8/test/mjsunit/mjsunit.status | 152 +- .../test/mjsunit/optimized-array-includes.js | 358 ++ .../test/mjsunit/optimized-array-indexof.js | 360 ++ deps/v8/test/mjsunit/promise-hooks.js | 137 +- deps/v8/test/mjsunit/regress-1146106.js | 2 +- .../test/mjsunit/regress/regress-1000635.js | 2 +- .../test/mjsunit/regress/regress-1003730.js | 2 +- .../test/mjsunit/regress/regress-1076569.js | 16 - .../test/mjsunit/regress/regress-1079446.js | 17 - .../test/mjsunit/regress/regress-1083272.js | 19 - .../test/mjsunit/regress/regress-1083763.js | 19 - .../test/mjsunit/regress/regress-1084953.js | 16 - .../test/mjsunit/regress/regress-1137979.js | 21 - 
.../test/mjsunit/regress/regress-1138075.js | 27 - .../test/mjsunit/regress/regress-1138611.js | 34 - .../test/mjsunit/regress/regress-1154961.js | 42 - .../test/mjsunit/regress/regress-1163715.js | 27 - .../test/mjsunit/regress/regress-1168435.js | 2 +- .../test/mjsunit/regress/regress-1172797.js | 48 - .../test/mjsunit/regress/regress-1201114.js | 19 - .../test/mjsunit/regress/regress-1208805.js | 25 + .../test/mjsunit/regress/regress-1223733.js | 16 - .../test/mjsunit/regress/regress-1225561.js | 27 - deps/v8/test/mjsunit/regress/regress-12495.js | 21 + deps/v8/test/mjsunit/regress/regress-12580.js | 11 + deps/v8/test/mjsunit/regress/regress-12657.js | 11 + .../v8/test/mjsunit/regress/regress-385565.js | 11 +- deps/v8/test/mjsunit/regress/regress-3969.js | 2 +- deps/v8/test/mjsunit/regress/regress-4578.js | 2 +- .../v8/test/mjsunit/regress/regress-752764.js | 13 - .../v8/test/mjsunit/regress/regress-794822.js | 2 +- .../v8/test/mjsunit/regress/regress-936077.js | 3 +- ...binary-bitwise-bigint-smi-mix-opt-depot.js | 56 + .../mjsunit/regress/regress-crbug-1017159.js | 2 +- .../mjsunit/regress/regress-crbug-1031479.js | 2 +- .../mjsunit/regress/regress-crbug-1206289.js | 22 + .../mjsunit/regress/regress-crbug-1236962.js | 2 +- .../mjsunit/regress/regress-crbug-1262750.js | 36 + .../mjsunit/regress/regress-crbug-1276923.js | 30 + .../mjsunit/regress/regress-crbug-1277863.js | 12 + .../mjsunit/regress/regress-crbug-1278086.js | 79 + .../mjsunit/regress/regress-crbug-1290587.js | 14 + .../mjsunit/regress/regress-crbug-977089.js | 2 +- .../mjsunit/regress/regress-crbug-990582.js | 2 +- .../regress-unlink-closures-on-deopt.js | 4 - .../test/mjsunit/regress/regress-v8-12122.js | 20 + .../test/mjsunit/regress/regress-v8-12472.js | 10 + .../test/mjsunit/regress/regress-v8-12595.js | 18 + .../test/mjsunit/regress/regress-v8-12671.js | 74 + .../test/mjsunit/regress/regress-v8-12688.js | 33 + .../mjsunit/regress/wasm/regress-1185464.js | 1 - 
.../mjsunit/regress/wasm/regress-1200231.js | 2 +- .../mjsunit/regress/wasm/regress-12624.js | 51 + .../mjsunit/regress/wasm/regress-1271244.js | 19 + .../mjsunit/regress/wasm/regress-1271538.js | 44 + .../mjsunit/regress/wasm/regress-1272204.js | 19 + .../mjsunit/regress/wasm/regress-1279151.js | 15 + .../mjsunit/regress/wasm/regress-1282224.js | 31 + .../mjsunit/regress/wasm/regress-1283042.js | 29 + .../mjsunit/regress/wasm/regress-1283395.js | 51 + .../mjsunit/regress/wasm/regress-1284980.js | 38 + .../mjsunit/regress/wasm/regress-1286253.js | 26 + .../mjsunit/regress/wasm/regress-1289678.js | 30 + .../mjsunit/regress/wasm/regress-1290079.js | 47 + .../mjsunit/regress/wasm/regress-1294384.js | 91 + .../mjsunit/regress/wasm/regress-1296876.js | 21 + .../test/mjsunit/regress/wasm/regress-7785.js | 4 +- .../mjsunit/regress/wasm/regress-808848.js | 4 +- .../mjsunit/regress/wasm/regress-808980.js | 4 +- .../mjsunit/regress/wasm/regress-964607.js | 2 +- .../regress/wasm/regress-crbug-1172912.js | 2 +- .../regress/wasm/regress-inlining-throw.js | 71 + .../test/mjsunit/shared-memory/client-gc.js | 7 + .../shared-string-in-code-object.js | 18 + .../shared-string-in-weak-map.js | 23 + .../mjsunit/shared-memory/shared-string.js | 37 + .../shared-struct-atomics-workers.js | 41 + .../shared-memory/shared-struct-atomics.js | 35 + .../shared-memory/shared-struct-surface.js | 54 + .../shared-memory/shared-struct-workers.js | 39 + deps/v8/test/mjsunit/statistics-extension.js | 12 + .../mjsunit/temporal/calendar-constructor.js | 9 +- .../temporal/calendar-date-from-fields.js | 127 +- .../mjsunit/temporal/calendar-date-until.js | 4 +- .../test/mjsunit/temporal/calendar-fields.js | 53 +- .../v8/test/mjsunit/temporal/calendar-from.js | 2 - .../mjsunit/temporal/calendar-merge-fields.js | 15 +- .../calendar-month-day-from-fields.js | 153 +- .../test/mjsunit/temporal/calendar-month.js | 3 +- .../calendar-year-month-from-fields.js | 64 +- deps/v8/test/mjsunit/temporal/duration-abs.js | 
6 +- deps/v8/test/mjsunit/temporal/duration-add.js | 54 +- .../mjsunit/temporal/duration-constructor.js | 84 +- .../v8/test/mjsunit/temporal/duration-from.js | 66 +- .../test/mjsunit/temporal/duration-negated.js | 12 +- .../test/mjsunit/temporal/duration-valueOf.js | 3 +- .../v8/test/mjsunit/temporal/duration-with.js | 90 +- .../test/mjsunit/temporal/plain-date-add.js | 2 +- .../mjsunit/temporal/plain-date-time-from.js | 2 +- .../test/mjsunit/temporal/temporal-helpers.js | 24 +- .../mjsunit/temporal/time-zone-constructor.js | 12 + deps/v8/test/mjsunit/testcfg.py | 12 +- deps/v8/test/mjsunit/tools/foozzie.js | 2 +- deps/v8/test/mjsunit/tools/foozzie_archs.js | 4 +- .../test/mjsunit/tools/foozzie_webassembly.js | 4 +- .../tools/tickprocessor-test-large.log | 54 +- .../typedarray-growablesharedarraybuffer.js | 1345 ++++- deps/v8/test/mjsunit/typedarray-helpers.js | 72 + .../typedarray-resizablearraybuffer-detach.js | 683 ++- .../typedarray-resizablearraybuffer.js | 2650 ++++++++- deps/v8/test/mjsunit/wasm/anyfunc.js | 2 +- deps/v8/test/mjsunit/wasm/call-ref.js | 3 +- .../wasm/compiled-module-serialization.js | 2 +- .../wasm/element-segments-with-reftypes.js | 21 +- deps/v8/test/mjsunit/wasm/exceptions-api.js | 2 +- .../test/mjsunit/wasm/exceptions-externref.js | 2 +- .../test/mjsunit/wasm/extended-constants.js | 74 + .../mjsunit/wasm/externref-globals-liftoff.js | 3 +- .../v8/test/mjsunit/wasm/externref-globals.js | 2 +- .../v8/test/mjsunit/wasm/externref-liftoff.js | 3 +- deps/v8/test/mjsunit/wasm/externref.js | 3 +- deps/v8/test/mjsunit/wasm/gc-nominal.js | 157 +- deps/v8/test/mjsunit/wasm/gc-optimizations.js | 5 +- .../test/mjsunit/wasm/grow-shared-memory.js | 90 +- .../mjsunit/wasm/imported-function-types.js | 3 +- .../wasm/indirect-call-non-zero-table.js | 2 +- deps/v8/test/mjsunit/wasm/indirect-tables.js | 18 +- deps/v8/test/mjsunit/wasm/inlining.js | 84 +- deps/v8/test/mjsunit/wasm/js-api.js | 10 +- deps/v8/test/mjsunit/wasm/load-immutable.js | 137 + 
deps/v8/test/mjsunit/wasm/memory64.js | 109 + .../wasm/multi-table-element-section.js | 2 +- deps/v8/test/mjsunit/wasm/print-code.js | 4 +- .../v8/test/mjsunit/wasm/reference-globals.js | 2 + deps/v8/test/mjsunit/wasm/reference-tables.js | 73 +- .../serialization-with-compilation-hints.js | 61 + .../test/mjsunit/wasm/speculative-inlining.js | 22 +- deps/v8/test/mjsunit/wasm/stack-switching.js | 198 +- .../test/mjsunit/wasm/table-access-liftoff.js | 3 +- deps/v8/test/mjsunit/wasm/table-access.js | 2 +- .../test/mjsunit/wasm/table-copy-externref.js | 2 - deps/v8/test/mjsunit/wasm/table-fill.js | 2 +- .../test/mjsunit/wasm/table-grow-from-wasm.js | 2 +- .../wasm/test-partial-serialization.js | 5 +- .../wasm/type-reflection-with-externref.js | 13 +- deps/v8/test/mjsunit/wasm/type-reflection.js | 63 +- .../test/mjsunit/wasm/wasm-module-builder.js | 205 +- deps/v8/test/mjsunit/web-snapshot-helpers.js | 0 .../mjsunit/web-snapshot/web-snapshot-1.js | 246 + .../{web-snapshot.js => web-snapshot-2.js} | 306 +- .../mjsunit/web-snapshot/web-snapshot-3.js | 104 + .../web-snapshot/web-snapshot-externals.js | 83 + .../web-snapshot/web-snapshot-helpers.js | 24 + deps/v8/test/mkgrokdump/mkgrokdump.cc | 20 +- deps/v8/test/mozilla/mozilla.status | 10 +- deps/v8/test/test262/BUILD.gn | 2 +- deps/v8/test/test262/test262.status | 2690 ++++++++- deps/v8/test/test262/testcfg.py | 18 +- deps/v8/test/torque/test-torque.tq | 4 +- deps/v8/test/unittests/BUILD.gn | 29 +- .../unittests/api/interceptor-unittest.cc | 4 +- .../unittests/base/platform/time-unittest.cc | 30 +- .../base/virtual-address-space-unittest.cc | 266 + .../register-configuration-unittest.cc | 16 +- .../backend/instruction-selector-unittest.h | 4 +- .../backend/instruction-sequence-unittest.cc | 10 +- .../backend/instruction-sequence-unittest.h | 1 + .../compiler/backend/instruction-unittest.cc | 4 +- .../compiler/bytecode-analysis-unittest.cc | 25 +- .../effect-control-linearizer-unittest.cc | 212 - 
.../test/unittests/compiler/graph-unittest.cc | 1 - .../compiler/js-typed-lowering-unittest.cc | 3 - .../compiler/linkage-tail-call-unittest.cc | 6 +- .../regalloc/move-optimizer-unittest.cc | 2 +- .../instruction-selector-riscv64-unittest.cc | 71 +- .../compiler/state-values-utils-unittest.cc | 12 +- .../execution/microtask-queue-unittest.cc | 24 +- .../heap/base/active-system-pages-unittest.cc | 81 + .../heap/cppgc-js/unified-heap-unittest.cc | 89 +- .../heap/cppgc-js/unified-heap-utils.cc | 6 +- .../heap/cppgc/ephemeron-pair-unittest.cc | 5 +- .../cppgc/explicit-management-unittest.cc | 8 + .../heap/cppgc/gc-invoker-unittest.cc | 2 + .../heap/cppgc/heap-growing-unittest.cc | 5 + .../unittests/heap/cppgc/marker-unittest.cc | 8 +- .../heap/cppgc/marking-visitor-unittest.cc | 8 +- .../unittests/heap/cppgc/minor-gc-unittest.cc | 348 +- .../heap/cppgc/page-memory-unittest.cc | 17 +- .../cppgc/stats-collector-scopes-unittest.cc | 3 +- .../unittests/heap/cppgc/testing-unittest.cc | 7 +- deps/v8/test/unittests/heap/cppgc/tests.h | 4 +- .../heap/cppgc/weak-container-unittest.cc | 10 +- .../heap/cppgc/write-barrier-unittest.cc | 7 +- .../heap/embedder-tracing-unittest.cc | 31 +- .../test/unittests/heap/gc-tracer-unittest.cc | 183 +- deps/v8/test/unittests/heap/heap-unittest.cc | 4 +- deps/v8/test/unittests/heap/heap-utils.cc | 3 +- .../unittests/heap/local-heap-unittest.cc | 2 +- .../heap/marking-worklist-unittest.cc | 4 +- .../test/unittests/heap/slot-set-unittest.cc | 13 +- .../test/unittests/heap/unmapper-unittest.cc | 31 +- .../bytecode-array-builder-unittest.cc | 29 +- .../bytecode-array-iterator-unittest.cc | 6 +- ...bytecode-array-random-iterator-unittest.cc | 32 +- .../interpreter/bytecode-decoder-unittest.cc | 31 +- .../interpreter/bytecode-node-unittest.cc | 4 +- .../bytecode-register-optimizer-unittest.cc | 8 +- .../interpreter/bytecodes-unittest.cc | 6 +- ...ult-worker-threads-task-runner-unittest.cc | 2 +- .../logging/runtime-call-stats-unittest.cc | 6 +- 
.../osr-optimized-code-cache-unittest.cc | 40 +- .../objects/value-serializer-unittest.cc | 1444 +++-- .../objects/wasm-backing-store-unittest.cc | 2 +- deps/v8/test/unittests/run-all-unittests.cc | 4 +- .../unittests/sandbox/sandbox-unittest.cc | 155 + .../security/virtual-memory-cage-unittest.cc | 152 - .../tasks/background-compile-task-unittest.cc | 10 +- .../test/unittests/torque/torque-unittest.cc | 13 + deps/v8/test/unittests/unittests.status | 4 + .../unittests/utils/allocation-unittest.cc | 15 +- .../wasm/function-body-decoder-unittest.cc | 423 +- .../wasm/memory-protection-unittest.cc | 9 +- .../unittests/wasm/module-decoder-unittest.cc | 552 +- .../test/unittests/wasm/subtyping-unittest.cc | 160 +- .../wasm/trap-handler-x64-unittest.cc | 12 +- .../unittests/wasm/wasm-compiler-unittest.cc | 2 +- deps/v8/test/wasm-api-tests/callbacks.cc | 6 +- deps/v8/test/wasm-api-tests/hostref.cc | 10 +- deps/v8/test/wasm-api-tests/reflect.cc | 4 +- deps/v8/test/wasm-api-tests/serialize.cc | 18 + deps/v8/test/wasm-api-tests/wasm-api-test.h | 2 + deps/v8/test/wasm-js/tests.tar.gz.sha1 | 2 +- deps/v8/test/wasm-js/wasm-js.status | 12 +- deps/v8/test/wasm-spec-tests/testcfg.py | 2 +- .../v8/test/wasm-spec-tests/tests.tar.gz.sha1 | 2 +- deps/v8/third_party/google_benchmark/BUILD.gn | 6 +- .../precompiled_headers/benchmark/export.h | 31 + deps/v8/third_party/googletest/BUILD.gn | 2 + .../src/googletest/include/gtest/gtest_prod.h | 4 +- .../third_party/inspector_protocol/BUILD.gn | 2 - .../third_party/inspector_protocol/README.v8 | 2 +- .../inspector_protocol/code_generator.py | 7 + .../convert_protocol_to_json.py | 10 +- .../inspector_protocol/crdtp/cbor.cc | 10 +- .../crdtp/protocol_core_test.cc | 23 +- .../crdtp/serializer_traits.h | 158 - .../crdtp/serializer_traits_test.cc | 226 - .../inspector_protocol/lib/Forward_h.template | 3 - .../lib/ValueConversions_cpp.template | 2 +- .../inspector_protocol/lib/Values_h.template | 13 +- .../lib/base_string_adapter_cc.template | 202 
+- .../lib/base_string_adapter_h.template | 77 +- deps/v8/third_party/test262-harness/LICENSE | 28 + deps/v8/third_party/test262-harness/OWNERS | 2 + deps/v8/third_party/test262-harness/README.md | 19 + deps/v8/third_party/test262-harness/README.v8 | 15 + .../third_party/test262-harness/__init__.py | 1 + .../test262-harness/excludelist.xml | 3 + .../test262-harness/src/__init__.py | 0 .../test262-harness/src/_common.py | 18 + .../test262-harness/src/_monkeyYaml.py | 147 + .../test262-harness/src/_packager.py | 335 ++ .../test262-harness/src/_packagerConfig.py | 117 + .../test262-harness/src/parseTestRecord.py | 113 + .../src/templates/runner.bestPractice.html | 170 + .../src/templates/runner.intl402.html | 192 + .../src/templates/runner.test262.html | 203 + .../test262-harness/src/test262.py | 664 +++ .../test262-harness/test/README.md | 11 + .../test262-harness/test/fixtures/negative.js | 11 + .../test/fixtures/test262-old-headers.js | 19 + .../test/fixtures/test262-yaml-headers.js | 18 + .../test262-harness/test/test_common.py | 64 + .../test262-harness/test/test_monkeyYaml.py | 210 + .../test/test_parseTestRecord.py | 183 + .../test262-harness/test/test_test262.py | 274 + deps/v8/third_party/zlib/google/BUILD.gn | 2 + .../zlib/google/compression_utils_unittest.cc | 16 +- deps/v8/third_party/zlib/google/redact.h | 31 + deps/v8/third_party/zlib/google/zip.cc | 112 +- deps/v8/third_party/zlib/google/zip.h | 58 +- .../third_party/zlib/google/zip_internal.cc | 2 +- .../v8/third_party/zlib/google/zip_internal.h | 2 +- deps/v8/third_party/zlib/google/zip_reader.cc | 560 +- deps/v8/third_party/zlib/google/zip_reader.h | 310 +- .../zlib/google/zip_reader_unittest.cc | 699 ++- .../third_party/zlib/google/zip_unittest.cc | 337 +- deps/v8/third_party/zlib/google/zip_writer.cc | 15 +- deps/v8/third_party/zlib/google/zip_writer.h | 2 +- deps/v8/tools/BUILD.gn | 2 +- deps/v8/tools/PRESUBMIT.py | 5 + deps/v8/tools/callstats-from-telemetry.sh | 15 +- deps/v8/tools/callstats.html 
| 24 +- deps/v8/tools/callstats.py | 17 +- deps/v8/tools/clusterfuzz/PRESUBMIT.py | 8 - .../tools/clusterfuzz/{ => foozzie}/BUILD.gn | 2 +- .../v8/tools/clusterfuzz/foozzie/PRESUBMIT.py | 28 + .../{ => foozzie}/testdata/baseline/d8.py | 0 .../testdata/baseline/v8_build_config.json | 0 .../{ => foozzie}/testdata/build1/d8.py | 0 .../testdata/build1/v8_build_config.json | 0 .../{ => foozzie}/testdata/build2/d8.py | 0 .../testdata/build2/v8_build_config.json | 0 .../{ => foozzie}/testdata/build3/d8.py | 0 .../testdata/build3/v8_build_config.json | 0 .../{ => foozzie}/testdata/failure_output.txt | 4 +- .../testdata/failure_output_arch.txt | 4 +- .../testdata/failure_output_second.txt | 4 +- .../{ => foozzie}/testdata/fuzz-123.js | 0 .../testdata/smoke_test_output.txt | 4 +- .../{ => foozzie}/toolchain/BUILD.gn | 0 .../clusterfuzz/{ => foozzie}/v8_commands.py | 20 +- .../clusterfuzz/{ => foozzie}/v8_foozzie.py | 0 .../v8_foozzie_harness_adjust.js | 0 .../{ => foozzie}/v8_foozzie_test.py | 0 .../{ => foozzie}/v8_fuzz_config.py | 0 .../{ => foozzie}/v8_fuzz_experiments.json | 0 .../{ => foozzie}/v8_fuzz_flags.json | 5 +- .../clusterfuzz/{ => foozzie}/v8_mock.js | 0 .../{ => foozzie}/v8_mock_archs.js | 0 .../{ => foozzie}/v8_mock_webassembly.js | 0 .../{ => foozzie}/v8_smoke_tests.js | 0 .../{ => foozzie}/v8_suppressions.js | 0 .../{ => foozzie}/v8_suppressions.py | 0 .../tools/clusterfuzz/js_fuzzer/exceptions.js | 4 +- deps/v8/tools/clusterfuzz/trials/BUILD.gn | 8 + deps/v8/tools/clusterfuzz/trials/PRESUBMIT.py | 57 + .../trials/clusterfuzz_trials_config.json | 38 + deps/v8/tools/codemap.mjs | 51 +- deps/v8/tools/compare_torque_output.py | 2 +- deps/v8/tools/csvparser.mjs | 7 +- .../v8/tools/debug_helper/debug-macro-shims.h | 25 + .../tools/debug_helper/gen-heap-constants.py | 6 +- .../debug_helper/get-object-properties.cc | 4 +- deps/v8/tools/debug_helper/heap-constants.cc | 10 + deps/v8/tools/debug_helper/heap-constants.h | 1 + deps/v8/tools/dev/gm.py | 14 +- 
deps/v8/tools/dumpcpp.mjs | 1 + deps/v8/tools/gcmole/BUILD.gn | 10 +- deps/v8/tools/gcmole/GCMOLE.gn | 6 - deps/v8/tools/gcmole/Makefile | 11 +- deps/v8/tools/gcmole/README | 2 +- deps/v8/tools/gcmole/bootstrap.sh | 36 +- deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1 | 2 +- deps/v8/tools/gcmole/gcmole.cc | 303 +- deps/v8/tools/gcmole/gcmole.py | 703 ++- deps/v8/tools/gcmole/package.sh | 6 +- deps/v8/tools/gdbinit | 15 +- deps/v8/tools/gen-keywords-gen-h.py | 3 +- deps/v8/tools/gen-postmortem-metadata.py | 14 +- .../tools/generate-header-include-checks.py | 17 +- deps/v8/tools/grokdump.py | 297 +- .../heap-layout-viewer-template.html | 14 + .../tools/heap-layout/heap-layout-viewer.mjs | 225 + .../heap-size-trend-viewer-template.html | 14 + .../heap-layout/heap-size-trend-viewer.mjs | 266 + deps/v8/tools/heap-layout/index.css | 24 + deps/v8/tools/heap-layout/index.html | 72 + .../v8/tools/heap-layout/space-categories.mjs | 32 + .../tools/heap-layout/trace-file-reader.mjs | 110 + deps/v8/tools/heap-stats/categories.js | 2 +- deps/v8/tools/heap-stats/index.html | 31 +- deps/v8/tools/heap-stats/trace-file-reader.js | 21 + deps/v8/tools/index.html | 8 +- deps/v8/tools/js/helper.mjs | 53 + .../view => js}/log-file-reader-template.html | 14 +- deps/v8/tools/js/web-api-helper.mjs | 261 + deps/v8/tools/logreader.mjs | 20 +- deps/v8/tools/mb/PRESUBMIT.py | 16 +- deps/v8/tools/mb/mb.py | 2 + deps/v8/tools/predictable_wrapper.py | 10 +- .../tools/process-wasm-compilation-times.py | 11 +- deps/v8/tools/profile.mjs | 81 +- deps/v8/tools/release/PRESUBMIT.py | 8 +- deps/v8/tools/release/auto_push.py | 5 +- deps/v8/tools/release/auto_roll.py | 5 +- deps/v8/tools/release/auto_tag.py | 204 - deps/v8/tools/release/check_clusterfuzz.py | 231 - deps/v8/tools/release/common_includes.py | 30 +- deps/v8/tools/release/create_release.py | 11 +- deps/v8/tools/release/git_recipes.py | 2 +- deps/v8/tools/release/list_deprecated.py | 35 +- deps/v8/tools/release/merge_to_branch.py | 5 +- 
deps/v8/tools/release/roll_merge.py | 5 +- deps/v8/tools/release/script_test.py | 5 +- .../tools/release/search_related_commits.py | 221 - deps/v8/tools/release/test_scripts.py | 11 +- .../release/test_search_related_commits.py | 274 - deps/v8/tools/run_perf.py | 25 +- .../v8/tools/sanitizers/tsan_suppressions.txt | 4 + deps/v8/tools/splaytree.mjs | 34 +- deps/v8/tools/system-analyzer/helper.mjs | 53 +- deps/v8/tools/system-analyzer/index.css | 1 + deps/v8/tools/system-analyzer/index.html | 2 +- deps/v8/tools/system-analyzer/index.mjs | 29 +- deps/v8/tools/system-analyzer/log/code.mjs | 4 + deps/v8/tools/system-analyzer/log/tick.mjs | 24 +- deps/v8/tools/system-analyzer/processor.mjs | 11 +- .../view/code-panel-template.html | 11 +- .../tools/system-analyzer/view/code-panel.mjs | 176 +- deps/v8/tools/system-analyzer/view/helper.mjs | 156 +- .../system-analyzer/view/log-file-reader.mjs | 116 +- .../view/property-link-table.mjs | 249 +- .../system-analyzer/view/script-panel.mjs | 95 +- .../view/timeline/timeline-track-base.mjs | 37 +- .../view/timeline/timeline-track-tick.mjs | 15 +- deps/v8/tools/testrunner/PRESUBMIT.py | 5 + deps/v8/tools/testrunner/base_runner.py | 24 +- deps/v8/tools/testrunner/local/command.py | 11 +- .../v8/tools/testrunner/local/junit_output.py | 49 - deps/v8/tools/testrunner/local/variants.py | 76 +- deps/v8/tools/testrunner/objects/testcase.py | 10 +- deps/v8/tools/testrunner/outproc/base.py | 15 +- deps/v8/tools/testrunner/outproc/message.py | 8 +- deps/v8/tools/testrunner/standard_runner.py | 2 +- deps/v8/tools/testrunner/testproc/fuzzer.py | 80 +- deps/v8/tools/testrunner/testproc/progress.py | 40 - deps/v8/tools/tick-processor.html | 157 - deps/v8/tools/tickprocessor.mjs | 19 +- deps/v8/tools/torque/format-torque.py | 14 +- deps/v8/tools/turbolizer/OWNERS | 1 - .../unittests/compare_torque_output_test.py | 15 +- .../unittests/predictable_wrapper_test.py | 2 +- deps/v8/tools/unittests/run_perf_test.py | 10 +- 
deps/v8/tools/unittests/run_tests_test.py | 13 +- .../unittests/testdata/predictable_mocked.py | 2 +- .../testdata/testroot1/v8_build_config.json | 3 +- .../testdata/testroot2/v8_build_config.json | 3 +- .../testdata/testroot3/v8_build_config.json | 3 +- deps/v8/tools/v8_presubmit.py | 43 +- deps/v8/tools/v8heapconst.py | 959 +-- deps/v8/tools/v8windbg/BUILD.gn | 4 +- deps/v8/tools/wasm/code-size-factors.py | 79 + 2040 files changed, 94694 insertions(+), 43284 deletions(-) create mode 100644 deps/v8/.style.yapf create mode 100644 deps/v8/bazel/BUILD.trace_event_common delete mode 100644 deps/v8/bazel/generate-inspector-files.cmd delete mode 100755 deps/v8/bazel/generate-inspector-files.sh create mode 100644 deps/v8/bazel/requirements.in create mode 100644 deps/v8/bazel/requirements.txt create mode 100644 deps/v8/gni/release_branch_toggle.gni delete mode 100644 deps/v8/include/cppgc/internal/prefinalizer-handler.h create mode 100644 deps/v8/src/base/platform/platform-darwin.cc rename deps/v8/src/{utils => base}/pointer-with-payload.h (68%) create mode 100644 deps/v8/src/builtins/builtins-shadow-realms.cc create mode 100644 deps/v8/src/builtins/builtins-shadowrealm-gen.cc create mode 100644 deps/v8/src/builtins/builtins-struct.cc create mode 100644 deps/v8/src/builtins/builtins-temporal-gen.cc create mode 100644 deps/v8/src/codegen/arm/reglist-arm.h create mode 100644 deps/v8/src/codegen/arm64/reglist-arm64.h create mode 100644 deps/v8/src/codegen/ia32/reglist-ia32.h create mode 100644 deps/v8/src/codegen/loong64/reglist-loong64.h create mode 100644 deps/v8/src/codegen/mips/reglist-mips.h create mode 100644 deps/v8/src/codegen/mips64/reglist-mips64.h create mode 100644 deps/v8/src/codegen/ppc/reglist-ppc.h create mode 100644 deps/v8/src/codegen/register-base.h create mode 100644 deps/v8/src/codegen/reglist-base.h create mode 100644 deps/v8/src/codegen/riscv64/reglist-riscv64.h create mode 100644 deps/v8/src/codegen/s390/reglist-s390.h create mode 100644 
deps/v8/src/codegen/x64/reglist-x64.h create mode 100644 deps/v8/src/common/allow-deprecated.h create mode 100644 deps/v8/src/common/operation.h delete mode 100644 deps/v8/src/compiler/js-heap-copy-reducer.cc delete mode 100644 deps/v8/src/compiler/js-heap-copy-reducer.h create mode 100644 deps/v8/src/compiler/simplified-lowering-verifier.cc create mode 100644 deps/v8/src/compiler/simplified-lowering-verifier.h create mode 100644 deps/v8/src/compiler/wasm-loop-peeling.cc create mode 100644 deps/v8/src/compiler/wasm-loop-peeling.h create mode 100644 deps/v8/src/execution/clobber-registers.cc create mode 100644 deps/v8/src/execution/clobber-registers.h delete mode 100644 deps/v8/src/execution/runtime-profiler.cc create mode 100644 deps/v8/src/execution/tiering-manager.cc rename deps/v8/src/execution/{runtime-profiler.h => tiering-manager.h} (52%) create mode 100644 deps/v8/src/heap/allocation-result.h create mode 100644 deps/v8/src/heap/base/active-system-pages.cc create mode 100644 deps/v8/src/heap/base/active-system-pages.h create mode 100644 deps/v8/src/heap/cppgc-js/cpp-marking-state-inl.h create mode 100644 deps/v8/src/heap/cppgc-js/cpp-marking-state.h create mode 100644 deps/v8/src/heap/cppgc/remembered-set.cc create mode 100644 deps/v8/src/heap/cppgc/remembered-set.h create mode 100644 deps/v8/src/heap/embedder-tracing-inl.h rename deps/v8/src/heap/{local-allocator-inl.h => evacuation-allocator-inl.h} (76%) rename deps/v8/src/heap/{local-allocator.h => evacuation-allocator.h} (88%) create mode 100644 deps/v8/src/heap/heap-allocator-inl.h create mode 100644 deps/v8/src/heap/heap-allocator.cc create mode 100644 deps/v8/src/heap/heap-allocator.h create mode 100644 deps/v8/src/heap/reference-summarizer.cc create mode 100644 deps/v8/src/heap/reference-summarizer.h create mode 100644 deps/v8/src/maglev/DEPS create mode 100644 deps/v8/src/maglev/OWNERS create mode 100644 deps/v8/src/maglev/maglev-basic-block.h create mode 100644 
deps/v8/src/maglev/maglev-code-gen-state.h create mode 100644 deps/v8/src/maglev/maglev-code-generator.cc create mode 100644 deps/v8/src/maglev/maglev-code-generator.h create mode 100644 deps/v8/src/maglev/maglev-compilation-info.cc create mode 100644 deps/v8/src/maglev/maglev-compilation-info.h create mode 100644 deps/v8/src/maglev/maglev-compilation-unit.cc create mode 100644 deps/v8/src/maglev/maglev-compilation-unit.h create mode 100644 deps/v8/src/maglev/maglev-compiler.cc create mode 100644 deps/v8/src/maglev/maglev-compiler.h create mode 100644 deps/v8/src/maglev/maglev-concurrent-dispatcher.cc create mode 100644 deps/v8/src/maglev/maglev-concurrent-dispatcher.h create mode 100644 deps/v8/src/maglev/maglev-graph-builder.cc create mode 100644 deps/v8/src/maglev/maglev-graph-builder.h create mode 100644 deps/v8/src/maglev/maglev-graph-labeller.h create mode 100644 deps/v8/src/maglev/maglev-graph-printer.cc create mode 100644 deps/v8/src/maglev/maglev-graph-printer.h create mode 100644 deps/v8/src/maglev/maglev-graph-processor.h create mode 100644 deps/v8/src/maglev/maglev-graph.h create mode 100644 deps/v8/src/maglev/maglev-interpreter-frame-state.h create mode 100644 deps/v8/src/maglev/maglev-ir.cc create mode 100644 deps/v8/src/maglev/maglev-ir.h create mode 100644 deps/v8/src/maglev/maglev-regalloc-data.h create mode 100644 deps/v8/src/maglev/maglev-regalloc.cc create mode 100644 deps/v8/src/maglev/maglev-regalloc.h create mode 100644 deps/v8/src/maglev/maglev-register-frame-array.h create mode 100644 deps/v8/src/maglev/maglev-vreg-allocator.h create mode 100644 deps/v8/src/maglev/maglev.cc create mode 100644 deps/v8/src/maglev/maglev.h create mode 100644 deps/v8/src/numbers/integer-literal-inl.h create mode 100644 deps/v8/src/numbers/integer-literal.h rename deps/v8/src/objects/{stack-frame-info-inl.h => call-site-info-inl.h} (59%) rename deps/v8/src/objects/{stack-frame-info.cc => call-site-info.cc} (85%) rename deps/v8/src/objects/{stack-frame-info.h => 
call-site-info.h} (59%) rename deps/v8/src/objects/{stack-frame-info.tq => call-site-info.tq} (82%) create mode 100644 deps/v8/src/objects/js-shadow-realms-inl.h create mode 100644 deps/v8/src/objects/js-shadow-realms.h create mode 100644 deps/v8/src/objects/js-shadow-realms.tq create mode 100644 deps/v8/src/objects/js-struct-inl.h create mode 100644 deps/v8/src/objects/js-struct.h create mode 100644 deps/v8/src/objects/js-struct.tq create mode 100644 deps/v8/src/objects/js-temporal-objects.cc create mode 100644 deps/v8/src/objects/symbol-table.cc rename deps/v8/src/{security => sandbox}/OWNERS (100%) rename deps/v8/src/{security => sandbox}/external-pointer-inl.h (53%) create mode 100644 deps/v8/src/sandbox/external-pointer-table-inl.h create mode 100644 deps/v8/src/sandbox/external-pointer-table.cc create mode 100644 deps/v8/src/sandbox/external-pointer-table.h rename deps/v8/src/{security => sandbox}/external-pointer.h (74%) create mode 100644 deps/v8/src/sandbox/sandbox.cc create mode 100644 deps/v8/src/sandbox/sandbox.h create mode 100644 deps/v8/src/sandbox/sandboxed-pointer-inl.h create mode 100644 deps/v8/src/sandbox/sandboxed-pointer.h delete mode 100644 deps/v8/src/security/caged-pointer-inl.h delete mode 100644 deps/v8/src/security/caged-pointer.h delete mode 100644 deps/v8/src/security/external-pointer-table.cc delete mode 100644 deps/v8/src/security/external-pointer-table.h delete mode 100644 deps/v8/src/security/vm-cage.cc delete mode 100644 deps/v8/src/security/vm-cage.h create mode 100644 deps/v8/test/fuzzer/inspector/regress-1297964 create mode 100644 deps/v8/test/inspector/debugger/class-private-methods-nested-super-expected.txt create mode 100644 deps/v8/test/inspector/debugger/class-private-methods-nested-super.js create mode 100644 deps/v8/test/inspector/debugger/set-breakpoint-on-instrumentation-expected.txt create mode 100644 deps/v8/test/inspector/debugger/set-breakpoint-on-instrumentation.js create mode 100644 
deps/v8/test/inspector/debugger/wasm-set-breakpoint-on-instrumentation-expected.txt create mode 100644 deps/v8/test/inspector/debugger/wasm-set-breakpoint-on-instrumentation.js create mode 100644 deps/v8/test/inspector/heap-profiler/heap-snapshot-js-weak-refs-expected.txt create mode 100644 deps/v8/test/inspector/heap-profiler/heap-snapshot-js-weak-refs.js create mode 100644 deps/v8/test/inspector/regress/regress-crbug-1220203-expected.txt create mode 100644 deps/v8/test/inspector/regress/regress-crbug-1220203.js create mode 100644 deps/v8/test/inspector/regress/regress-crbug-1281031-expected.txt create mode 100644 deps/v8/test/inspector/regress/regress-crbug-1281031.js create mode 100644 deps/v8/test/inspector/regress/regress-crbug-1283049-expected.txt create mode 100644 deps/v8/test/inspector/regress/regress-crbug-1283049.js create mode 100644 deps/v8/test/inspector/regress/regress-crbug-1290861-expected.txt create mode 100644 deps/v8/test/inspector/regress/regress-crbug-1290861.js create mode 100644 deps/v8/test/inspector/runtime/console-formatter-expected.txt create mode 100644 deps/v8/test/inspector/runtime/console-formatter.js create mode 100644 deps/v8/test/inspector/runtime/console-message-before-enable-expected.txt create mode 100644 deps/v8/test/inspector/runtime/console-message-before-enable.js create mode 100644 deps/v8/test/inspector/runtime/error-stack-expected.txt create mode 100644 deps/v8/test/inspector/runtime/error-stack-trace-limit-expected.txt create mode 100644 deps/v8/test/inspector/runtime/error-stack-trace-limit.js create mode 100644 deps/v8/test/inspector/runtime/error-stack.js create mode 100644 deps/v8/test/inspector/runtime/get-exception-details-expected.txt create mode 100644 deps/v8/test/inspector/runtime/get-exception-details.js create mode 100644 deps/v8/test/intl/number-format/format-range-v3.js create mode 100644 deps/v8/test/intl/number-format/rounding-increment-resolved-match-v3.js create mode 100644 
deps/v8/test/intl/number-format/rounding-increment-v3.js create mode 100644 deps/v8/test/intl/number-format/rounding-increment-value-v3.js create mode 100644 deps/v8/test/intl/number-format/rounding-mode-table-v3.js create mode 100644 deps/v8/test/intl/number-format/rounding-mode-v3.js create mode 100644 deps/v8/test/intl/number-format/sign-display-v3.js create mode 100644 deps/v8/test/intl/number-format/trailing-zero-display-resolved-options-v3.js create mode 100644 deps/v8/test/intl/number-format/trailing-zero-display-v3.js create mode 100644 deps/v8/test/intl/number-format/use-grouping-v3.js create mode 100644 deps/v8/test/intl/plural-rules/select-range.js create mode 100644 deps/v8/test/js-perf-test/Array/includes.js create mode 100644 deps/v8/test/js-perf-test/Array/index-of.js rename deps/v8/test/js-perf-test/BytecodeHandlers/{LdaKeyedProperty.js => GetKeyedProperty.js} (100%) rename deps/v8/test/js-perf-test/BytecodeHandlers/{LdaNamedProperty.js => GetNamedProperty.js} (100%) create mode 100644 deps/v8/test/message/fail/data-view-invalid-length-1.js create mode 100644 deps/v8/test/message/fail/data-view-invalid-length-1.out create mode 100644 deps/v8/test/message/fail/data-view-invalid-length-2.js create mode 100644 deps/v8/test/message/fail/data-view-invalid-length-2.out create mode 100644 deps/v8/test/message/fail/data-view-invalid-length-3.js create mode 100644 deps/v8/test/message/fail/data-view-invalid-length-3.out create mode 100644 deps/v8/test/mjsunit/compiler/fast-api-calls-wasm.js delete mode 100644 deps/v8/test/mjsunit/compiler/regress-1226988.js create mode 100644 deps/v8/test/mjsunit/compiler/regress-store-store-elim.js delete mode 100644 deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps-polymorphic.js delete mode 100644 deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps.js delete mode 100644 deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps2.js delete mode 100644 
deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps3.js delete mode 100644 deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-poly-mono.js delete mode 100644 deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler.js delete mode 100644 deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler1.js delete mode 100644 deps/v8/test/mjsunit/compiler/test-dynamic-map-checks.js delete mode 100644 deps/v8/test/mjsunit/const-field-tracking-2.js create mode 100644 deps/v8/test/mjsunit/d8/d8-multiple-module-exec.js create mode 100644 deps/v8/test/mjsunit/es6/classes-super-in-heritage.js delete mode 100644 deps/v8/test/mjsunit/es6/super-ic-opt-dynamic-map-checks.js create mode 100644 deps/v8/test/mjsunit/harmony/array-prototype-groupby.js create mode 100644 deps/v8/test/mjsunit/harmony/array-prototype-groupbytomap.js create mode 100644 deps/v8/test/mjsunit/harmony/private-brand-nested-super.js create mode 100644 deps/v8/test/mjsunit/harmony/private-name-surrogate-pair.js create mode 100644 deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js create mode 100644 deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function.js create mode 100644 deps/v8/test/mjsunit/maglev/00.js create mode 100644 deps/v8/test/mjsunit/maglev/01.js create mode 100644 deps/v8/test/mjsunit/maglev/02.js create mode 100644 deps/v8/test/mjsunit/maglev/03.js create mode 100644 deps/v8/test/mjsunit/maglev/04.js create mode 100644 deps/v8/test/mjsunit/maglev/05.js create mode 100644 deps/v8/test/mjsunit/maglev/06.js create mode 100644 deps/v8/test/mjsunit/maglev/07.js create mode 100644 deps/v8/test/mjsunit/maglev/08.js create mode 100644 deps/v8/test/mjsunit/maglev/09.js create mode 100644 deps/v8/test/mjsunit/maglev/10.js create mode 100644 deps/v8/test/mjsunit/maglev/11.js create mode 100644 deps/v8/test/mjsunit/maglev/12.js create mode 100644 deps/v8/test/mjsunit/maglev/13.js create mode 100644 deps/v8/test/mjsunit/maglev/14.js create mode 100644 
deps/v8/test/mjsunit/maglev/15.js create mode 100644 deps/v8/test/mjsunit/maglev/16.js create mode 100644 deps/v8/test/mjsunit/maglev/17.js create mode 100644 deps/v8/test/mjsunit/maglev/18.js create mode 100644 deps/v8/test/mjsunit/optimized-array-includes.js create mode 100644 deps/v8/test/mjsunit/optimized-array-indexof.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-1076569.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-1079446.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-1083272.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-1083763.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-1084953.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-1137979.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-1138075.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-1138611.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-1154961.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-1163715.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-1172797.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-1201114.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1208805.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-1223733.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-1225561.js create mode 100644 deps/v8/test/mjsunit/regress/regress-12495.js create mode 100644 deps/v8/test/mjsunit/regress/regress-12580.js create mode 100644 deps/v8/test/mjsunit/regress/regress-12657.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-752764.js create mode 100644 deps/v8/test/mjsunit/regress/regress-binary-bitwise-bigint-smi-mix-opt-depot.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-1206289.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-1262750.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-1276923.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-1277863.js create mode 
100644 deps/v8/test/mjsunit/regress/regress-crbug-1278086.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-1290587.js create mode 100644 deps/v8/test/mjsunit/regress/regress-v8-12122.js create mode 100644 deps/v8/test/mjsunit/regress/regress-v8-12472.js create mode 100644 deps/v8/test/mjsunit/regress/regress-v8-12595.js create mode 100644 deps/v8/test/mjsunit/regress/regress-v8-12671.js create mode 100644 deps/v8/test/mjsunit/regress/regress-v8-12688.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-12624.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1271244.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1271538.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1272204.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1279151.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1282224.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1283042.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1283395.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1284980.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1286253.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1289678.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1290079.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1294384.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1296876.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-inlining-throw.js create mode 100644 deps/v8/test/mjsunit/shared-memory/client-gc.js create mode 100644 deps/v8/test/mjsunit/shared-memory/shared-string-in-code-object.js create mode 100644 deps/v8/test/mjsunit/shared-memory/shared-string-in-weak-map.js create mode 100644 deps/v8/test/mjsunit/shared-memory/shared-string.js create mode 100644 deps/v8/test/mjsunit/shared-memory/shared-struct-atomics-workers.js create mode 100644 
deps/v8/test/mjsunit/shared-memory/shared-struct-atomics.js create mode 100644 deps/v8/test/mjsunit/shared-memory/shared-struct-surface.js create mode 100644 deps/v8/test/mjsunit/shared-memory/shared-struct-workers.js create mode 100644 deps/v8/test/mjsunit/statistics-extension.js create mode 100644 deps/v8/test/mjsunit/temporal/time-zone-constructor.js create mode 100644 deps/v8/test/mjsunit/wasm/extended-constants.js create mode 100644 deps/v8/test/mjsunit/wasm/load-immutable.js create mode 100644 deps/v8/test/mjsunit/wasm/serialization-with-compilation-hints.js create mode 100644 deps/v8/test/mjsunit/web-snapshot-helpers.js create mode 100644 deps/v8/test/mjsunit/web-snapshot/web-snapshot-1.js rename deps/v8/test/mjsunit/web-snapshot/{web-snapshot.js => web-snapshot-2.js} (54%) create mode 100644 deps/v8/test/mjsunit/web-snapshot/web-snapshot-3.js create mode 100644 deps/v8/test/mjsunit/web-snapshot/web-snapshot-externals.js create mode 100644 deps/v8/test/mjsunit/web-snapshot/web-snapshot-helpers.js create mode 100644 deps/v8/test/unittests/base/virtual-address-space-unittest.cc create mode 100644 deps/v8/test/unittests/heap/base/active-system-pages-unittest.cc create mode 100644 deps/v8/test/unittests/sandbox/sandbox-unittest.cc delete mode 100644 deps/v8/test/unittests/security/virtual-memory-cage-unittest.cc create mode 100644 deps/v8/third_party/google_benchmark/precompiled_headers/benchmark/export.h delete mode 100644 deps/v8/third_party/inspector_protocol/crdtp/serializer_traits.h delete mode 100644 deps/v8/third_party/inspector_protocol/crdtp/serializer_traits_test.cc create mode 100644 deps/v8/third_party/test262-harness/LICENSE create mode 100644 deps/v8/third_party/test262-harness/OWNERS create mode 100644 deps/v8/third_party/test262-harness/README.md create mode 100644 deps/v8/third_party/test262-harness/README.v8 create mode 100644 deps/v8/third_party/test262-harness/__init__.py create mode 100644 deps/v8/third_party/test262-harness/excludelist.xml 
create mode 100644 deps/v8/third_party/test262-harness/src/__init__.py create mode 100644 deps/v8/third_party/test262-harness/src/_common.py create mode 100644 deps/v8/third_party/test262-harness/src/_monkeyYaml.py create mode 100644 deps/v8/third_party/test262-harness/src/_packager.py create mode 100644 deps/v8/third_party/test262-harness/src/_packagerConfig.py create mode 100644 deps/v8/third_party/test262-harness/src/parseTestRecord.py create mode 100644 deps/v8/third_party/test262-harness/src/templates/runner.bestPractice.html create mode 100644 deps/v8/third_party/test262-harness/src/templates/runner.intl402.html create mode 100644 deps/v8/third_party/test262-harness/src/templates/runner.test262.html create mode 100755 deps/v8/third_party/test262-harness/src/test262.py create mode 100644 deps/v8/third_party/test262-harness/test/README.md create mode 100644 deps/v8/third_party/test262-harness/test/fixtures/negative.js create mode 100644 deps/v8/third_party/test262-harness/test/fixtures/test262-old-headers.js create mode 100644 deps/v8/third_party/test262-harness/test/fixtures/test262-yaml-headers.js create mode 100644 deps/v8/third_party/test262-harness/test/test_common.py create mode 100644 deps/v8/third_party/test262-harness/test/test_monkeyYaml.py create mode 100644 deps/v8/third_party/test262-harness/test/test_parseTestRecord.py create mode 100644 deps/v8/third_party/test262-harness/test/test_test262.py create mode 100644 deps/v8/third_party/zlib/google/redact.h delete mode 100644 deps/v8/tools/clusterfuzz/PRESUBMIT.py rename deps/v8/tools/clusterfuzz/{ => foozzie}/BUILD.gn (95%) create mode 100644 deps/v8/tools/clusterfuzz/foozzie/PRESUBMIT.py rename deps/v8/tools/clusterfuzz/{ => foozzie}/testdata/baseline/d8.py (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/testdata/baseline/v8_build_config.json (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/testdata/build1/d8.py (100%) rename deps/v8/tools/clusterfuzz/{ => 
foozzie}/testdata/build1/v8_build_config.json (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/testdata/build2/d8.py (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/testdata/build2/v8_build_config.json (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/testdata/build3/d8.py (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/testdata/build3/v8_build_config.json (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/testdata/failure_output.txt (76%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/testdata/failure_output_arch.txt (77%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/testdata/failure_output_second.txt (77%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/testdata/fuzz-123.js (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/testdata/smoke_test_output.txt (78%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/toolchain/BUILD.gn (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/v8_commands.py (93%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/v8_foozzie.py (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/v8_foozzie_harness_adjust.js (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/v8_foozzie_test.py (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/v8_fuzz_config.py (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/v8_fuzz_experiments.json (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/v8_fuzz_flags.json (90%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/v8_mock.js (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/v8_mock_archs.js (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/v8_mock_webassembly.js (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/v8_smoke_tests.js (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/v8_suppressions.js (100%) rename deps/v8/tools/clusterfuzz/{ => foozzie}/v8_suppressions.py (100%) create mode 100644 deps/v8/tools/clusterfuzz/trials/BUILD.gn create mode 100644 deps/v8/tools/clusterfuzz/trials/PRESUBMIT.py create mode 100644 
deps/v8/tools/clusterfuzz/trials/clusterfuzz_trials_config.json delete mode 100644 deps/v8/tools/gcmole/GCMOLE.gn mode change 100644 => 100755 deps/v8/tools/gcmole/gcmole.py create mode 100644 deps/v8/tools/heap-layout/heap-layout-viewer-template.html create mode 100644 deps/v8/tools/heap-layout/heap-layout-viewer.mjs create mode 100644 deps/v8/tools/heap-layout/heap-size-trend-viewer-template.html create mode 100644 deps/v8/tools/heap-layout/heap-size-trend-viewer.mjs create mode 100644 deps/v8/tools/heap-layout/index.css create mode 100644 deps/v8/tools/heap-layout/index.html create mode 100644 deps/v8/tools/heap-layout/space-categories.mjs create mode 100644 deps/v8/tools/heap-layout/trace-file-reader.mjs create mode 100644 deps/v8/tools/js/helper.mjs rename deps/v8/tools/{system-analyzer/view => js}/log-file-reader-template.html (81%) create mode 100644 deps/v8/tools/js/web-api-helper.mjs delete mode 100755 deps/v8/tools/release/auto_tag.py delete mode 100755 deps/v8/tools/release/check_clusterfuzz.py delete mode 100755 deps/v8/tools/release/search_related_commits.py delete mode 100755 deps/v8/tools/release/test_search_related_commits.py delete mode 100644 deps/v8/tools/testrunner/local/junit_output.py delete mode 100644 deps/v8/tools/tick-processor.html create mode 100755 deps/v8/tools/wasm/code-size-factors.py diff --git a/deps/v8/.bazelrc b/deps/v8/.bazelrc index e0127628ca93aa..95bfad4a35c17e 100644 --- a/deps/v8/.bazelrc +++ b/deps/v8/.bazelrc @@ -2,17 +2,22 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
-# V8 bazel port only supports clang -build --action_env=BAZEL_COMPILER=clang -build --action_env=CC=clang -build --action_env=CXX=clang++ +# Pass CC, CXX and PATH from the environment +build --action_env=CC +build --action_env=CXX build --action_env=PATH +# Use Clang compiler +build:clang --action_env=BAZEL_COMPILER=clang +build:clang --action_env=CC=clang +build:clang --action_env=CXX=clang++ + # V8 debug config build:debug --compilation_mode=dbg build:debug --config=v8_enable_debugging_features build:debug --//:v8_enable_fast_mksnapshot build:debug --//:v8_enable_backtrace +build:debug --//:v8_enable_handle_zapping # v8_enable_debugging_features flags build:v8_enable_debugging_features --//:v8_enable_verify_heap diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index 66116d82ca5a5c..b8d1d934bcf610 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -54,7 +54,6 @@ !/test/mjsunit/tools/*.log /test/mozilla/data /test/test262/data -/test/test262/harness /test/wasm-js/data /test/wasm-js/tests /test/wasm-js/tests.tar.gz @@ -76,6 +75,7 @@ !/third_party/googletest/src/googletest/include/gtest /third_party/googletest/src/googletest/include/gtest/* !/third_party/googletest/src/googletest/include/gtest/gtest_prod.h +!/third_party/test262-harness !/third_party/v8 !/third_party/wasm-api /tools/clang diff --git a/deps/v8/.style.yapf b/deps/v8/.style.yapf new file mode 100644 index 00000000000000..de0c6a70f38b94 --- /dev/null +++ b/deps/v8/.style.yapf @@ -0,0 +1,2 @@ +[style] +based_on_style = chromium diff --git a/deps/v8/.vpython3 b/deps/v8/.vpython3 index 95e52ee59ebea6..d1842bb8dd2eda 100644 --- a/deps/v8/.vpython3 +++ b/deps/v8/.vpython3 @@ -44,3 +44,28 @@ wheel: < name: "infra/python/wheels/six-py2_py3" version: "version:1.15.0" > + +wheel: < + name: "infra/python/wheels/coverage/${vpython_platform}" + version: "version:5.5.chromium.2" +> + +wheel: < + name: "infra/python/wheels/pbr-py2_py3" + version: "version:3.0.0" +> + +wheel: < + name: 
"infra/python/wheels/funcsigs-py2_py3" + version: "version:1.0.2" +> + +wheel: < + name: "infra/python/wheels/mock-py2_py3" + version: "version:2.0.0" +> + +wheel: < + name: "infra/python/wheels/numpy/${vpython_platform}" + version: "version:1.2x.supported.1" +> diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index b89eacba9f18e5..f05ba729c58624 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -61,8 +61,11 @@ Andrei Kashcha Andrew Paprocki Anna Henningsen Antoine du Hamel +Anton Bershanskiy <8knots@protonmail.com> Anton Bikineev +Ao Wang Archil Sharashenidze +Bala Avulapati Bangfu Tao Ben Coe Ben Newman @@ -104,6 +107,7 @@ Fedor Indutny Felix Geisendörfer Filipe David Manana Franziska Hinkelmann +Gao Sheng Geoffrey Garside Gergely Nagy Gilang Mentari Hamidy @@ -111,6 +115,7 @@ Gus Caplan Gwang Yoon Hwang Haichuan Wang Hannu Trey +Harshil Jain Henrique Ferreiro Hirofumi Mako Honggyu Kim @@ -142,6 +147,7 @@ Junha Park Junming Huang Kang-Hao (Kenny) Lu Karl Skomski +Keith Smiley Kevin Gibbons Kris Selden Kyounga Ra @@ -256,9 +262,11 @@ Yu Yin Yujie Wang Yuri Iozzelli Yusif Khudhur +Yuxiang Cao Zac Hansen Zeynep Cankara Zhao Jiazhong Zheng Liu Zhongping Wang 柳è£ä¸€ +Yang Xiang diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel index 1bad423e03d7a6..a632cc4fe8ad63 100644 --- a/deps/v8/BUILD.bazel +++ b/deps/v8/BUILD.bazel @@ -3,6 +3,8 @@ # found in the LICENSE file. 
load("@bazel_skylib//lib:selects.bzl", "selects") +load("@rules_python//python:defs.bzl", "py_binary") +load("@v8_python_deps//:requirements.bzl", "requirement") load( "@v8//:bazel/defs.bzl", "v8_binary", @@ -18,13 +20,6 @@ load( ) load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression") -config_setting( - name = "is_debug", - values = { - "compilation_mode": "dbg", - }, -) - # ================================================= # Flags # ================================================= @@ -58,7 +53,6 @@ config_setting( # v8_can_use_fpu_instructions # v8_use_mips_abi_hardfloat # v8_enable_gdbjit -# v8_enable_minor_mc # v8_check_header_includes # v8_enable_shared_ro_heap # v8_enable_lazy_source_positions @@ -71,28 +65,22 @@ config_setting( # v8_verify_torque_generation_invariance # v8_enable_snapshot_compression # v8_control_flow_integrity -# v8_enable_virtual_memory_cage +# v8_enable_sandbox # cppgc_enable_caged_heap # cppgc_enable_check_assignments_in_prefinalizers # cppgc_enable_object_names # cppgc_enable_verify_heap # cppgc_enable_young_generation # v8_enable_zone_compression -# v8_enable_heap_sandbox # v8_enable_precise_zone_stats # v8_enable_swiss_name_dictionary # v8_generate_external_defines_header # v8_dict_property_const_tracking # v8_enable_map_packing -# v8_allow_javascript_in_promise_hooks +# v8_enable_javascript_promise_hooks # v8_enable_allocation_folding # v8_allocation_site_tracking -v8_string( - name = "v8_root", - default = "third_party/v8/HEAD", -) - v8_flag(name = "v8_android_log_stdout") v8_flag(name = "v8_annotate_torque_ir") @@ -115,10 +103,9 @@ v8_flag(name = "v8_enable_debug_code") v8_flag(name = "v8_enable_disassembler") -v8_flag( - name = "v8_enable_handle_zapping", - default = True, -) +v8_flag(name = "v8_enable_handle_zapping") + +v8_flag(name = "v8_enable_runtime_call_stats") v8_flag(name = "v8_enable_hugepage") @@ -136,11 +123,6 @@ v8_flag( default = True, ) -v8_flag( - name = "v8_enable_minor_mc", - default 
= True, -) - v8_flag(name = "v8_enable_object_print") v8_flag(name = "v8_enable_slow_dchecks") @@ -199,7 +181,7 @@ selects.config_setting_group( name = "v8_target_x64_default_pointer_compression", match_all = [ ":v8_enable_pointer_compression_is_none", - "@config//:v8_target_x64", + "@v8//bazel/config:v8_target_x64", ], ) @@ -208,7 +190,7 @@ selects.config_setting_group( name = "v8_target_arm64_default_pointer_compression", match_all = [ ":v8_enable_pointer_compression_is_none", - "@config//:v8_target_arm64", + "@v8//bazel/config:v8_target_arm64", ], ) @@ -253,7 +235,7 @@ selects.config_setting_group( selects.config_setting_group( name = "should_add_rdynamic", match_all = [ - "@config//:is_linux", + "@v8//bazel/config:is_linux", ":is_v8_enable_backtrace", ], ) @@ -273,9 +255,9 @@ v8_config( "v8_enable_hugepage": "ENABLE_HUGEPAGE", "v8_enable_future": "V8_ENABLE_FUTURE", "v8_enable_lazy_source_positions": "V8_ENABLE_LAZY_SOURCE_POSITIONS", - "v8_enable_minor_mc": "ENABLE_MINOR_MC", "v8_enable_object_print": "OBJECT_PRINT", "v8_enable_slow_dchecks": "ENABLE_SLOW_DCHECKS", + "v8_enable_runtime_call_stats": "V8_RUNTIME_CALL_STATS", "v8_enable_snapshot_native_code_counters": "V8_SNAPSHOT_NATIVE_CODE_COUNTERS", "v8_enable_trace_maps": "V8_TRACE_MAPS", "v8_enable_v8_checks": "V8_ENABLE_CHECKS", @@ -291,37 +273,51 @@ v8_config( "V8_ADVANCED_BIGINT_ALGORITHMS", "V8_CONCURRENT_MARKING", ] + select({ - ":is_debug": [ + "@v8//bazel/config:is_debug": [ "DEBUG", "V8_ENABLE_CHECKS", ], "//conditions:default": [], }) + select( { - "@config//:v8_target_ia32": ["V8_TARGET_ARCH_IA32"], - "@config//:v8_target_x64": ["V8_TARGET_ARCH_X64"], - "@config//:v8_target_arm": [ + "@v8//bazel/config:v8_target_ia32": ["V8_TARGET_ARCH_IA32"], + "@v8//bazel/config:v8_target_x64": ["V8_TARGET_ARCH_X64"], + "@v8//bazel/config:v8_target_arm": [ "V8_TARGET_ARCH_ARM", "CAN_USE_ARMV7_INSTRUCTIONS", "CAN_USE_VFP3_INSTRUCTIONS", ], - "@config//:v8_target_arm64": ["V8_TARGET_ARCH_ARM64"], + 
"@v8//bazel/config:v8_target_arm64": ["V8_TARGET_ARCH_ARM64"], + "@v8//bazel/config:v8_target_s390x": [ + "V8_TARGET_ARCH_S390", + "V8_TARGET_ARCH_S390X", + ], + "@v8//bazel/config:v8_target_riscv64": [ + # NOTE: Bazel rules for riscv64 weren't tested on a real system. + "V8_TARGET_ARCH_RISCV64", + "CAN_USE_FPU_INSTRUCTIONS", + ], + "@v8//bazel/config:v8_target_ppc64le": [ + # NOTE: Bazel rules for ppc64le weren't tested on a real system. + "V8_TARGET_ARCH_PPC64", + "V8_TARGET_ARCH_PPC_LE", + ], }, no_match_error = "Please specify a target cpu supported by v8", ) + select({ - "@config//:is_android": [ + "@v8//bazel/config:is_android": [ "V8_HAVE_TARGET_OS", "V8_TARGET_OS_ANDROID", ], - "@config//:is_linux": [ + "@v8//bazel/config:is_linux": [ "V8_HAVE_TARGET_OS", "V8_TARGET_OS_LINUX", ], - "@config//:is_macos": [ + "@v8//bazel/config:is_macos": [ "V8_HAVE_TARGET_OS", - "V8_TARGET_OS_MACOSX", + "V8_TARGET_OS_MACOS", ], - "@config//:is_windows": [ + "@v8//bazel/config:is_windows": [ "V8_HAVE_TARGET_OS", "V8_TARGET_OS_WIN", "UNICODE", @@ -413,7 +409,6 @@ filegroup( "include/cppgc/internal/name-trait.h", "include/cppgc/internal/persistent-node.h", "include/cppgc/internal/pointer-policies.h", - "include/cppgc/internal/prefinalizer-handler.h", "include/cppgc/internal/write-barrier.h", "include/cppgc/liveness-broker.h", "include/cppgc/macros.h", @@ -595,6 +590,7 @@ filegroup( "src/base/platform/semaphore.h", "src/base/platform/time.cc", "src/base/platform/time.h", + "src/base/pointer-with-payload.h", "src/base/platform/wrappers.h", "src/base/region-allocator.cc", "src/base/region-allocator.h", @@ -623,7 +619,7 @@ filegroup( "src/base/vlq-base64.h", "src/base/platform/yield-processor.h", ] + select({ - "@config//:is_posix": [ + "@v8//bazel/config:is_posix": [ "src/base/platform/platform-posix.cc", "src/base/platform/platform-posix.h", "src/base/platform/platform-posix-time.cc", @@ -631,19 +627,20 @@ filegroup( ], "//conditions:default": [], }) + select({ - 
"@config//:is_linux": [ + "@v8//bazel/config:is_linux": [ "src/base/debug/stack_trace_posix.cc", "src/base/platform/platform-linux.cc", ], - "@config//:is_android": [ + "@v8//bazel/config:is_android": [ "src/base/debug/stack_trace_android.cc", "src/base/platform/platform-linux.cc", ], - "@config//:is_macos": [ + "@v8//bazel/config:is_macos": [ "src/base/debug/stack_trace_posix.cc", + "src/base/platform/platform-darwin.cc", "src/base/platform/platform-macos.cc", ], - "@config//:is_windows": [ + "@v8//bazel/config:is_windows": [ "src/base/win32-headers.h", "src/base/debug/stack_trace_win.cc", "src/base/platform/platform-win32.cc", @@ -655,7 +652,6 @@ filegroup( filegroup( name = "v8_libplatform_files", srcs = [ - "base/trace_event/common/trace_event_common.h", "include/libplatform/libplatform.h", "include/libplatform/libplatform-export.h", "include/libplatform/v8-tracing.h", @@ -832,6 +828,7 @@ filegroup( "src/objects/api-callbacks.tq", "src/objects/arguments.tq", "src/objects/bigint.tq", + "src/objects/call-site-info.tq", "src/objects/cell.tq", "src/objects/code.tq", "src/objects/contexts.tq", @@ -857,6 +854,8 @@ filegroup( "src/objects/js-proxy.tq", "src/objects/js-regexp-string-iterator.tq", "src/objects/js-regexp.tq", + "src/objects/js-shadow-realms.tq", + "src/objects/js-struct.tq", "src/objects/js-temporal-objects.tq", "src/objects/js-weak-refs.tq", "src/objects/literal-objects.tq", @@ -878,7 +877,6 @@ filegroup( "src/objects/script.tq", "src/objects/shared-function-info.tq", "src/objects/source-text-module.tq", - "src/objects/stack-frame-info.tq", "src/objects/string.tq", "src/objects/struct.tq", "src/objects/swiss-hash-table-helpers.tq", @@ -923,6 +921,8 @@ filegroup( filegroup( name = "torque_base_files", srcs = [ + "src/numbers/integer-literal-inl.h", + "src/numbers/integer-literal.h", "src/torque/ast.h", "src/torque/cc-generator.cc", "src/torque/cc-generator.h", @@ -980,10 +980,9 @@ filegroup( name = "v8_base_without_compiler_files", srcs = [ 
":cppgc_base_files", - ":v8_cppgc_shared_files", + ":v8_heap_base_files", ":v8_bigint", ":generated_bytecode_builtins_list", - "base/trace_event/common/trace_event_common.h", "include/cppgc/common.h", "include/v8-inspector-protocol.h", "include/v8-inspector.h", @@ -1053,8 +1052,10 @@ filegroup( "src/builtins/builtins-promise.h", "src/builtins/builtins-reflect.cc", "src/builtins/builtins-regexp.cc", + "src/builtins/builtins-shadow-realms.cc", "src/builtins/builtins-sharedarraybuffer.cc", "src/builtins/builtins-string.cc", + "src/builtins/builtins-struct.cc", "src/builtins/builtins-symbol.cc", "src/builtins/builtins-temporal.cc", "src/builtins/builtins-trace.cc", @@ -1116,9 +1117,11 @@ filegroup( "src/codegen/pending-optimization-table.cc", "src/codegen/pending-optimization-table.h", "src/codegen/register-arch.h", + "src/codegen/register-base.h", "src/codegen/register-configuration.cc", "src/codegen/register-configuration.h", "src/codegen/register.h", + "src/codegen/reglist-base.h", "src/codegen/reglist.h", "src/codegen/reloc-info.cc", "src/codegen/reloc-info.h", @@ -1142,9 +1145,11 @@ filegroup( "src/codegen/unoptimized-compilation-info.h", "src/common/assert-scope.cc", "src/common/assert-scope.h", + "src/common/allow-deprecated.h", "src/common/checks.h", "src/common/high-allocation-throughput-scope.h", "src/common/message-template.h", + "src/common/operation.h", "src/common/ptr-compr-inl.h", "src/common/ptr-compr.h", "src/compiler-dispatcher/lazy-compile-dispatcher.cc", @@ -1214,6 +1219,8 @@ filegroup( "src/execution/arguments-inl.h", "src/execution/arguments.cc", "src/execution/arguments.h", + "src/execution/clobber-registers.cc", + "src/execution/clobber-registers.h", "src/execution/encoded-c-signature.cc", "src/execution/encoded-c-signature.h", "src/execution/embedder-state.h", @@ -1247,8 +1254,6 @@ filegroup( "src/execution/protectors-inl.h", "src/execution/protectors.cc", "src/execution/protectors.h", - "src/execution/runtime-profiler.cc", - 
"src/execution/runtime-profiler.h", "src/execution/shared-mutex-guard-if-off-thread.h", "src/execution/simulator-base.cc", "src/execution/simulator-base.h", @@ -1259,6 +1264,8 @@ filegroup( "src/execution/thread-id.h", "src/execution/thread-local-top.cc", "src/execution/thread-local-top.h", + "src/execution/tiering-manager.cc", + "src/execution/tiering-manager.h", "src/execution/v8threads.cc", "src/execution/v8threads.h", "src/execution/vm-state-inl.h", @@ -1291,8 +1298,11 @@ filegroup( "src/handles/maybe-handles.h", "src/handles/persistent-handles.cc", "src/handles/persistent-handles.h", + "src/heap/base/active-system-pages.cc", + "src/heap/base/active-system-pages.h", "src/heap/allocation-observer.cc", "src/heap/allocation-observer.h", + "src/heap/allocation-result.h", "src/heap/allocation-stats.h", "src/heap/array-buffer-sweeper.cc", "src/heap/array-buffer-sweeper.h", @@ -1318,6 +1328,8 @@ filegroup( "src/heap/concurrent-marking.h", "src/heap/cppgc-js/cpp-heap.cc", "src/heap/cppgc-js/cpp-heap.h", + "src/heap/cppgc-js/cpp-marking-state.h", + "src/heap/cppgc-js/cpp-marking-state-inl.h", "src/heap/cppgc-js/cpp-snapshot.cc", "src/heap/cppgc-js/cpp-snapshot.h", "src/heap/cppgc-js/unified-heap-marking-state.h", @@ -1327,6 +1339,7 @@ filegroup( "src/heap/cppgc-js/unified-heap-marking-visitor.h", "src/heap/embedder-tracing.cc", "src/heap/embedder-tracing.h", + "src/heap/embedder-tracing-inl.h", "src/heap/factory-base.cc", "src/heap/factory-base.h", "src/heap/factory-base-inl.h", @@ -1342,6 +1355,9 @@ filegroup( "src/heap/gc-idle-time-handler.h", "src/heap/gc-tracer.cc", "src/heap/gc-tracer.h", + "src/heap/heap-allocator-inl.h", + "src/heap/heap-allocator.cc", + "src/heap/heap-allocator.h", "src/heap/heap-controller.cc", "src/heap/heap-controller.h", "src/heap/heap-inl.h", @@ -1366,8 +1382,8 @@ filegroup( "src/heap/large-spaces.h", "src/heap/linear-allocation-area.h", "src/heap/list.h", - "src/heap/local-allocator-inl.h", - "src/heap/local-allocator.h", + 
"src/heap/evacuation-allocator-inl.h", + "src/heap/evacuation-allocator.h", "src/heap/local-factory.cc", "src/heap/local-factory.h", "src/heap/local-factory-inl.h", @@ -1549,6 +1565,9 @@ filegroup( "src/objects/bigint-inl.h", "src/objects/bigint.cc", "src/objects/bigint.h", + "src/objects/call-site-info-inl.h", + "src/objects/call-site-info.cc", + "src/objects/call-site-info.h", "src/objects/cell-inl.h", "src/objects/cell.h", "src/objects/code-inl.h", @@ -1635,8 +1654,13 @@ filegroup( "src/objects/js-regexp-string-iterator.h", "src/objects/js-regexp.cc", "src/objects/js-regexp.h", + "src/objects/js-shadow-realms.h", + "src/objects/js-shadow-realms-inl.h", + "src/objects/js-struct.h", + "src/objects/js-struct-inl.h", "src/objects/js-temporal-objects.h", "src/objects/js-temporal-objects-inl.h", + "src/objects/js-temporal-objects.cc", "src/objects/js-weak-refs.h", "src/objects/js-weak-refs-inl.h", "src/objects/keys.cc", @@ -1727,9 +1751,6 @@ filegroup( "src/objects/source-text-module.cc", "src/objects/source-text-module.h", "src/objects/source-text-module-inl.h", - "src/objects/stack-frame-info-inl.h", - "src/objects/stack-frame-info.cc", - "src/objects/stack-frame-info.h", "src/objects/string-comparator.cc", "src/objects/string-comparator.h", "src/objects/string-inl.h", @@ -1737,6 +1758,7 @@ filegroup( "src/objects/string-set.h", "src/objects/string-table-inl.h", "src/objects/string-table.cc", + "src/objects/symbol-table.cc", "src/objects/string-table.h", "src/objects/string.cc", "src/objects/string.h", @@ -1918,14 +1940,15 @@ filegroup( "src/runtime/runtime-weak-refs.cc", "src/runtime/runtime.cc", "src/runtime/runtime.h", - "src/security/external-pointer-table.cc", - "src/security/vm-cage.cc", - "src/security/caged-pointer-inl.h", - "src/security/caged-pointer.h", - "src/security/external-pointer-inl.h", - "src/security/external-pointer-table.h", - "src/security/external-pointer.h", - "src/security/vm-cage.h", + "src/sandbox/external-pointer-inl.h", + 
"src/sandbox/external-pointer.h", + "src/sandbox/external-pointer-table.cc", + "src/sandbox/external-pointer-table-inl.h", + "src/sandbox/external-pointer-table.h", + "src/sandbox/sandbox.cc", + "src/sandbox/sandbox.h", + "src/sandbox/sandboxed-pointer-inl.h", + "src/sandbox/sandboxed-pointer.h", "src/base/sanitizer/asan.h", "src/base/sanitizer/lsan-page-allocator.cc", "src/base/sanitizer/lsan-page-allocator.h", @@ -1964,8 +1987,6 @@ filegroup( "src/snapshot/shared-heap-serializer.cc", "src/snapshot/snapshot-compression.cc", "src/snapshot/snapshot-compression.h", - "third_party/zlib/google/compression_utils_portable.h", - "third_party/zlib/google/compression_utils_portable.cc", "src/snapshot/snapshot-data.cc", "src/snapshot/snapshot-data.h", "src/snapshot/snapshot-source-sink.cc", @@ -2014,8 +2035,8 @@ filegroup( "src/tracing/traced-value.h", "src/tracing/tracing-category-observer.cc", "src/tracing/tracing-category-observer.h", - "src/trap-handler/handler-inside.cc", "src/trap-handler/handler-inside-posix.h", + "src/trap-handler/handler-inside.cc", "src/trap-handler/handler-outside.cc", "src/trap-handler/handler-shared.cc", "src/trap-handler/trap-handler-internal.h", @@ -2037,7 +2058,6 @@ filegroup( "src/utils/memcopy.h", "src/utils/ostreams.cc", "src/utils/ostreams.h", - "src/utils/pointer-with-payload.h", "src/utils/scoped-list.h", "src/utils/utils-inl.h", "src/utils/utils.cc", @@ -2070,7 +2090,7 @@ filegroup( "src/heap/third-party/heap-api.h", "src/heap/third-party/heap-api-stub.cc", ] + select({ - "@config//:v8_target_ia32": [ + "@v8//bazel/config:v8_target_ia32": [ "src/baseline/ia32/baseline-assembler-ia32-inl.h", "src/baseline/ia32/baseline-compiler-ia32-inl.h", "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h", @@ -2080,11 +2100,13 @@ filegroup( "src/codegen/ia32/assembler-ia32.cc", "src/codegen/ia32/assembler-ia32.h", "src/codegen/ia32/constants-ia32.h", + "src/codegen/ia32/fma-instr.h", "src/codegen/ia32/interface-descriptors-ia32-inl.h", 
"src/codegen/ia32/sse-instr.h", "src/codegen/ia32/macro-assembler-ia32.cc", "src/codegen/ia32/macro-assembler-ia32.h", "src/codegen/ia32/register-ia32.h", + "src/codegen/ia32/reglist-ia32.h", "src/compiler/backend/ia32/code-generator-ia32.cc", "src/compiler/backend/ia32/instruction-codes-ia32.h", "src/compiler/backend/ia32/instruction-scheduler-ia32.cc", @@ -2098,7 +2120,7 @@ filegroup( "src/regexp/ia32/regexp-macro-assembler-ia32.h", "src/wasm/baseline/ia32/liftoff-assembler-ia32.h", ], - "@config//:v8_target_x64": [ + "@v8//bazel/config:v8_target_x64": [ "src/baseline/x64/baseline-assembler-x64-inl.h", "src/baseline/x64/baseline-compiler-x64-inl.h", "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h", @@ -2114,6 +2136,7 @@ filegroup( "src/codegen/x64/macro-assembler-x64.cc", "src/codegen/x64/macro-assembler-x64.h", "src/codegen/x64/register-x64.h", + "src/codegen/x64/reglist-x64.h", "src/compiler/backend/x64/code-generator-x64.cc", "src/compiler/backend/x64/instruction-codes-x64.h", "src/compiler/backend/x64/instruction-scheduler-x64.cc", @@ -2130,7 +2153,7 @@ filegroup( "src/regexp/x64/regexp-macro-assembler-x64.h", "src/wasm/baseline/x64/liftoff-assembler-x64.h", ], - "@config//:v8_target_arm": [ + "@v8//bazel/config:v8_target_arm": [ "src/baseline/arm/baseline-assembler-arm-inl.h", "src/baseline/arm/baseline-compiler-arm-inl.h", "src/codegen/arm/assembler-arm-inl.h", @@ -2143,6 +2166,7 @@ filegroup( "src/codegen/arm/macro-assembler-arm.cc", "src/codegen/arm/macro-assembler-arm.h", "src/codegen/arm/register-arm.h", + "src/codegen/arm/reglist-arm.h", "src/compiler/backend/arm/code-generator-arm.cc", "src/compiler/backend/arm/instruction-codes-arm.h", "src/compiler/backend/arm/instruction-scheduler-arm.cc", @@ -2161,7 +2185,7 @@ filegroup( "src/regexp/arm/regexp-macro-assembler-arm.h", "src/wasm/baseline/arm/liftoff-assembler-arm.h", ], - "@config//:v8_target_arm64": [ + "@v8//bazel/config:v8_target_arm64": [ 
"src/baseline/arm64/baseline-assembler-arm64-inl.h", "src/baseline/arm64/baseline-compiler-arm64-inl.h", "src/codegen/arm64/assembler-arm64-inl.h", @@ -2181,6 +2205,7 @@ filegroup( "src/codegen/arm64/macro-assembler-arm64.h", "src/codegen/arm64/register-arm64.cc", "src/codegen/arm64/register-arm64.h", + "src/codegen/arm64/reglist-arm64.h", "src/codegen/arm64/utils-arm64.cc", "src/codegen/arm64/utils-arm64.h", "src/compiler/backend/arm64/code-generator-arm64.cc", @@ -2204,31 +2229,121 @@ filegroup( "src/regexp/arm64/regexp-macro-assembler-arm64.h", "src/wasm/baseline/arm64/liftoff-assembler-arm64.h", ], + "@v8//bazel/config:v8_target_s390x": [ + "src/baseline/s390/baseline-assembler-s390-inl.h", + "src/baseline/s390/baseline-compiler-s390-inl.h", + "src/codegen/s390/assembler-s390.cc", + "src/codegen/s390/assembler-s390.h", + "src/codegen/s390/assembler-s390-inl.h", + "src/codegen/s390/constants-s390.cc", + "src/codegen/s390/constants-s390.h", + "src/codegen/s390/cpu-s390.cc", + "src/codegen/s390/interface-descriptors-s390-inl.h", + "src/codegen/s390/macro-assembler-s390.cc", + "src/codegen/s390/macro-assembler-s390.h", + "src/codegen/s390/register-s390.h", + "src/codegen/s390/reglist-s390.h", + "src/compiler/backend/s390/code-generator-s390.cc", + "src/compiler/backend/s390/instruction-codes-s390.h", + "src/compiler/backend/s390/instruction-scheduler-s390.cc", + "src/compiler/backend/s390/instruction-selector-s390.cc", + "src/compiler/backend/s390/unwinding-info-writer-s390.cc", + "src/compiler/backend/s390/unwinding-info-writer-s390.h", + "src/deoptimizer/s390/deoptimizer-s390.cc", + "src/diagnostics/s390/disasm-s390.cc", + "src/diagnostics/s390/eh-frame-s390.cc", + "src/diagnostics/s390/unwinder-s390.cc", + "src/execution/s390/frame-constants-s390.cc", + "src/execution/s390/frame-constants-s390.h", + "src/execution/s390/simulator-s390.cc", + "src/execution/s390/simulator-s390.h", + "src/regexp/s390/regexp-macro-assembler-s390.cc", + 
"src/regexp/s390/regexp-macro-assembler-s390.h", + "src/wasm/baseline/s390/liftoff-assembler-s390.h", + ], + "@v8//bazel/config:v8_target_riscv64": [ + "src/baseline/riscv64/baseline-assembler-riscv64-inl.h", + "src/baseline/riscv64/baseline-compiler-riscv64-inl.h", + "src/codegen/riscv64/assembler-riscv64.cc", + "src/codegen/riscv64/assembler-riscv64.h", + "src/codegen/riscv64/assembler-riscv64-inl.h", + "src/codegen/riscv64/constants-riscv64.cc", + "src/codegen/riscv64/constants-riscv64.h", + "src/codegen/riscv64/cpu-riscv64.cc", + "src/codegen/riscv64/interface-descriptors-riscv64-inl.h", + "src/codegen/riscv64/macro-assembler-riscv64.cc", + "src/codegen/riscv64/macro-assembler-riscv64.h", + "src/codegen/riscv64/register-riscv64.h", + "src/codegen/riscv64/reglist-riscv64.h", + "src/compiler/backend/riscv64/code-generator-riscv64.cc", + "src/compiler/backend/riscv64/instruction-codes-riscv64.h", + "src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc", + "src/compiler/backend/riscv64/instruction-selector-riscv64.cc", + "src/deoptimizer/riscv64/deoptimizer-riscv64.cc", + "src/diagnostics/riscv64/disasm-riscv64.cc", + "src/diagnostics/riscv64/unwinder-riscv64.cc", + "src/execution/riscv64/frame-constants-riscv64.cc", + "src/execution/riscv64/frame-constants-riscv64.h", + "src/execution/riscv64/simulator-riscv64.cc", + "src/execution/riscv64/simulator-riscv64.h", + "src/regexp/riscv64/regexp-macro-assembler-riscv64.cc", + "src/regexp/riscv64/regexp-macro-assembler-riscv64.h", + "src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h", + ], + "@v8//bazel/config:v8_target_ppc64le": [ + "src/baseline/ppc/baseline-assembler-ppc-inl.h", + "src/baseline/ppc/baseline-compiler-ppc-inl.h", + "src/codegen/ppc/assembler-ppc.cc", + "src/codegen/ppc/assembler-ppc.h", + "src/codegen/ppc/assembler-ppc-inl.h", + "src/codegen/ppc/constants-ppc.cc", + "src/codegen/ppc/constants-ppc.h", + "src/codegen/ppc/cpu-ppc.cc", + "src/codegen/ppc/interface-descriptors-ppc-inl.h", + 
"src/codegen/ppc/macro-assembler-ppc.cc", + "src/codegen/ppc/macro-assembler-ppc.h", + "src/codegen/ppc/register-ppc.h", + "src/codegen/ppc/reglist-ppc.h", + "src/compiler/backend/ppc/code-generator-ppc.cc", + "src/compiler/backend/ppc/instruction-codes-ppc.h", + "src/compiler/backend/ppc/instruction-scheduler-ppc.cc", + "src/compiler/backend/ppc/instruction-selector-ppc.cc", + "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc", + "src/compiler/backend/ppc/unwinding-info-writer-ppc.h", + "src/deoptimizer/ppc/deoptimizer-ppc.cc", + "src/diagnostics/ppc/disasm-ppc.cc", + "src/diagnostics/ppc/eh-frame-ppc.cc", + "src/diagnostics/ppc/unwinder-ppc.cc", + "src/execution/ppc/frame-constants-ppc.cc", + "src/execution/ppc/frame-constants-ppc.h", + "src/execution/ppc/simulator-ppc.cc", + "src/execution/ppc/simulator-ppc.h", + "src/regexp/ppc/regexp-macro-assembler-ppc.cc", + "src/regexp/ppc/regexp-macro-assembler-ppc.h", + "src/wasm/baseline/ppc/liftoff-assembler-ppc.h", + ], }) + select({ # Only for x64 builds and for arm64 with x64 host simulator. 
- "@config//:is_posix_x64": [ + "@v8//bazel/config:is_non_android_posix_x64": [ "src/trap-handler/handler-inside-posix.cc", "src/trap-handler/handler-outside-posix.cc", ], "//conditions:default": [], }) + select({ - "@config//:v8_arm64_simulator": [ + "@v8//bazel/config:v8_arm64_simulator": [ "src/trap-handler/trap-handler-simulator.h", "src/trap-handler/handler-outside-simulator.cc", ], "//conditions:default": [], }) + select({ - "@config//:is_windows": [ + "@v8//bazel/config:is_windows": [ "src/trap-handler/handler-inside-win.cc", - "src/trap-handler/handler-outside-win.cc", "src/trap-handler/handler-inside-win.h", - # Needed on windows to work around https://github.com/bazelbuild/bazel/issues/6337 - "third_party/zlib/zlib.h", - "third_party/zlib/zconf.h", + "src/trap-handler/handler-outside-win.cc", ], "//conditions:default": [], }) + select({ - "@config//:is_windows_64bit": [ + "@v8//bazel/config:is_windows_64bit": [ "src/diagnostics/unwinding-info-win64.cc", "src/diagnostics/unwinding-info-win64.h", ], @@ -2247,6 +2362,7 @@ filegroup( "src/compiler/wasm-compiler.h", "src/compiler/wasm-escape-analysis.h", "src/compiler/wasm-inlining.h", + "src/compiler/wasm-loop-peeling.h", "src/debug/debug-wasm-objects.cc", "src/debug/debug-wasm-objects.h", "src/debug/debug-wasm-objects-inl.h", @@ -2512,8 +2628,6 @@ filegroup( "src/compiler/js-graph.h", "src/compiler/js-heap-broker.cc", "src/compiler/js-heap-broker.h", - "src/compiler/js-heap-copy-reducer.cc", - "src/compiler/js-heap-copy-reducer.h", "src/compiler/js-inlining.cc", "src/compiler/js-inlining.h", "src/compiler/js-inlining-heuristic.cc", @@ -2603,6 +2717,8 @@ filegroup( "src/compiler/select-lowering.h", "src/compiler/simplified-lowering.cc", "src/compiler/simplified-lowering.h", + "src/compiler/simplified-lowering-verifier.cc", + "src/compiler/simplified-lowering-verifier.h", "src/compiler/simplified-operator.cc", "src/compiler/simplified-operator.h", "src/compiler/simplified-operator-reducer.cc", @@ -2632,6 
+2748,7 @@ filegroup( ":is_v8_enable_webassembly": [ "src/compiler/int64-lowering.cc", "src/compiler/wasm-compiler.cc", + "src/compiler/wasm-loop-peeling.cc", "src/compiler/wasm-escape-analysis.cc", "src/compiler/wasm-inlining.cc", ], @@ -2680,9 +2797,11 @@ filegroup( "src/builtins/builtins-proxy-gen.h", "src/builtins/builtins-regexp-gen.cc", "src/builtins/builtins-regexp-gen.h", + "src/builtins/builtins-shadowrealm-gen.cc", "src/builtins/builtins-sharedarraybuffer-gen.cc", "src/builtins/builtins-string-gen.cc", "src/builtins/builtins-string-gen.h", + "src/builtins/builtins-temporal-gen.cc", "src/builtins/builtins-typed-array-gen.cc", "src/builtins/builtins-typed-array-gen.h", "src/builtins/builtins-utils-gen.h", @@ -2709,11 +2828,16 @@ filegroup( "src/interpreter/interpreter-generator.h", "src/interpreter/interpreter-intrinsics-generator.cc", "src/interpreter/interpreter-intrinsics-generator.h", + "src/numbers/integer-literal-inl.h", + "src/numbers/integer-literal.h", ] + select({ - "@config//:v8_target_ia32": ["src/builtins/ia32/builtins-ia32.cc"], - "@config//:v8_target_x64": ["src/builtins/x64/builtins-x64.cc"], - "@config//:v8_target_arm": ["src/builtins/arm/builtins-arm.cc"], - "@config//:v8_target_arm64": ["src/builtins/arm64/builtins-arm64.cc"], + "@v8//bazel/config:v8_target_ia32": ["src/builtins/ia32/builtins-ia32.cc"], + "@v8//bazel/config:v8_target_x64": ["src/builtins/x64/builtins-x64.cc"], + "@v8//bazel/config:v8_target_arm": ["src/builtins/arm/builtins-arm.cc"], + "@v8//bazel/config:v8_target_arm64": ["src/builtins/arm64/builtins-arm64.cc"], + "@v8//bazel/config:v8_target_s390x": ["src/builtins/s390/builtins-s390.cc"], + "@v8//bazel/config:v8_target_riscv64": ["src/builtins/riscv64/builtins-riscv64.cc"], + "@v8//bazel/config:v8_target_ppc64le": ["src/builtins/ppc/builtins-ppc.cc"], }) + select({ ":is_v8_enable_webassembly": [ "src/builtins/builtins-wasm-gen.cc", @@ -2744,6 +2868,7 @@ filegroup( "src/heap/cppgc/gc-info-table.h", 
"src/heap/cppgc/gc-invoker.cc", "src/heap/cppgc/gc-invoker.h", + "src/heap/cppgc/globals.h", "src/heap/cppgc/heap.cc", "src/heap/cppgc/heap.h", "src/heap/cppgc/heap-base.cc", @@ -2800,6 +2925,8 @@ filegroup( "src/heap/cppgc/process-heap-statistics.h", "src/heap/cppgc/raw-heap.cc", "src/heap/cppgc/raw-heap.h", + "src/heap/cppgc/remembered-set.cc", + "src/heap/cppgc/remembered-set.h", "src/heap/cppgc/source-location.cc", "src/heap/cppgc/stats-collector.cc", "src/heap/cppgc/stats-collector.h", @@ -2819,24 +2946,28 @@ filegroup( ) filegroup( - name = "v8_cppgc_shared_files", + name = "v8_heap_base_files", srcs = [ + "src/heap/base/active-system-pages.cc", + "src/heap/base/active-system-pages.h", "src/heap/base/stack.cc", "src/heap/base/stack.h", "src/heap/base/worklist.cc", "src/heap/base/worklist.h", - "src/heap/cppgc/globals.h", ] + select({ # Note these cannot be v8_target_is_* selects because these contain # inline assembly that runs inside the executable. Since these are # linked directly into mksnapshot, they must use the actual target cpu. 
- "@config//:is_inline_asm_ia32": ["src/heap/base/asm/ia32/push_registers_asm.cc"], - "@config//:is_inline_asm_x64": ["src/heap/base/asm/x64/push_registers_asm.cc"], - "@config//:is_inline_asm_arm": ["src/heap/base/asm/arm/push_registers_asm.cc"], - "@config//:is_inline_asm_arm64": ["src/heap/base/asm/arm64/push_registers_asm.cc"], - "@config//:is_msvc_asm_ia32": ["src/heap/base/asm/ia32/push_registers_masm.S"], - "@config//:is_msvc_asm_x64": ["src/heap/base/asm/x64/push_registers_masm.S"], - "@config//:is_msvc_asm_arm64": ["src/heap/base/asm/arm64/push_registers_masm.S"], + "@v8//bazel/config:is_inline_asm_ia32": ["src/heap/base/asm/ia32/push_registers_asm.cc"], + "@v8//bazel/config:is_inline_asm_x64": ["src/heap/base/asm/x64/push_registers_asm.cc"], + "@v8//bazel/config:is_inline_asm_arm": ["src/heap/base/asm/arm/push_registers_asm.cc"], + "@v8//bazel/config:is_inline_asm_arm64": ["src/heap/base/asm/arm64/push_registers_asm.cc"], + "@v8//bazel/config:is_inline_asm_s390x": ["src/heap/base/asm/s390/push_registers_asm.cc"], + "@v8//bazel/config:is_inline_asm_riscv64": ["src/heap/base/asm/riscv64/push_registers_asm.cc"], + "@v8//bazel/config:is_inline_asm_ppc64le": ["src/heap/base/asm/ppc/push_registers_asm.cc"], + "@v8//bazel/config:is_msvc_asm_ia32": ["src/heap/base/asm/ia32/push_registers_masm.S"], + "@v8//bazel/config:is_msvc_asm_x64": ["src/heap/base/asm/x64/push_registers_masm.S"], + "@v8//bazel/config:is_msvc_asm_arm64": ["src/heap/base/asm/arm64/push_registers_masm.S"], }), ) @@ -2977,7 +3108,6 @@ filegroup( "third_party/inspector_protocol/crdtp/protocol_core.h", "third_party/inspector_protocol/crdtp/serializable.cc", "third_party/inspector_protocol/crdtp/serializable.h", - "third_party/inspector_protocol/crdtp/serializer_traits.h", "third_party/inspector_protocol/crdtp/span.cc", "third_party/inspector_protocol/crdtp/span.h", "third_party/inspector_protocol/crdtp/status.cc", @@ -2990,16 +3120,15 @@ filegroup( srcs = [ "src/init/setup-isolate-deserialize.cc", 
] + select({ - "@config//:v8_target_arm": [ + "@v8//bazel/config:v8_target_arm": [ "google3/snapshots/arm/noicu/embedded.S", "google3/snapshots/arm/noicu/snapshot.cc", ], - "@config//:v8_target_ia32": [ + "@v8//bazel/config:v8_target_ia32": [ "google3/snapshots/ia32/noicu/embedded.S", "google3/snapshots/ia32/noicu/snapshot.cc", ], - "@config//:v8_target_arm64": [":noicu/generated_snapshot_files"], - "@config//:v8_target_x64": [":noicu/generated_snapshot_files"], + "//conditions:default": [":noicu/generated_snapshot_files"], }), ) @@ -3008,16 +3137,15 @@ filegroup( srcs = [ "src/init/setup-isolate-deserialize.cc", ] + select({ - "@config//:v8_target_arm": [ + "@v8//bazel/config:v8_target_arm": [ "google3/snapshots/arm/icu/embedded.S", "google3/snapshots/arm/icu/snapshot.cc", ], - "@config//:v8_target_ia32": [ + "@v8//bazel/config:v8_target_ia32": [ "google3/snapshots/ia32/icu/embedded.S", "google3/snapshots/ia32/icu/snapshot.cc", ], - "@config//:v8_target_arm64": [":icu/generated_snapshot_files"], - "@config//:v8_target_x64": [":icu/generated_snapshot_files"], + "//conditions:default": [":icu/generated_snapshot_files"], }), ) @@ -3048,7 +3176,7 @@ v8_torque( ":is_v8_annotate_torque_ir": ["-annotate-ir"], "//conditions:default": [], }) + select({ - "@config//:v8_target_is_32_bits": ["-m32"], + "@v8//bazel/config:v8_target_is_32_bits": ["-m32"], "//conditions:default": [], }), extras = [ @@ -3077,9 +3205,39 @@ v8_torque( noicu_srcs = [":noicu/torque_files"], ) +py_binary( + name = "code_generator", + srcs = [ + "third_party/inspector_protocol/code_generator.py", + "third_party/inspector_protocol/pdl.py", + ], + data = [ + "third_party/inspector_protocol/lib/Forward_h.template", + "third_party/inspector_protocol/lib/Object_cpp.template", + "third_party/inspector_protocol/lib/Object_h.template", + "third_party/inspector_protocol/lib/Protocol_cpp.template", + "third_party/inspector_protocol/lib/ValueConversions_cpp.template", + 
"third_party/inspector_protocol/lib/ValueConversions_h.template", + "third_party/inspector_protocol/lib/Values_cpp.template", + "third_party/inspector_protocol/lib/Values_h.template", + "third_party/inspector_protocol/lib/base_string_adapter_cc.template", + "third_party/inspector_protocol/lib/base_string_adapter_h.template", + "third_party/inspector_protocol/templates/Exported_h.template", + "third_party/inspector_protocol/templates/Imported_h.template", + "third_party/inspector_protocol/templates/TypeBuilder_cpp.template", + "third_party/inspector_protocol/templates/TypeBuilder_h.template", + ], + deps = [ + requirement("jinja2"), + ], +) + genrule( name = "generated_inspector_files", - srcs = ["include/js_protocol.pdl"], + srcs = [ + "include/js_protocol.pdl", + "src/inspector/inspector_protocol_config.json", + ], outs = [ "include/inspector/Debugger.h", "include/inspector/Runtime.h", @@ -3100,10 +3258,16 @@ genrule( "src/inspector/protocol/Schema.cpp", "src/inspector/protocol/Schema.h", ], - cmd = "bazel/generate-inspector-files.sh $(@D)", - cmd_bat = "bazel\\generate-inspector-files.cmd $(@D)", local = 1, + cmd = "$(location :code_generator) --jinja_dir . 
\ + --inspector_protocol_dir third_party/inspector_protocol \ + --config $(location :src/inspector/inspector_protocol_config.json) \ + --config_value protocol.path=$(location :include/js_protocol.pdl) \ + --output_base $(@D)/src/inspector", message = "Generating inspector files", + tools = [ + ":code_generator", + ], ) filegroup( @@ -3216,7 +3380,7 @@ cc_library( ":torque_base_files", ], copts = select({ - "@config//:is_posix": [ "-fexceptions" ], + "@v8//bazel/config:is_posix": ["-fexceptions"], "//conditions:default": [], }), features = ["-use_header_modules"], @@ -3234,7 +3398,7 @@ v8_library( ], icu_deps = [ ":icu/generated_torque_headers", - "@icu", + "//external:icu", ], icu_srcs = [ ":generated_regexp_special_case", @@ -3249,23 +3413,30 @@ v8_library( ], deps = [ ":v8_libbase", - "@zlib", + "//external:base_trace_event_common", + "//external:zlib", + "//external:zlib_compression_utils", ], ) v8_library( name = "v8", srcs = [":v8_inspector_files"], + hdrs = [":public_header_files"], icu_deps = [":icu/v8_libshared"], icu_srcs = [":icu/snapshot_files"], noicu_deps = [":noicu/v8_libshared"], noicu_srcs = [":noicu/snapshot_files"], + visibility = ["//visibility:public"], ) # TODO(victorgomes): Check if v8_enable_webassembly is true. 
v8_library( name = "wee8", srcs = [":wee8_files"], + hdrs = [":public_wasm_c_api_header_files"], + strip_include_prefix = "third_party", + visibility = ["//visibility:public"], deps = [":noicu/v8"], ) @@ -3312,7 +3483,7 @@ v8_binary( "UNISTR_FROM_CHAR_EXPLICIT=", ], deps = [ - "@icu", + "//external:icu", ], ) @@ -3323,12 +3494,12 @@ v8_binary( ":torque_base_files", ], copts = select({ - "@config//:is_posix": [ "-fexceptions" ], + "@v8//bazel/config:is_posix": ["-fexceptions"], "//conditions:default": [], }), features = ["-use_header_modules"], linkopts = select({ - "@config//:is_android": ["-llog"], + "@v8//bazel/config:is_android": ["-llog"], "//conditions:default": [], }), deps = ["v8_libbase"], @@ -3339,7 +3510,7 @@ v8_binary( srcs = [":mksnapshot_files"], icu_deps = [":icu/v8_libshared"], linkopts = select({ - "@config//:is_android": ["-llog"], + "@v8//bazel/config:is_android": ["-llog"], "//conditions:default": [], }), noicu_deps = [":v8_libshared_noicu"], diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 19731feebe82b4..7b19ee86d8d9b5 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -83,7 +83,7 @@ declare_args() { v8_enable_hugepage = false # Sets -dENABLE_HANDLE_ZAPPING. - v8_enable_handle_zapping = is_debug + v8_enable_handle_zapping = !is_on_release_branch || is_debug # Enable slow dchecks. v8_enable_slow_dchecks = false @@ -198,6 +198,10 @@ declare_args() { # Sets -dV8_EXTERNAL_CODE_SPACE v8_enable_external_code_space = "" + # Enable the Maglev compiler. + # Sets -dV8_ENABLE_MAGLEV + v8_enable_maglev = "" + # With post mortem support enabled, metadata is embedded into libv8 that # describes various parameters of the VM for use by debuggers. See # tools/gen-postmortem-metadata.py for details. @@ -225,9 +229,6 @@ declare_args() { (is_linux || is_chromeos || is_mac)) || (v8_current_cpu == "ppc64" && (is_linux || is_chromeos)) - # Enable minor mark compact. 
- v8_enable_minor_mc = true - # Check that each header can be included in isolation (requires also # setting the "check_v8_header_includes" gclient variable to run a # specific hook). @@ -281,9 +282,6 @@ declare_args() { # ARM64. v8_control_flow_integrity = false - # Enable object names in cppgc for debug purposes. - cppgc_enable_object_names = false - # Enable heap reservation of size 4GB. Only possible for 64bit archs. cppgc_enable_caged_heap = v8_current_cpu == "x64" || v8_current_cpu == "arm64" || @@ -295,21 +293,24 @@ declare_args() { # Enable allocations during prefinalizer invocations. cppgc_allow_allocations_in_prefinalizers = false - # Enable young generation in cppgc. - cppgc_enable_young_generation = false - # Enable V8 zone compression experimental feature. # Sets -DV8_COMPRESS_ZONES. v8_enable_zone_compression = "" - # Enable V8 heap sandbox experimental feature. - # Sets -DV8_HEAP_SANDBOX. - v8_enable_heap_sandbox = "" + # Enable the experimental V8 sandbox. + # Sets -DV8_SANDBOX. + v8_enable_sandbox = false + + # Enable external pointer sandboxing. Requires v8_enable_sandbox. + # Sets -DV8_SANDBOXED_EXTERNAL_POINRTERS. + v8_enable_sandboxed_external_pointers = false - # Enable the Virtual Memory Cage, which contains the pointer compression cage - # as well as ArrayBuffer BackingStores and WASM memory cages. - # Sets -DV8_VIRTUAL_MEMORY_CAGE. - v8_enable_virtual_memory_cage = "" + # Enable sandboxed pointers. Requires v8_enable_sandbox. + # Sets -DV8_SANDBOXED_POINTERS. + v8_enable_sandboxed_pointers = false + + # Enable all available sandbox features. Implies v8_enable_sandbox. + v8_enable_sandbox_future = false # Experimental feature for collecting per-class zone memory stats. # Requires use_rtti = true @@ -333,12 +334,15 @@ declare_args() { v8_enable_map_packing = false # Allow for JS promise hooks (instead of just C++). 
- v8_allow_javascript_in_promise_hooks = false + v8_enable_javascript_promise_hooks = false # Enable allocation folding globally (sets -dV8_ALLOCATION_FOLDING). # When it's disabled, the --turbo-allocation-folding runtime flag will be ignored. v8_enable_allocation_folding = true + # Enable runtime verification of heap snapshots produced for devtools. + v8_enable_heap_snapshot_verify = "" + # Enable global allocation site tracking. v8_allocation_site_tracking = true @@ -348,11 +352,16 @@ declare_args() { # This is only used by nodejs. v8_scriptormodule_legacy_lifetime = false - # If enabled, the receiver is always included in the actual and formal - # parameter count of function with JS linkage. - # TODO(v8:11112): Remove once all architectures support the flag and it is - # enabled unconditionally. - v8_include_receiver_in_argc = true + # Change code emission and runtime features to be CET shadow-stack compliant + # (incomplete and experimental). + v8_enable_cet_shadow_stack = false + + # Get VMEX priviledge at startup. + # It allows to run V8 without "deprecated-ambient-replace-as-executable". + # Sets -DV8_USE_VMEX_RESOURCE. + # TODO(victorgomes): Remove this flag once Chormium no longer needs + # the deprecated feature. + v8_fuchsia_use_vmex_resource = is_fuchsia && !build_with_chromium } # Derived defaults. 
@@ -377,6 +386,10 @@ if (v8_enable_test_features == "") { if (v8_enable_v8_checks == "") { v8_enable_v8_checks = v8_enable_debugging_features } +if (v8_enable_heap_snapshot_verify == "") { + v8_enable_heap_snapshot_verify = + v8_enable_debugging_features || dcheck_always_on +} if (v8_enable_snapshot_code_comments) { assert(v8_code_comments == true || v8_code_comments == "", "v8_enable_snapshot_code_comments conflicts with v8_code_comments.") @@ -403,18 +416,21 @@ if (v8_enable_fast_torque == "") { if (v8_enable_zone_compression == "") { v8_enable_zone_compression = false } -if (v8_enable_heap_sandbox == "") { - v8_enable_heap_sandbox = false -} -if (v8_enable_virtual_memory_cage == "") { - v8_enable_virtual_memory_cage = v8_enable_heap_sandbox -} if (v8_enable_short_builtin_calls == "") { v8_enable_short_builtin_calls = v8_current_cpu == "x64" || (!is_android && v8_current_cpu == "arm64") } if (v8_enable_external_code_space == "") { - v8_enable_external_code_space = false + # Can't use !is_android here, because Torque toolchain is affected by + # the value of this flag but actually runs on the host side. + v8_enable_external_code_space = + v8_enable_pointer_compression && + (v8_current_cpu == "x64" || + (target_os != "android" && target_os != "fuchsia" && + v8_current_cpu == "arm64")) +} +if (v8_enable_maglev == "") { + v8_enable_maglev = v8_current_cpu == "x64" && v8_enable_pointer_compression } if (v8_enable_single_generation == "") { v8_enable_single_generation = v8_disable_write_barriers @@ -452,7 +468,16 @@ if (v8_multi_arch_build && "clang_x64_pointer_compression") { v8_enable_pointer_compression = !v8_enable_pointer_compression v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression + v8_enable_external_code_space = v8_enable_pointer_compression +} + +# Check if it is a Chromium build and activate PAC/BTI if needed. +# TODO(cavalcantii): have a single point of integration with PAC/BTI flags. 
+if (build_with_chromium && v8_current_cpu == "arm64" && + arm_control_flow_integrity == "standard") { + v8_control_flow_integrity = true } + if (v8_enable_short_builtin_calls && ((!v8_enable_pointer_compression && v8_current_cpu != "x64") || v8_control_flow_integrity)) { @@ -467,15 +492,17 @@ if (v8_enable_shared_ro_heap == "") { v8_enable_pointer_compression_shared_cage } -# Check if it is a Chromium build and activate PAC/BTI if needed. -if (build_with_chromium && v8_current_cpu == "arm64" && - arm_control_flow_integrity == "standard") { - v8_control_flow_integrity = true +# Enable the v8 sandbox on 64-bit Chromium builds. +if (build_with_chromium && v8_enable_pointer_compression_shared_cage && + v8_enable_external_code_space) { + v8_enable_sandbox = true } -# Enable the virtual memory cage on 64-bit Chromium builds. -if (build_with_chromium && v8_enable_pointer_compression_shared_cage) { - v8_enable_virtual_memory_cage = true +# Enable all available sandbox features if sandbox future is enabled. 
+if (v8_enable_sandbox_future) { + v8_enable_sandboxed_pointers = true + v8_enable_sandboxed_external_pointers = true + v8_enable_sandbox = true } assert(!v8_disable_write_barriers || v8_enable_single_generation, @@ -500,18 +527,17 @@ assert(!v8_enable_map_packing || v8_current_cpu == "x64", assert(!v8_enable_external_code_space || v8_enable_pointer_compression, "External code space feature requires pointer compression") -assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression, - "V8 Heap Sandbox requires pointer compression") +assert(!v8_enable_sandbox || v8_enable_pointer_compression_shared_cage, + "The sandbox requires the shared pointer compression cage") -assert(!v8_enable_heap_sandbox || !v8_enable_external_code_space, - "V8 Heap Sandbox is not compatible with external code space YET") +assert(!v8_enable_sandbox || v8_enable_external_code_space, + "The sandbox requires the external code space") -assert(!v8_enable_heap_sandbox || v8_enable_virtual_memory_cage, - "The Heap Sandbox requires the virtual memory cage") +assert(!v8_enable_sandboxed_pointers || v8_enable_sandbox, + "Sandboxed pointers require the sandbox") -assert( - !v8_enable_virtual_memory_cage || v8_enable_pointer_compression_shared_cage, - "V8 VirtualMemoryCage requires the shared pointer compression cage") +assert(!v8_enable_sandboxed_external_pointers || v8_enable_sandbox, + "Sandboxed external pointers require the sandbox") assert( !v8_enable_pointer_compression_shared_cage || v8_enable_pointer_compression, @@ -541,6 +567,10 @@ if (v8_enable_single_generation == true) { assert(!v8_enable_conservative_stack_scanning || v8_enable_single_generation, "Conservative stack scanning requires single generation") +if (v8_fuchsia_use_vmex_resource) { + assert(target_os == "fuchsia", "VMEX resource only available on Fuchsia") +} + v8_random_seed = "314159265" v8_toolset_for_shell = "host" @@ -673,8 +703,9 @@ external_v8_defines = [ "V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE", 
"V8_31BIT_SMIS_ON_64BIT_ARCH", "V8_COMPRESS_ZONES", - "V8_HEAP_SANDBOX", - "V8_VIRTUAL_MEMORY_CAGE", + "V8_SANDBOX", + "V8_SANDBOXED_POINTERS", + "V8_SANDBOXED_EXTERNAL_POINTERS", "V8_DEPRECATION_WARNINGS", "V8_IMMINENT_DEPRECATION_WARNINGS", "V8_NO_ARGUMENTS_ADAPTOR", @@ -702,11 +733,14 @@ if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) { if (v8_enable_zone_compression) { enabled_external_v8_defines += [ "V8_COMPRESS_ZONES" ] } -if (v8_enable_heap_sandbox) { - enabled_external_v8_defines += [ "V8_HEAP_SANDBOX" ] +if (v8_enable_sandbox) { + enabled_external_v8_defines += [ "V8_SANDBOX" ] +} +if (v8_enable_sandboxed_pointers) { + enabled_external_v8_defines += [ "V8_SANDBOXED_POINTERS" ] } -if (v8_enable_virtual_memory_cage) { - enabled_external_v8_defines += [ "V8_VIRTUAL_MEMORY_CAGE" ] +if (v8_enable_sandboxed_external_pointers) { + enabled_external_v8_defines += [ "V8_SANDBOXED_EXTERNAL_POINTERS" ] } if (v8_deprecation_warnings) { enabled_external_v8_defines += [ "V8_DEPRECATION_WARNINGS" ] @@ -810,10 +844,6 @@ config("features") { defines += [ "V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP=${v8_typed_array_max_size_in_heap}" ] - assert( - !v8_enable_raw_heap_snapshots, - "This flag is deprecated and is now available through the inspector interface as an argument to profiler's method `takeHeapSnapshot`. 
Consider using blink's flag `enable_additional_blink_object_names` to get better naming of internal objects.") - if (v8_enable_future) { defines += [ "V8_ENABLE_FUTURE" ] } @@ -832,9 +862,6 @@ config("features") { if (v8_enable_hugepage) { defines += [ "ENABLE_HUGEPAGE" ] } - if (v8_enable_minor_mc) { - defines += [ "ENABLE_MINOR_MC" ] - } if (v8_enable_object_print) { defines += [ "OBJECT_PRINT" ] } @@ -870,6 +897,9 @@ config("features") { if (v8_enable_debug_code) { defines += [ "V8_ENABLE_DEBUG_CODE" ] } + if (v8_enable_heap_snapshot_verify) { + defines += [ "V8_ENABLE_HEAP_SNAPSHOT_VERIFY" ] + } if (v8_enable_snapshot_native_code_counters) { defines += [ "V8_SNAPSHOT_NATIVE_CODE_COUNTERS" ] } @@ -915,6 +945,9 @@ config("features") { if (v8_control_flow_integrity) { defines += [ "V8_ENABLE_CONTROL_FLOW_INTEGRITY" ] } + if (v8_enable_cet_shadow_stack) { + defines += [ "V8_ENABLE_CET_SHADOW_STACK" ] + } if (v8_enable_wasm_gdb_remote_debugging) { defines += [ "V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING" ] } @@ -930,6 +963,9 @@ config("features") { if (v8_enable_external_code_space) { defines += [ "V8_EXTERNAL_CODE_SPACE" ] } + if (v8_enable_maglev) { + defines += [ "V8_ENABLE_MAGLEV" ] + } if (v8_enable_swiss_name_dictionary) { defines += [ "V8_ENABLE_SWISS_NAME_DICTIONARY" ] } @@ -945,8 +981,8 @@ config("features") { if (v8_dict_property_const_tracking) { defines += [ "V8_DICT_PROPERTY_CONST_TRACKING" ] } - if (v8_allow_javascript_in_promise_hooks) { - defines += [ "V8_ALLOW_JAVASCRIPT_IN_PROMISE_HOOKS" ] + if (v8_enable_javascript_promise_hooks) { + defines += [ "V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS" ] } if (v8_enable_allocation_folding) { defines += [ "V8_ALLOCATION_FOLDING" ] @@ -960,8 +996,8 @@ config("features") { if (v8_advanced_bigint_algorithms) { defines += [ "V8_ADVANCED_BIGINT_ALGORITHMS" ] } - if (v8_include_receiver_in_argc) { - defines += [ "V8_INCLUDE_RECEIVER_IN_ARGC" ] + if (v8_fuchsia_use_vmex_resource) { + defines += [ "V8_USE_VMEX_RESOURCE" ] } } @@ 
-993,7 +1029,7 @@ config("toolchain") { ] } - # TODO(jochen): Add support for arm_test_noprobe. + # TODO(infra): Add support for arm_test_noprobe. if (current_cpu != "arm") { # These defines ares used for the ARM simulator. @@ -1006,11 +1042,17 @@ config("toolchain") { } if (v8_current_cpu == "arm64") { defines += [ "V8_TARGET_ARCH_ARM64" ] - if (v8_control_flow_integrity) { - # TODO(v8:10026): Enable this in src/build. - if (current_cpu == "arm64") { + if (current_cpu == "arm64") { + # This will enable PAC+BTI in code generation and static code. + if (v8_control_flow_integrity) { + # TODO(v8:10026): Enable this in src/build. cflags += [ "-mbranch-protection=standard" ] asmflags = [ "-mmark-bti-property" ] + } else if (build_with_chromium && arm_control_flow_integrity == "pac") { + # This should enable PAC only in C++ code (and no CFI in runtime + # generated code). For details, see crbug.com/919548. + cflags += [ "-mbranch-protection=pac-ret" ] + asmflags = [ "-mbranch-protection=pac-ret" ] } } } @@ -1055,7 +1097,7 @@ config("toolchain") { defines += [ "FPU_MODE_FP32" ] } - # TODO(jochen): Add support for mips_arch_variant rx and loongson. + # TODO(infra): Add support for mips_arch_variant rx and loongson. } if (v8_current_cpu == "mips64el" || v8_current_cpu == "mips64") { @@ -1177,14 +1219,14 @@ config("toolchain") { defines += [ "V8_TARGET_OS_LINUX" ] } else if (target_os == "mac") { defines += [ "V8_HAVE_TARGET_OS" ] - defines += [ "V8_TARGET_OS_MACOSX" ] + defines += [ "V8_TARGET_OS_MACOS" ] } else if (target_os == "win") { defines += [ "V8_HAVE_TARGET_OS" ] defines += [ "V8_TARGET_OS_WIN" ] } - # TODO(jochen): Support v8_enable_prof on Windows. - # TODO(jochen): Add support for compiling with simulators. + # TODO(infra): Support v8_enable_prof on Windows. + # TODO(infra): Add support for compiling with simulators. 
if (v8_enable_debugging_features) { if ((is_linux || is_chromeos) && v8_enable_backtrace) { @@ -1716,6 +1758,7 @@ torque_files = [ "src/objects/api-callbacks.tq", "src/objects/arguments.tq", "src/objects/bigint.tq", + "src/objects/call-site-info.tq", "src/objects/cell.tq", "src/objects/code.tq", "src/objects/contexts.tq", @@ -1741,6 +1784,8 @@ torque_files = [ "src/objects/js-proxy.tq", "src/objects/js-regexp-string-iterator.tq", "src/objects/js-regexp.tq", + "src/objects/js-shadow-realms.tq", + "src/objects/js-struct.tq", "src/objects/js-temporal-objects.tq", "src/objects/js-weak-refs.tq", "src/objects/literal-objects.tq", @@ -1762,7 +1807,6 @@ torque_files = [ "src/objects/script.tq", "src/objects/shared-function-info.tq", "src/objects/source-text-module.tq", - "src/objects/stack-frame-info.tq", "src/objects/string.tq", "src/objects/struct.tq", "src/objects/swiss-hash-table-helpers.tq", @@ -2197,12 +2241,14 @@ action("v8_dump_build_config") { "v8_enable_pointer_compression=$v8_enable_pointer_compression", "v8_enable_pointer_compression_shared_cage=" + "$v8_enable_pointer_compression_shared_cage", - "v8_enable_virtual_memory_cage=$v8_enable_virtual_memory_cage", + "v8_enable_sandbox=$v8_enable_sandbox", + "v8_enable_shared_ro_heap=$v8_enable_shared_ro_heap", "v8_enable_third_party_heap=$v8_enable_third_party_heap", "v8_enable_webassembly=$v8_enable_webassembly", "v8_dict_property_const_tracking=$v8_dict_property_const_tracking", "v8_control_flow_integrity=$v8_control_flow_integrity", "v8_target_cpu=\"$v8_target_cpu\"", + "v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack", ] if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" || @@ -2317,9 +2363,11 @@ v8_source_set("v8_initializers") { "src/builtins/builtins-proxy-gen.h", "src/builtins/builtins-regexp-gen.cc", "src/builtins/builtins-regexp-gen.h", + "src/builtins/builtins-shadowrealm-gen.cc", "src/builtins/builtins-sharedarraybuffer-gen.cc", "src/builtins/builtins-string-gen.cc", 
"src/builtins/builtins-string-gen.h", + "src/builtins/builtins-temporal-gen.cc", "src/builtins/builtins-typed-array-gen.cc", "src/builtins/builtins-typed-array-gen.h", "src/builtins/builtins-utils-gen.h", @@ -2346,6 +2394,8 @@ v8_source_set("v8_initializers") { "src/interpreter/interpreter-generator.h", "src/interpreter/interpreter-intrinsics-generator.cc", "src/interpreter/interpreter-intrinsics-generator.h", + "src/numbers/integer-literal-inl.h", + "src/numbers/integer-literal.h", ] if (v8_enable_webassembly) { @@ -2679,8 +2729,10 @@ v8_header_set("v8_internal_headers") { "src/codegen/optimized-compilation-info.h", "src/codegen/pending-optimization-table.h", "src/codegen/register-arch.h", + "src/codegen/register-base.h", "src/codegen/register-configuration.h", "src/codegen/register.h", + "src/codegen/reglist-base.h", "src/codegen/reglist.h", "src/codegen/reloc-info.h", "src/codegen/safepoint-table.h", @@ -2693,10 +2745,12 @@ v8_header_set("v8_internal_headers") { "src/codegen/tnode.h", "src/codegen/turbo-assembler.h", "src/codegen/unoptimized-compilation-info.h", + "src/common/allow-deprecated.h", "src/common/assert-scope.h", "src/common/checks.h", "src/common/high-allocation-throughput-scope.h", "src/common/message-template.h", + "src/common/operation.h", "src/common/ptr-compr-inl.h", "src/common/ptr-compr.h", "src/compiler-dispatcher/lazy-compile-dispatcher.h", @@ -2765,7 +2819,6 @@ v8_header_set("v8_internal_headers") { "src/compiler/js-generic-lowering.h", "src/compiler/js-graph.h", "src/compiler/js-heap-broker.h", - "src/compiler/js-heap-copy-reducer.h", "src/compiler/js-inlining-heuristic.h", "src/compiler/js-inlining.h", "src/compiler/js-intrinsic-lowering.h", @@ -2812,6 +2865,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/schedule.h", "src/compiler/scheduler.h", "src/compiler/select-lowering.h", + "src/compiler/simplified-lowering-verifier.h", "src/compiler/simplified-lowering.h", "src/compiler/simplified-operator-reducer.h", 
"src/compiler/simplified-operator.h", @@ -2860,6 +2914,7 @@ v8_header_set("v8_internal_headers") { "src/diagnostics/unwinder.h", "src/execution/arguments-inl.h", "src/execution/arguments.h", + "src/execution/clobber-registers.h", "src/execution/embedder-state.h", "src/execution/encoded-c-signature.h", "src/execution/execution.h", @@ -2880,13 +2935,13 @@ v8_header_set("v8_internal_headers") { "src/execution/pointer-authentication.h", "src/execution/protectors-inl.h", "src/execution/protectors.h", - "src/execution/runtime-profiler.h", "src/execution/shared-mutex-guard-if-off-thread.h", "src/execution/simulator-base.h", "src/execution/simulator.h", "src/execution/stack-guard.h", "src/execution/thread-id.h", "src/execution/thread-local-top.h", + "src/execution/tiering-manager.h", "src/execution/v8threads.h", "src/execution/vm-state-inl.h", "src/execution/vm-state.h", @@ -2906,6 +2961,7 @@ v8_header_set("v8_internal_headers") { "src/handles/maybe-handles.h", "src/handles/persistent-handles.h", "src/heap/allocation-observer.h", + "src/heap/allocation-result.h", "src/heap/allocation-stats.h", "src/heap/array-buffer-sweeper.h", "src/heap/barrier.h", @@ -2920,11 +2976,16 @@ v8_header_set("v8_internal_headers") { "src/heap/concurrent-allocator.h", "src/heap/concurrent-marking.h", "src/heap/cppgc-js/cpp-heap.h", + "src/heap/cppgc-js/cpp-marking-state-inl.h", + "src/heap/cppgc-js/cpp-marking-state.h", "src/heap/cppgc-js/cpp-snapshot.h", "src/heap/cppgc-js/unified-heap-marking-state.h", "src/heap/cppgc-js/unified-heap-marking-verifier.h", "src/heap/cppgc-js/unified-heap-marking-visitor.h", + "src/heap/embedder-tracing-inl.h", "src/heap/embedder-tracing.h", + "src/heap/evacuation-allocator-inl.h", + "src/heap/evacuation-allocator.h", "src/heap/factory-base-inl.h", "src/heap/factory-base.h", "src/heap/factory-inl.h", @@ -2934,6 +2995,8 @@ v8_header_set("v8_internal_headers") { "src/heap/free-list.h", "src/heap/gc-idle-time-handler.h", "src/heap/gc-tracer.h", + 
"src/heap/heap-allocator-inl.h", + "src/heap/heap-allocator.h", "src/heap/heap-controller.h", "src/heap/heap-inl.h", "src/heap/heap-layout-tracer.h", @@ -2949,8 +3012,6 @@ v8_header_set("v8_internal_headers") { "src/heap/large-spaces.h", "src/heap/linear-allocation-area.h", "src/heap/list.h", - "src/heap/local-allocator-inl.h", - "src/heap/local-allocator.h", "src/heap/local-factory-inl.h", "src/heap/local-factory.h", "src/heap/local-heap-inl.h", @@ -3066,6 +3127,8 @@ v8_header_set("v8_internal_headers") { "src/objects/backing-store.h", "src/objects/bigint-inl.h", "src/objects/bigint.h", + "src/objects/call-site-info-inl.h", + "src/objects/call-site-info.h", "src/objects/cell-inl.h", "src/objects/cell.h", "src/objects/code-inl.h", @@ -3140,6 +3203,10 @@ v8_header_set("v8_internal_headers") { "src/objects/js-regexp.h", "src/objects/js-segments-inl.h", "src/objects/js-segments.h", + "src/objects/js-shadow-realms-inl.h", + "src/objects/js-shadow-realms.h", + "src/objects/js-struct-inl.h", + "src/objects/js-struct.h", "src/objects/js-temporal-objects-inl.h", "src/objects/js-temporal-objects.h", "src/objects/js-weak-refs-inl.h", @@ -3213,8 +3280,6 @@ v8_header_set("v8_internal_headers") { "src/objects/smi.h", "src/objects/source-text-module-inl.h", "src/objects/source-text-module.h", - "src/objects/stack-frame-info-inl.h", - "src/objects/stack-frame-info.h", "src/objects/string-comparator.h", "src/objects/string-inl.h", "src/objects/string-set-inl.h", @@ -3315,12 +3380,13 @@ v8_header_set("v8_internal_headers") { "src/roots/roots.h", "src/runtime/runtime-utils.h", "src/runtime/runtime.h", - "src/security/caged-pointer-inl.h", - "src/security/caged-pointer.h", - "src/security/external-pointer-inl.h", - "src/security/external-pointer-table.h", - "src/security/external-pointer.h", - "src/security/vm-cage.h", + "src/sandbox/external-pointer-inl.h", + "src/sandbox/external-pointer-table-inl.h", + "src/sandbox/external-pointer-table.h", + "src/sandbox/external-pointer.h", + 
"src/sandbox/sandbox.h", + "src/sandbox/sandboxed-pointer-inl.h", + "src/sandbox/sandboxed-pointer.h", "src/snapshot/code-serializer.h", "src/snapshot/context-deserializer.h", "src/snapshot/context-serializer.h", @@ -3376,7 +3442,6 @@ v8_header_set("v8_internal_headers") { "src/utils/locked-queue.h", "src/utils/memcopy.h", "src/utils/ostreams.h", - "src/utils/pointer-with-payload.h", "src/utils/scoped-list.h", "src/utils/utils-inl.h", "src/utils/utils.h", @@ -3402,6 +3467,30 @@ v8_header_set("v8_internal_headers") { sources -= [ "//base/trace_event/common/trace_event_common.h" ] } + if (v8_enable_maglev) { + sources += [ + "src/maglev/maglev-basic-block.h", + "src/maglev/maglev-code-gen-state.h", + "src/maglev/maglev-code-generator.h", + "src/maglev/maglev-compilation-info.h", + "src/maglev/maglev-compilation-unit.h", + "src/maglev/maglev-compiler.h", + "src/maglev/maglev-concurrent-dispatcher.h", + "src/maglev/maglev-graph-builder.h", + "src/maglev/maglev-graph-labeller.h", + "src/maglev/maglev-graph-printer.h", + "src/maglev/maglev-graph-processor.h", + "src/maglev/maglev-graph.h", + "src/maglev/maglev-interpreter-frame-state.h", + "src/maglev/maglev-ir.h", + "src/maglev/maglev-regalloc-data.h", + "src/maglev/maglev-regalloc.h", + "src/maglev/maglev-register-frame-array.h", + "src/maglev/maglev-vreg-allocator.h", + "src/maglev/maglev.h", + ] + } + if (v8_enable_webassembly) { sources += [ "src/asmjs/asm-js.h", @@ -3413,6 +3502,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/wasm-compiler.h", "src/compiler/wasm-escape-analysis.h", "src/compiler/wasm-inlining.h", + "src/compiler/wasm-loop-peeling.h", "src/debug/debug-wasm-objects-inl.h", "src/debug/debug-wasm-objects.h", "src/trap-handler/trap-handler-internal.h", @@ -3529,6 +3619,10 @@ v8_header_set("v8_internal_headers") { ] } + if (v8_enable_heap_snapshot_verify) { + sources += [ "src/heap/reference-summarizer.h" ] + } + if (v8_current_cpu == "x86") { sources += [ ### gcmole(arch:ia32) ### 
"src/baseline/ia32/baseline-assembler-ia32-inl.h", @@ -3539,6 +3633,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/ia32/interface-descriptors-ia32-inl.h", "src/codegen/ia32/macro-assembler-ia32.h", "src/codegen/ia32/register-ia32.h", + "src/codegen/ia32/reglist-ia32.h", "src/codegen/ia32/sse-instr.h", "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h", "src/compiler/backend/ia32/instruction-codes-ia32.h", @@ -3558,6 +3653,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/x64/interface-descriptors-x64-inl.h", "src/codegen/x64/macro-assembler-x64.h", "src/codegen/x64/register-x64.h", + "src/codegen/x64/reglist-x64.h", "src/codegen/x64/sse-instr.h", "src/compiler/backend/x64/instruction-codes-x64.h", "src/compiler/backend/x64/unwinding-info-writer-x64.h", @@ -3591,6 +3687,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/arm/interface-descriptors-arm-inl.h", "src/codegen/arm/macro-assembler-arm.h", "src/codegen/arm/register-arm.h", + "src/codegen/arm/reglist-arm.h", "src/compiler/backend/arm/instruction-codes-arm.h", "src/compiler/backend/arm/unwinding-info-writer-arm.h", "src/execution/arm/frame-constants-arm.h", @@ -3612,6 +3709,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/arm64/macro-assembler-arm64-inl.h", "src/codegen/arm64/macro-assembler-arm64.h", "src/codegen/arm64/register-arm64.h", + "src/codegen/arm64/reglist-arm64.h", "src/codegen/arm64/utils-arm64.h", "src/compiler/backend/arm64/instruction-codes-arm64.h", "src/compiler/backend/arm64/unwinding-info-writer-arm64.h", @@ -3648,6 +3746,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/mips/constants-mips.h", "src/codegen/mips/macro-assembler-mips.h", "src/codegen/mips/register-mips.h", + "src/codegen/mips/reglist-mips.h", "src/compiler/backend/mips/instruction-codes-mips.h", "src/execution/mips/frame-constants-mips.h", "src/execution/mips/simulator-mips.h", @@ -3663,6 +3762,7 @@ v8_header_set("v8_internal_headers") { 
"src/codegen/mips64/constants-mips64.h", "src/codegen/mips64/macro-assembler-mips64.h", "src/codegen/mips64/register-mips64.h", + "src/codegen/mips64/reglist-mips64.h", "src/compiler/backend/mips64/instruction-codes-mips64.h", "src/execution/mips64/frame-constants-mips64.h", "src/execution/mips64/simulator-mips64.h", @@ -3678,6 +3778,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/loong64/constants-loong64.h", "src/codegen/loong64/macro-assembler-loong64.h", "src/codegen/loong64/register-loong64.h", + "src/codegen/loong64/reglist-loong64.h", "src/compiler/backend/loong64/instruction-codes-loong64.h", "src/execution/loong64/frame-constants-loong64.h", "src/execution/loong64/simulator-loong64.h", @@ -3692,6 +3793,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/ppc/interface-descriptors-ppc-inl.h", "src/codegen/ppc/macro-assembler-ppc.h", "src/codegen/ppc/register-ppc.h", + "src/codegen/ppc/reglist-ppc.h", "src/compiler/backend/ppc/instruction-codes-ppc.h", "src/compiler/backend/ppc/unwinding-info-writer-ppc.h", "src/execution/ppc/frame-constants-ppc.h", @@ -3701,14 +3803,15 @@ v8_header_set("v8_internal_headers") { ] } else if (v8_current_cpu == "ppc64") { sources += [ ### gcmole(arch:ppc64) ### - "src/baseline/s390/baseline-assembler-s390-inl.h", - "src/baseline/s390/baseline-compiler-s390-inl.h", + "src/baseline/ppc/baseline-assembler-ppc-inl.h", + "src/baseline/ppc/baseline-compiler-ppc-inl.h", "src/codegen/ppc/assembler-ppc-inl.h", "src/codegen/ppc/assembler-ppc.h", "src/codegen/ppc/constants-ppc.h", "src/codegen/ppc/interface-descriptors-ppc-inl.h", "src/codegen/ppc/macro-assembler-ppc.h", "src/codegen/ppc/register-ppc.h", + "src/codegen/ppc/reglist-ppc.h", "src/compiler/backend/ppc/instruction-codes-ppc.h", "src/compiler/backend/ppc/unwinding-info-writer-ppc.h", "src/execution/ppc/frame-constants-ppc.h", @@ -3726,6 +3829,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/s390/interface-descriptors-s390-inl.h", 
"src/codegen/s390/macro-assembler-s390.h", "src/codegen/s390/register-s390.h", + "src/codegen/s390/reglist-s390.h", "src/compiler/backend/s390/instruction-codes-s390.h", "src/compiler/backend/s390/unwinding-info-writer-s390.h", "src/execution/s390/frame-constants-s390.h", @@ -3742,6 +3846,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/riscv64/constants-riscv64.h", "src/codegen/riscv64/macro-assembler-riscv64.h", "src/codegen/riscv64/register-riscv64.h", + "src/codegen/riscv64/reglist-riscv64.h", "src/compiler/backend/riscv64/instruction-codes-riscv64.h", "src/execution/riscv64/frame-constants-riscv64.h", "src/execution/riscv64/simulator-riscv64.h", @@ -3762,6 +3867,7 @@ v8_header_set("v8_internal_headers") { ":cppgc_headers", ":generate_bytecode_builtins_list", ":run_torque", + ":v8_heap_base_headers", ":v8_libbase", ] } @@ -3822,7 +3928,6 @@ v8_compiler_sources = [ "src/compiler/js-generic-lowering.cc", "src/compiler/js-graph.cc", "src/compiler/js-heap-broker.cc", - "src/compiler/js-heap-copy-reducer.cc", "src/compiler/js-inlining-heuristic.cc", "src/compiler/js-inlining.cc", "src/compiler/js-intrinsic-lowering.cc", @@ -3864,6 +3969,7 @@ v8_compiler_sources = [ "src/compiler/schedule.cc", "src/compiler/scheduler.cc", "src/compiler/select-lowering.cc", + "src/compiler/simplified-lowering-verifier.cc", "src/compiler/simplified-lowering.cc", "src/compiler/simplified-operator-reducer.cc", "src/compiler/simplified-operator.cc", @@ -3885,6 +3991,7 @@ if (v8_enable_webassembly) { "src/compiler/wasm-compiler.cc", "src/compiler/wasm-escape-analysis.cc", "src/compiler/wasm-inlining.cc", + "src/compiler/wasm-loop-peeling.cc", ] } @@ -4007,8 +4114,10 @@ v8_source_set("v8_base_without_compiler") { "src/builtins/builtins-object.cc", "src/builtins/builtins-reflect.cc", "src/builtins/builtins-regexp.cc", + "src/builtins/builtins-shadow-realms.cc", "src/builtins/builtins-sharedarraybuffer.cc", "src/builtins/builtins-string.cc", + "src/builtins/builtins-struct.cc", 
"src/builtins/builtins-symbol.cc", "src/builtins/builtins-temporal.cc", "src/builtins/builtins-trace.cc", @@ -4077,6 +4186,7 @@ v8_source_set("v8_base_without_compiler") { "src/diagnostics/perf-jit.cc", "src/diagnostics/unwinder.cc", "src/execution/arguments.cc", + "src/execution/clobber-registers.cc", "src/execution/embedder-state.cc", "src/execution/encoded-c-signature.cc", "src/execution/execution.cc", @@ -4088,11 +4198,11 @@ v8_source_set("v8_base_without_compiler") { "src/execution/messages.cc", "src/execution/microtask-queue.cc", "src/execution/protectors.cc", - "src/execution/runtime-profiler.cc", "src/execution/simulator-base.cc", "src/execution/stack-guard.cc", "src/execution/thread-id.cc", "src/execution/thread-local-top.cc", + "src/execution/tiering-manager.cc", "src/execution/v8threads.cc", "src/extensions/cputracemark-extension.cc", "src/extensions/externalize-string-extension.cc", @@ -4127,6 +4237,7 @@ v8_source_set("v8_base_without_compiler") { "src/heap/free-list.cc", "src/heap/gc-idle-time-handler.cc", "src/heap/gc-tracer.cc", + "src/heap/heap-allocator.cc", "src/heap/heap-controller.cc", "src/heap/heap-layout-tracer.cc", "src/heap/heap-write-barrier.cc", @@ -4205,6 +4316,7 @@ v8_source_set("v8_base_without_compiler") { "src/numbers/math-random.cc", "src/objects/backing-store.cc", "src/objects/bigint.cc", + "src/objects/call-site-info.cc", "src/objects/code-kind.cc", "src/objects/code.cc", "src/objects/compilation-cache-table.cc", @@ -4232,6 +4344,7 @@ v8_source_set("v8_base_without_compiler") { "src/objects/js-segment-iterator.cc", "src/objects/js-segmenter.cc", "src/objects/js-segments.cc", + "src/objects/js-temporal-objects.cc", "src/objects/keys.cc", "src/objects/literal-objects.cc", "src/objects/lookup-cache.cc", @@ -4250,11 +4363,11 @@ v8_source_set("v8_base_without_compiler") { "src/objects/scope-info.cc", "src/objects/shared-function-info.cc", "src/objects/source-text-module.cc", - "src/objects/stack-frame-info.cc", 
"src/objects/string-comparator.cc", "src/objects/string-table.cc", "src/objects/string.cc", "src/objects/swiss-name-dictionary.cc", + "src/objects/symbol-table.cc", "src/objects/synthetic-module.cc", "src/objects/tagged-impl.cc", "src/objects/template-objects.cc", @@ -4340,8 +4453,8 @@ v8_source_set("v8_base_without_compiler") { "src/runtime/runtime-typedarray.cc", "src/runtime/runtime-weak-refs.cc", "src/runtime/runtime.cc", - "src/security/external-pointer-table.cc", - "src/security/vm-cage.cc", + "src/sandbox/external-pointer-table.cc", + "src/sandbox/sandbox.cc", "src/snapshot/code-serializer.cc", "src/snapshot/context-deserializer.cc", "src/snapshot/context-serializer.cc", @@ -4394,8 +4507,23 @@ v8_source_set("v8_base_without_compiler") { "src/zone/zone.cc", ] - if (v8_enable_webassembly) { + if (v8_enable_maglev) { sources += [ + "src/maglev/maglev-code-generator.cc", + "src/maglev/maglev-compilation-info.cc", + "src/maglev/maglev-compilation-unit.cc", + "src/maglev/maglev-compiler.cc", + "src/maglev/maglev-concurrent-dispatcher.cc", + "src/maglev/maglev-graph-builder.cc", + "src/maglev/maglev-graph-printer.cc", + "src/maglev/maglev-ir.cc", + "src/maglev/maglev-regalloc.cc", + "src/maglev/maglev.cc", + ] + } + + if (v8_enable_webassembly) { + sources += [ ### gcmole(all) ### "src/asmjs/asm-js.cc", "src/asmjs/asm-parser.cc", "src/asmjs/asm-scanner.cc", @@ -4468,6 +4596,10 @@ v8_source_set("v8_base_without_compiler") { ] } + if (v8_enable_heap_snapshot_verify) { + sources += [ "src/heap/reference-summarizer.cc" ] + } + if (v8_current_cpu == "x86") { sources += [ ### gcmole(arch:ia32) ### "src/codegen/ia32/assembler-ia32.cc", @@ -4732,8 +4864,8 @@ v8_source_set("v8_base_without_compiler") { deps = [ ":torque_generated_definitions", ":v8_bigint", - ":v8_cppgc_shared", ":v8_headers", + ":v8_heap_base", ":v8_libbase", ":v8_shared_internal_headers", ":v8_tracing", @@ -4830,6 +4962,8 @@ v8_source_set("torque_base") { visibility = [ ":*" ] # Only targets in this file 
can depend on this. sources = [ + "src/numbers/integer-literal-inl.h", + "src/numbers/integer-literal.h", "src/torque/ast.h", "src/torque/cc-generator.cc", "src/torque/cc-generator.h", @@ -5029,6 +5163,7 @@ v8_component("v8_libbase") { "src/base/platform/time.h", "src/base/platform/wrappers.h", "src/base/platform/yield-processor.h", + "src/base/pointer-with-payload.h", "src/base/region-allocator.cc", "src/base/region-allocator.h", "src/base/ring-buffer.h", @@ -5120,6 +5255,7 @@ v8_component("v8_libbase") { if (host_os == "mac") { sources += [ "src/base/debug/stack_trace_posix.cc", + "src/base/platform/platform-darwin.cc", "src/base/platform/platform-macos.cc", ] } else { @@ -5139,14 +5275,24 @@ v8_component("v8_libbase") { "src/base/debug/stack_trace_fuchsia.cc", "src/base/platform/platform-fuchsia.cc", ] - deps += [ "//third_party/fuchsia-sdk/sdk/pkg/zx" ] - } else if (is_mac || is_ios) { + deps += [ + "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.kernel", + "//third_party/fuchsia-sdk/sdk/pkg/fdio", + "//third_party/fuchsia-sdk/sdk/pkg/zx", + ] + } else if (is_mac) { sources += [ "src/base/debug/stack_trace_posix.cc", + "src/base/platform/platform-darwin.cc", "src/base/platform/platform-macos.cc", ] + } else if (is_ios) { + sources += [ + "src/base/debug/stack_trace_posix.cc", + "src/base/platform/platform-darwin.cc", + ] } else if (is_win) { - # TODO(jochen): Add support for cygwin. + # TODO(infra): Add support for cygwin. sources += [ "src/base/debug/stack_trace_win.cc", "src/base/platform/platform-win32.cc", @@ -5195,7 +5341,7 @@ v8_component("v8_libbase") { [ "//build/config/clang:llvm-symbolizer_data($host_toolchain)" ] } - # TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris. + # TODO(infra): Add support for qnx, freebsd, openbsd, netbsd, and solaris. 
} v8_component("v8_libplatform") { @@ -5325,13 +5471,23 @@ v8_source_set("v8_bigint") { configs = [ ":internal_config" ] } -v8_source_set("v8_cppgc_shared") { +v8_source_set("v8_heap_base_headers") { sources = [ - "src/heap/base/stack.cc", + "src/heap/base/active-system-pages.h", "src/heap/base/stack.h", - "src/heap/base/worklist.cc", "src/heap/base/worklist.h", - "src/heap/cppgc/globals.h", + ] + + configs = [ ":internal_config" ] + + public_deps = [ ":v8_libbase" ] +} + +v8_source_set("v8_heap_base") { + sources = [ + "src/heap/base/active-system-pages.cc", + "src/heap/base/stack.cc", + "src/heap/base/worklist.cc", ] if (is_clang || !is_win) { @@ -5369,7 +5525,7 @@ v8_source_set("v8_cppgc_shared") { configs = [ ":internal_config" ] public_deps = [ - ":cppgc_headers", + ":v8_heap_base_headers", ":v8_libbase", ] } @@ -5404,7 +5560,6 @@ v8_header_set("cppgc_headers") { "include/cppgc/internal/name-trait.h", "include/cppgc/internal/persistent-node.h", "include/cppgc/internal/pointer-policies.h", - "include/cppgc/internal/prefinalizer-handler.h", "include/cppgc/internal/write-barrier.h", "include/cppgc/liveness-broker.h", "include/cppgc/macros.h", @@ -5459,6 +5614,7 @@ v8_source_set("cppgc_base") { "src/heap/cppgc/gc-info.cc", "src/heap/cppgc/gc-invoker.cc", "src/heap/cppgc/gc-invoker.h", + "src/heap/cppgc/globals.h", "src/heap/cppgc/heap-base.cc", "src/heap/cppgc/heap-base.h", "src/heap/cppgc/heap-consistency.cc", @@ -5515,6 +5671,8 @@ v8_source_set("cppgc_base") { "src/heap/cppgc/process-heap.h", "src/heap/cppgc/raw-heap.cc", "src/heap/cppgc/raw-heap.h", + "src/heap/cppgc/remembered-set.cc", + "src/heap/cppgc/remembered-set.h", "src/heap/cppgc/source-location.cc", "src/heap/cppgc/stats-collector.cc", "src/heap/cppgc/stats-collector.h", @@ -5551,7 +5709,7 @@ v8_source_set("cppgc_base") { public_deps = [ ":cppgc_headers", - ":v8_cppgc_shared", + ":v8_heap_base", ":v8_libbase", ":v8_libplatform", ] @@ -5862,6 +6020,7 @@ group("v8_clusterfuzz") { deps = [ ":d8", 
":v8_simple_inspector_fuzzer", + "tools/clusterfuzz/trials:v8_clusterfuzz_resources", ] if (v8_multi_arch_build) { @@ -5870,7 +6029,7 @@ group("v8_clusterfuzz") { ":d8(//build/toolchain/linux:clang_x64_v8_arm64)", ":d8(//build/toolchain/linux:clang_x86)", ":d8(//build/toolchain/linux:clang_x86_v8_arm)", - ":d8(tools/clusterfuzz/toolchain:clang_x64_pointer_compression)", + ":d8(tools/clusterfuzz/foozzie/toolchain:clang_x64_pointer_compression)", ] } } @@ -5981,10 +6140,10 @@ if (is_component_build) { } } - v8_component("v8_cppgc_shared_for_testing") { + v8_component("v8_heap_base_for_testing") { testonly = true - public_deps = [ ":v8_cppgc_shared" ] + public_deps = [ ":v8_heap_base" ] configs = [] public_configs = [ ":external_config" ] @@ -6033,10 +6192,10 @@ if (is_component_build) { } } - group("v8_cppgc_shared_for_testing") { + group("v8_heap_base_for_testing") { testonly = true - public_deps = [ ":v8_cppgc_shared" ] + public_deps = [ ":v8_heap_base" ] public_configs = [ ":external_config" ] } @@ -6086,7 +6245,7 @@ v8_executable("d8") { } if (v8_correctness_fuzzer) { - deps += [ "tools/clusterfuzz:v8_correctness_fuzzer_resources" ] + deps += [ "tools/clusterfuzz/foozzie:v8_correctness_fuzzer_resources" ] } defines = [] diff --git a/deps/v8/COMMON_OWNERS b/deps/v8/COMMON_OWNERS index add6b07ed60d9d..39f241b3e9a2f1 100644 --- a/deps/v8/COMMON_OWNERS +++ b/deps/v8/COMMON_OWNERS @@ -1,16 +1,13 @@ adamk@chromium.org ahaas@chromium.org -bbudge@chromium.org bikineev@chromium.org bmeurer@chromium.org cbruni@chromium.org clemensb@chromium.org danno@chromium.org -delphick@chromium.org dinfuehr@chromium.org ecmziegler@chromium.org gdeepti@chromium.org -gsathya@chromium.org hablich@chromium.org hpayer@chromium.org ishell@chromium.org @@ -23,16 +20,15 @@ mathias@chromium.org marja@chromium.org mlippautz@chromium.org mslekova@chromium.org -mvstanton@chromium.org nicohartmann@chromium.org +nikolaos@chromium.org omerkatz@chromium.org pthier@chromium.org -sigurds@chromium.org 
syg@chromium.org szuend@chromium.org +tebbi@chromium.org thibaudm@chromium.org vahl@chromium.org verwaest@chromium.org victorgomes@chromium.org yangguo@chromium.org -zhin@chromium.org diff --git a/deps/v8/DEPS b/deps/v8/DEPS index 102f46264b8e95..61577d45ab6c23 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -40,10 +40,10 @@ vars = { 'reclient_version': 're_client_version:0.40.0.40ff5a5', # GN CIPD package version. - 'gn_version': 'git_revision:fc295f3ac7ca4fe7acc6cb5fb052d22909ef3a8f', + 'gn_version': 'git_revision:bd99dbf98cbdefe18a4128189665c5761263bcfb', # luci-go CIPD package version. - 'luci_go': 'git_revision:31175eb1a2712bb75d06a9bad5d4dd3f2a09cd1f', + 'luci_go': 'git_revision:cb424e70e75136736a86359ef070aa96425fe7a3', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -72,20 +72,20 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_sources_version # and whatever else without interference from each other. - 'android_sdk_sources_version': 'Yw53980aNNn0n9l58lN7u0wSVmxlY0OM1zFnGDQeJs4C', + 'android_sdk_sources_version': '7EcXjyZWkTu3sCA8d8eRXg_aCBCYt8ihXgxp29VXLs8C', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_tools-lint_version # and whatever else without interference from each other. 
- 'android_sdk_cmdline-tools_version': 'Ez2NWws2SJYCF6qw2O-mSCqK6424l3ZdSTpppLyVR_cC', + 'android_sdk_cmdline-tools_version': 'PGPmqJtSIQ84If155ba7iTU846h5WJ-bL5d_OoUWEWYC', } deps = { 'base/trace_event/common': - Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '7f36dbc19d31e2aad895c60261ca8f726442bfbb', + Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'd115b033c4e53666b535cbd1985ffe60badad082', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '9cfc74504f0c5093fe6799e70f15bded2423b5b4', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '3d9590754d5d23e62d15472c5baf6777ca59df20', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '075dd7e22837a69189003e4fa84499acf63188cf', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '113dd1badbcbffea108a8c95ac7c89c22bfd25f3', 'buildtools/clang_format/script': Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + 'e435ad79c17b1888b34df88d6a30a094936e3836', 'buildtools/linux64': { @@ -111,9 +111,9 @@ deps = { 'buildtools/third_party/libc++/trunk': Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '79a2e924d96e2fc1e4b937c42efd08898fa472d7', 'buildtools/third_party/libc++abi/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '89f2e82120461d34098edd216e57aa743f441107', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'a897d0f3f8e8c28ac2abf848f3b695b724409298', 'buildtools/third_party/libunwind/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'c8c0ec928e46328fa284e7290c4ef052c7d285d4', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'd1c7f92b8b0bff8d9f710ca40e44563a63db376e', 'buildtools/win': { 'packages': [ { @@ -139,9 +139,7 @@ deps = { 'test/mozilla/data': 
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '1f16a6ad0edd10e774e336d8b331471b0c3bb360', - 'test/test262/harness': - Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '278bcfaed0dcaa13936831fb1769d15e7c1e3b2b', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'f7fb969cc4934bbc5aa29a378d59325eaa84f475', 'third_party/aemu-linux-x64': { 'packages': [ { @@ -163,11 +161,11 @@ deps = { 'dep_type': 'cipd', }, 'third_party/android_ndk': { - 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '401019bf85744311b26c88ced255cd53401af8b7', + 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '9644104c8cf85bf1bdce5b1c0691e9778572c3f8', 'condition': 'checkout_android', }, 'third_party/android_platform': { - 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'abc362f16dfc1a6cc082298ed54504bef11eb9e7', + 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '87b4b48de3c8204224d63612c287eb5a447a562d', 'condition': 'checkout_android', }, 'third_party/android_sdk/public': { @@ -209,7 +207,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '49839733a7f26070e8d666d91fae177711154e1d', + 'url': Var('chromium_url') + '/catapult.git' + '@' + 'b3fe2c177912640bc676b332a2f41dc812ea5843', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -217,18 +215,18 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '0a233e176044b6d9b9ff9fb30b589bfb18f9ca04', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'b199f549263a02900faef8c8c3d581c580e837c3', 'third_party/fuchsia-sdk': { - 'url': Var('chromium_url') + 
'/chromium/src/third_party/fuchsia-sdk.git' + '@' + '5e0b0d0b67e889360eaa456cc17ce47d89a92167', + 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '7c9c220d13ab367d49420144a257886ebfbce278', 'condition': 'checkout_fuchsia', }, 'third_party/google_benchmark/src': { - 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + 'ab867074da2423c2d9cf225233191a01f043485d', + 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '5704cd4c8cea889d68f9ae29ca5aaee97ef91816', }, 'third_party/googletest/src': - Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '4c5650f68866e3c2e60361d5c4c95c6f335fb64b', + Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'ae5e06dd35c6137d335331b0815cf1f60fd7e3c5', 'third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'edf883ad2db9c723b058a6a17a146d68d6343143', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '8a5b728e4f43b0eabdb9ea450f956d67cfb22719', 'third_party/instrumented_libraries': Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'e09c4b66b6e87116eb190651421f1a6e2f3b9c52', 'third_party/ittapi': { @@ -242,7 +240,7 @@ deps = { 'third_party/jsoncpp/source': Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '9059f5cad030ba11d37818847443a53918c327b1', 'third_party/logdog/logdog': - Var('chromium_url') + '/infra/luci/luci-py/client/libs/logdog' + '@' + '17ec234f823f7bff6ada6584fdbbee9d54b8fc58', + Var('chromium_url') + '/infra/luci/luci-py/client/libs/logdog' + '@' + '0b2078a90f7a638d576b3a7c407d136f2fb62399', 'third_party/markupsafe': Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '1b882ef6372b58bfd55a3285f37ed801be9137cd', 'third_party/perfetto': @@ -274,9 +272,9 @@ deps = { 'condition': 'checkout_android', }, 'third_party/zlib': - Var('chromium_url') + 
'/chromium/src/third_party/zlib.git'+ '@' + 'efd9399ae01364926be2a38946127fdf463480db', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'b0676a1f52484bf53a1a49d0e48ff8abc430fafe', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '336fcfd099995c128bc93e97b8263cc6fc891cc8', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'b60d34c100e5a8f4b01d838527f000faab673da3', 'tools/clang/dsymutil': { 'packages': [ { diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py index 61963c62f62009..cb6888d32a23e2 100644 --- a/deps/v8/PRESUBMIT.py +++ b/deps/v8/PRESUBMIT.py @@ -36,6 +36,9 @@ import re import sys +# This line is 'magic' in that git-cl looks for it to decide whether to +# use Python3 instead of Python2 when running the code in this file. +USE_PYTHON3 = True _EXCLUDED_PATHS = ( r"^test[\\\/].*", @@ -223,7 +226,7 @@ def _FilterDuplicates(impacted_files, affected_files): warning_descriptions = [] for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes( added_includes): - description_with_path = '%s\n %s' % (path, rule_description) + description_with_path = '{}\n {}'.format(path, rule_description) if rule_type == Rule.DISALLOW: error_descriptions.append(description_with_path) else: @@ -285,8 +288,8 @@ def PathToGuardMacro(path): break if not file_omitted and not all(found_patterns): - problems.append( - '%s: Missing include guard \'%s\'' % (local_path, guard_macro)) + problems.append('{}: Missing include guard \'{}\''.format( + local_path, guard_macro)) if problems: return [output_api.PresubmitError( @@ -320,8 +323,8 @@ def FilterFile(affected_file): local_path = f.LocalPath() for line_number, line in f.ChangedContents(): if (include_directive_pattern.search(line)): - problems.append( - '%s:%d\n %s' % (local_path, line_number, line.strip())) + problems.append('{}:{}\n {}'.format(local_path, line_number, + line.strip())) if problems: return [output_api.PresubmitError(include_error, 
problems)] @@ -341,11 +344,13 @@ def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api): file_inclusion_pattern = r'.+\.cc' base_function_pattern = r'[ :]test::[^\s]+|ForTest(ing)?|for_test(ing)?' - inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern) - comment_pattern = input_api.re.compile(r'//.*(%s)' % base_function_pattern) + inclusion_pattern = input_api.re.compile( + r'({})\s*\('.format(base_function_pattern)) + comment_pattern = input_api.re.compile( + r'//.*({})'.format(base_function_pattern)) exclusion_pattern = input_api.re.compile( - r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % ( - base_function_pattern, base_function_pattern)) + r'::[A-Za-z0-9_]+({})|({})[^;]+'.format(base_function_pattern, + base_function_pattern) + '\{') def FilterFile(affected_file): files_to_skip = (_EXCLUDED_PATHS + @@ -363,8 +368,8 @@ def FilterFile(affected_file): if (inclusion_pattern.search(line) and not comment_pattern.search(line) and not exclusion_pattern.search(line)): - problems.append( - '%s:%d\n %s' % (local_path, line_number, line.strip())) + problems.append('{}:{}\n {}'.format(local_path, line_number, + line.strip())) if problems: return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)] @@ -423,7 +428,7 @@ def _SkipTreeCheck(input_api, output_api): def _CheckCommitMessageBugEntry(input_api, output_api): """Check that bug entries are well-formed in commit message.""" bogus_bug_msg = ( - 'Bogus BUG entry: %s. Please specify the issue tracker prefix and the ' + 'Bogus BUG entry: {}. Please specify the issue tracker prefix and the ' 'issue number, separated by a colon, e.g. v8:123 or chromium:12345.') results = [] for bug in (input_api.change.BUG or '').split(','): @@ -437,12 +442,13 @@ def _CheckCommitMessageBugEntry(input_api, output_api): prefix_guess = 'chromium' else: prefix_guess = 'v8' - results.append('BUG entry requires issue tracker prefix, e.g. 
%s:%s' % - (prefix_guess, bug)) + results.append( + 'BUG entry requires issue tracker prefix, e.g. {}:{}'.format( + prefix_guess, bug)) except ValueError: - results.append(bogus_bug_msg % bug) + results.append(bogus_bug_msg.format(bug)) elif not re.match(r'\w+:\d+', bug): - results.append(bogus_bug_msg % bug) + results.append(bogus_bug_msg.format(bug)) return [output_api.PresubmitError(r) for r in results] @@ -459,8 +465,8 @@ def FilterFile(affected_file): try: json.load(j) except Exception as e: - results.append( - 'JSON validation failed for %s. Error:\n%s' % (f.LocalPath(), e)) + results.append('JSON validation failed for {}. Error:\n{}'.format( + f.LocalPath(), e)) return [output_api.PresubmitError(r) for r in results] @@ -509,8 +515,7 @@ def FilterFile(affected_file): include_deletes=False): with open(f.LocalPath()) as fh: for match in re.finditer(regexp, fh.read()): - errors.append('in {}: {}'.format(f.LocalPath(), - match.group().strip())) + errors.append(f'in {f.LocalPath()}: {match.group().strip()}') if errors: return [output_api.PresubmitPromptOrNotify( diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS index b1dc86db9d9467..feadac3fab5afc 100644 --- a/deps/v8/WATCHLISTS +++ b/deps/v8/WATCHLISTS @@ -48,12 +48,6 @@ '|include/js_protocol.pdl' \ '|include/v8-inspector*\.h', }, - 'interpreter': { - 'filepath': 'src/interpreter/' \ - '|src/compiler/bytecode' \ - '|test/cctest/interpreter/' \ - '|test/unittests/interpreter/', - }, 'baseline': { 'filepath': 'src/baseline/', }, @@ -66,10 +60,6 @@ 'arm': { 'filepath': '/arm/', }, - 'csa': { - 'filepath': 'src/codegen/code-stub-assembler\.(cc|h)$' \ - '|src/builtins/.*-gen.(cc|h)$', - }, 'merges': { 'filepath': '.', }, @@ -80,13 +70,13 @@ 'value_serializer': { 'filepath': 'src/value-serializer', }, + 'maglev': { + 'filepath': 'src/maglev/', + }, 'parser': { 'filepath': 'src/ast/' \ '|src/parsing/', }, - 'torque': { - 'filepath': '.*\.tq$', - }, 'tracing': { 'filepath': 'src/tracing/', }, @@ -114,11 +104,10 @@ }, 
'WATCHLISTS': { - 'csa': [ - 'jgruber+watch@chromium.org', - ], - 'torque': [ + 'maglev': [ 'jgruber+watch@chromium.org', + 'leszeks+watch@chromium.org', + 'verwaest+watch@chromium.org', ], 'snapshot': [ 'jgruber+watch@chromium.org', diff --git a/deps/v8/WORKSPACE b/deps/v8/WORKSPACE index 32fff02aab80b6..490e973a762a5a 100644 --- a/deps/v8/WORKSPACE +++ b/deps/v8/WORKSPACE @@ -5,31 +5,69 @@ workspace(name = "v8") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + http_archive( name = "bazel_skylib", + sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", urls = [ "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz", "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz", ], - sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", ) + load("@bazel_skylib//:workspace.bzl", "bazel_skylib_workspace") + bazel_skylib_workspace() -new_local_repository( - name = "config", - path = "bazel/config", - build_file = "bazel/config/BUILD.bazel", +http_archive( + name = "rules_python", + sha256 = "a30abdfc7126d497a7698c29c46ea9901c6392d6ed315171a6df5ce433aa4502", + strip_prefix = "rules_python-0.6.0", + url = "https://github.com/bazelbuild/rules_python/archive/0.6.0.tar.gz", +) + +load("@rules_python//python:pip.bzl", "pip_install") + +pip_install( + name = "v8_python_deps", + extra_pip_args = ["--require-hashes"], + requirements = "//:bazel/requirements.txt", ) new_local_repository( - name = "zlib", - path = "third_party/zlib", + name = "com_googlesource_chromium_zlib", build_file = "bazel/BUILD.zlib", + path = "third_party/zlib", +) + +bind( + name = "zlib", + actual = "@com_googlesource_chromium_zlib//:zlib", +) + +bind( + name = "zlib_compression_utils", + actual = "@com_googlesource_chromium_zlib//:zlib_compression_utils", ) new_local_repository( - name = "icu", - path = "third_party/icu", + name = 
"com_googlesource_chromium_icu", build_file = "bazel/BUILD.icu", + path = "third_party/icu", +) + +bind( + name = "icu", + actual = "@com_googlesource_chromium_icu//:icu", +) + +new_local_repository( + name = "com_googlesource_chromium_base_trace_event_common", + build_file = "bazel/BUILD.trace_event_common", + path = "base/trace_event/common", +) + +bind( + name = "base_trace_event_common", + actual = "@com_googlesource_chromium_base_trace_event_common//:trace_event_common", ) diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h index 1fd2283decc212..fb1ce8a053c3d4 100644 --- a/deps/v8/base/trace_event/common/trace_event_common.h +++ b/deps/v8/base/trace_event/common/trace_event_common.h @@ -259,10 +259,10 @@ template <> perfetto::ThreadTrack BASE_EXPORT ConvertThreadId(const ::base::PlatformThreadId& thread); -#if defined(OS_WIN) +#if BUILDFLAG(IS_WIN) template <> perfetto::ThreadTrack BASE_EXPORT ConvertThreadId(const int& thread); -#endif // defined(OS_WIN) +#endif // BUILDFLAG(IS_WIN) } // namespace legacy diff --git a/deps/v8/bazel/BUILD.icu b/deps/v8/bazel/BUILD.icu index 662e11ec03a34a..2ae79a5784f252 100644 --- a/deps/v8/bazel/BUILD.icu +++ b/deps/v8/bazel/BUILD.icu @@ -4,15 +4,31 @@ filegroup( name = "icudata", - srcs = [ "common/icudtl.dat" ] + srcs = ["common/icudtl.dat"], ) cc_library( name = "icuuc", - srcs = glob([ + srcs = glob([ "source/common/**/*.h", - "source/common/**/*.cpp" + "source/common/**/*.cpp", ]), + copts = select({ + "@platforms//os:windows": [ + "/wd4005", # Macro redefinition. + "/wd4068", # Unknown pragmas. + "/wd4267", # Conversion from size_t on 64-bits. + "/utf-8", # ICU source files are in UTF-8. 
+ ], + "//conditions:default": [ + "-Wno-unused-function", + "-Wno-parentheses", + "-Wno-unused-function", + "-Wno-unused-variable", + "-Wno-deprecated-declarations", + ], + }), + data = [":icudata"], defines = [ "U_COMMON_IMPLEMENTATION", "U_ICUDATAENTRY_IN_COMMON", @@ -34,35 +50,19 @@ cc_library( ], "//conditions:default": [], }), - copts = select({ - "@platforms//os:windows": [ - "/wd4005", # Macro redefinition. - "/wd4068", # Unknown pragmas. - "/wd4267", # Conversion from size_t on 64-bits. - "/utf-8", # ICU source files are in UTF-8. - ], - "//conditions:default": [ - "-Wno-unused-function", - "-Wno-parentheses", - "-Wno-unused-function", - "-Wno-unused-variable", - "-Wno-deprecated-declarations", - ], - }), includes = [ "source/common", "source/i18n", ], tags = ["requires-rtti"], - data = [ ":icudata" ], alwayslink = 1, ) cc_library( name = "icui18n", - srcs = glob([ + srcs = glob([ "source/i18n/**/*.h", - "source/i18n/**/*.cpp" + "source/i18n/**/*.cpp", ]), copts = select({ "@platforms//os:windows": [ @@ -83,19 +83,19 @@ cc_library( ], "//conditions:default": [], }), - deps = [ ":icuuc" ], + deps = [":icuuc"], alwayslink = 1, ) cc_library( name = "icu", + srcs = [ + "source/stubdata/stubdata.cpp", + ], hdrs = glob([ "source/common/unicode/*.h", "source/i18n/unicode/*.h", ]), - srcs = [ - "source/stubdata/stubdata.cpp", - ], copts = select({ "@platforms//os:windows": [ "/wd4005", # Macro redefinition. @@ -116,10 +116,10 @@ cc_library( "//conditions:default": [], }), include_prefix = "third_party/icu", + visibility = ["//visibility:public"], deps = [ + ":icui18n", ":icuuc", - ":icui18n" ], - visibility = ["//visibility:public"], alwayslink = 1, ) diff --git a/deps/v8/bazel/BUILD.trace_event_common b/deps/v8/bazel/BUILD.trace_event_common new file mode 100644 index 00000000000000..685b284071946c --- /dev/null +++ b/deps/v8/bazel/BUILD.trace_event_common @@ -0,0 +1,10 @@ +# Copyright 2021 the V8 project authors. All rights reserved. 
+# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +cc_library( + name = "trace_event_common", + hdrs = ["trace_event_common.h"], + include_prefix = "base/trace_event/common", + visibility = ["//visibility:public"], +) diff --git a/deps/v8/bazel/BUILD.zlib b/deps/v8/bazel/BUILD.zlib index 140f761fbb81e3..25a2c35313b981 100644 --- a/deps/v8/bazel/BUILD.zlib +++ b/deps/v8/bazel/BUILD.zlib @@ -9,6 +9,7 @@ cc_library( "chromeconf.h", "compress.c", "contrib/optimizations/insert_string.h", + "contrib/optimizations/slide_hash_neon.h", "cpu_features.c", "cpu_features.h", "crc32.c", @@ -35,14 +36,10 @@ cc_library( "zlib.h", "zutil.c", "zutil.h", - "google/compression_utils_portable.h", - "google/compression_utils_portable.cc", - ], + ], hdrs = [ "zlib.h", - "google/compression_utils_portable.h", ], - include_prefix = "third_party/zlib", defines = [ "CHROMIUM_ZLIB_NO_CHROMECONF", "CPU_NO_SIMD", @@ -52,5 +49,21 @@ cc_library( "HAVE_HIDDEN", ], }), + include_prefix = "third_party/zlib", visibility = ["//visibility:public"], ) + +cc_library( + name = "zlib_compression_utils", + srcs = [ + "google/compression_utils_portable.cc", + ], + hdrs = [ + "google/compression_utils_portable.h", + ], + include_prefix = "third_party/zlib", + visibility = ["//visibility:public"], + deps = [ + "//external:zlib", + ], +) diff --git a/deps/v8/bazel/OWNERS b/deps/v8/bazel/OWNERS index 8636f621c416b4..502862b9bc01e1 100644 --- a/deps/v8/bazel/OWNERS +++ b/deps/v8/bazel/OWNERS @@ -1,5 +1,4 @@ # Google3 V8 owners ahaas@chromium.org cbruni@chromium.org -delphick@chromium.org victorgomes@chromium.org diff --git a/deps/v8/bazel/config/BUILD.bazel b/deps/v8/bazel/config/BUILD.bazel index 78a1b5debdac77..ac79c428089db0 100644 --- a/deps/v8/bazel/config/BUILD.bazel +++ b/deps/v8/bazel/config/BUILD.bazel @@ -15,6 +15,20 @@ package( ], ) +config_setting( + name = "is_fastbuild", + values = { + "compilation_mode": "fastbuild", + }, +) + +config_setting( 
+ name = "is_debug", + values = { + "compilation_mode": "dbg", + }, +) + config_setting( name = "platform_cpu_x64", constraint_values = ["@platforms//cpu:x86_64"], @@ -27,7 +41,7 @@ config_setting( config_setting( name = "platform_cpu_arm64", - constraint_values = ["@platforms//cpu:arm"], + constraint_values = ["@platforms//cpu:aarch64"], ) config_setting( @@ -35,6 +49,21 @@ config_setting( constraint_values = ["@platforms//cpu:arm"], ) +config_setting( + name = "platform_cpu_s390x", + constraint_values = ["@platforms//cpu:s390x"], +) + +config_setting( + name = "platform_cpu_riscv64", + constraint_values = ["@platforms//cpu:riscv64"], +) + +config_setting( + name = "platform_cpu_ppc64le", + constraint_values = ["@platforms//cpu:ppc"], +) + v8_target_cpu( name = "v8_target_cpu", build_setting_default = "none", @@ -58,15 +87,30 @@ v8_configure_target_cpu( ) v8_configure_target_cpu( - name = "arm", + name = "arm64", matching_configs = [":platform_cpu_arm64"], ) v8_configure_target_cpu( - name = "arm64", + name = "arm", matching_configs = [":platform_cpu_arm"], ) +v8_configure_target_cpu( + name = "s390x", + matching_configs = [":platform_cpu_s390x"], +) + +v8_configure_target_cpu( + name = "riscv64", + matching_configs = [":platform_cpu_riscv64"], +) + +v8_configure_target_cpu( + name = "ppc64le", + matching_configs = [":platform_cpu_ppc64le"], +) + selects.config_setting_group( name = "v8_target_is_32_bits", match_any = [ @@ -110,6 +154,9 @@ selects.config_setting_group( ":v8_target_arm64", ":is_x64", ":is_arm64", + ":is_s390x", + ":is_riscv64", + ":is_ppc64le", ], ) @@ -130,45 +177,175 @@ selects.config_setting_group( ], ) +selects.config_setting_group( + name = "is_non_android_posix", + match_any = [ + ":is_linux", + ":is_macos", + ], +) + selects.config_setting_group( name = "is_posix_x64", match_all = [ ":is_posix", ":is_x64", - ] + ], +) + +selects.config_setting_group( + name = "is_non_android_posix_x64", + match_all = [ + ":is_non_android_posix", + ":is_x64", 
+ ], ) selects.config_setting_group( name = "is_inline_asm_x64", - match_all = [":is_posix", ":is_x64"], + match_all = [ + ":is_posix", + ":is_x64", + ], ) selects.config_setting_group( name = "is_inline_asm_ia32", - match_all = [":is_posix", ":is_ia32"], + match_all = [ + ":is_posix", + ":is_ia32", + ], ) selects.config_setting_group( name = "is_inline_asm_arm64", - match_all = [":is_posix", ":is_arm64"], + match_all = [ + ":is_posix", + ":is_arm64", + ], ) selects.config_setting_group( name = "is_inline_asm_arm", - match_all = [":is_posix", ":is_arm"], + match_all = [ + ":is_posix", + ":is_arm", + ], +) + +selects.config_setting_group( + name = "is_inline_asm_s390x", + match_all = [ + ":is_posix", + ":is_s390x", + ], +) + +selects.config_setting_group( + name = "is_inline_asm_riscv64", + match_all = [ + ":is_posix", + ":is_riscv64", + ], +) + +selects.config_setting_group( + name = "is_inline_asm_ppc64le", + match_all = [ + ":is_posix", + ":is_ppc64le", + ], ) selects.config_setting_group( name = "is_msvc_asm_x64", - match_all = [":is_windows", ":is_x64"], + match_all = [ + ":is_windows", + ":is_x64", + ], ) selects.config_setting_group( name = "is_msvc_asm_ia32", - match_all = [":is_windows", ":is_ia32"], + match_all = [ + ":is_windows", + ":is_ia32", + ], ) selects.config_setting_group( name = "is_msvc_asm_arm64", - match_all = [":is_windows", ":is_arm64"], + match_all = [ + ":is_windows", + ":is_arm64", + ], +) + +config_setting( + name = "is_compiler_default", + flag_values = { + "@bazel_tools//tools/cpp:compiler": "compiler", + }, +) + +selects.config_setting_group( + name = "is_compiler_default_on_linux", + match_all = [ + ":is_compiler_default", + ":is_linux", + ], +) + +selects.config_setting_group( + name = "is_compiler_default_on_macos", + match_all = [ + ":is_compiler_default", + ":is_macos", + ], +) + +config_setting( + name = "is_compiler_clang", + flag_values = { + "@bazel_tools//tools/cpp:compiler": "clang", + }, +) + +selects.config_setting_group( 
+ name = "is_clang", + match_any = [ + ":is_compiler_default_on_macos", + ":is_compiler_clang", + ], +) + +selects.config_setting_group( + name = "is_clang_s390x", + match_all = [ + ":is_clang", + ":is_s390x", + ], +) + +config_setting( + name = "is_compiler_gcc", + flag_values = { + "@bazel_tools//tools/cpp:compiler": "gcc", + }, +) + +selects.config_setting_group( + name = "is_gcc", + match_any = [ + ":is_compiler_default_on_linux", + ":is_compiler_gcc", + ], +) + +selects.config_setting_group( + name = "is_gcc_fastbuild", + match_all = [ + ":is_gcc", + ":is_fastbuild", + ], ) diff --git a/deps/v8/bazel/config/v8-target-cpu.bzl b/deps/v8/bazel/config/v8-target-cpu.bzl index 2d5d241ebf45f4..a0ce9d1cb39cab 100644 --- a/deps/v8/bazel/config/v8-target-cpu.bzl +++ b/deps/v8/bazel/config/v8-target-cpu.bzl @@ -14,7 +14,7 @@ V8CpuTypeInfo = provider( ) def _host_target_cpu_impl(ctx): - allowed_values = ["arm", "arm64", "ia32", "x64", "none"] + allowed_values = ["arm", "arm64", "ia32", "ppc64le", "riscv64", "s390x", "x64", "none"] cpu_type = ctx.build_setting_value if cpu_type in allowed_values: return V8CpuTypeInfo(value = cpu_type) diff --git a/deps/v8/bazel/defs.bzl b/deps/v8/bazel/defs.bzl index fc428ba16cd083..ed7dea9581deff 100644 --- a/deps/v8/bazel/defs.bzl +++ b/deps/v8/bazel/defs.bzl @@ -89,38 +89,77 @@ def _default_args(): return struct( deps = [":define_flags"], defines = select({ - "@config//:is_windows": [ + "@v8//bazel/config:is_windows": [ "UNICODE", "_UNICODE", "_CRT_RAND_S", - "_WIN32_WINNT=0x0602", # Override bazel default to Windows 8 + "_WIN32_WINNT=0x0602", # Override bazel default to Windows 8 ], "//conditions:default": [], }), copts = select({ - "@config//:is_posix": [ + "@v8//bazel/config:is_posix": [ "-fPIC", + "-fno-strict-aliasing", "-Werror", "-Wextra", + "-Wno-unknown-warning-option", "-Wno-bitwise-instead-of-logical", "-Wno-builtin-assume-aligned-alignment", "-Wno-unused-parameter", "-Wno-implicit-int-float-conversion", 
"-Wno-deprecated-copy", "-Wno-non-virtual-dtor", - "-std=c++17", "-isystem .", ], "//conditions:default": [], + }) + select({ + "@v8//bazel/config:is_clang": [ + "-Wno-invalid-offsetof", + "-std=c++17", + ], + "@v8//bazel/config:is_gcc": [ + "-Wno-extra", + "-Wno-array-bounds", + "-Wno-class-memaccess", + "-Wno-comments", + "-Wno-deprecated-declarations", + "-Wno-implicit-fallthrough", + "-Wno-int-in-bool-context", + "-Wno-maybe-uninitialized", + "-Wno-mismatched-new-delete", + "-Wno-redundant-move", + "-Wno-return-type", + "-Wno-stringop-overflow", + # Use GNU dialect, because GCC doesn't allow using + # ##__VA_ARGS__ when in standards-conforming mode. + "-std=gnu++17", + ], + "@v8//bazel/config:is_windows": [ + "/std:c++17", + ], + "//conditions:default": [], + }) + select({ + "@v8//bazel/config:is_gcc_fastbuild": [ + # Non-debug builds without optimizations fail because + # of recursive inlining of "always_inline" functions. + "-O1", + ], + "//conditions:default": [], + }) + select({ + "@v8//bazel/config:is_clang_s390x": [ + "-fno-integrated-as", + ], + "//conditions:default": [], }), includes = ["include"], linkopts = select({ - "@config//:is_windows": [ + "@v8//bazel/config:is_windows": [ "Winmm.lib", "DbgHelp.lib", "Advapi32.lib", ], - "@config//:is_macos": ["-pthread"], + "@v8//bazel/config:is_macos": ["-pthread"], "//conditions:default": ["-Wl,--no-as-needed -ldl -pthread"], }) + select({ ":should_add_rdynamic": ["-rdynamic"], @@ -209,6 +248,7 @@ def v8_library( linkstatic = 1, **kwargs ) + # Alias target used because of cc_library bug in bazel on windows # https://github.com/bazelbuild/bazel/issues/14237 # TODO(victorgomes): Remove alias once bug is fixed @@ -227,6 +267,7 @@ def v8_library( linkstatic = 1, **kwargs ) + # Alias target used because of cc_library bug in bazel on windows # https://github.com/bazelbuild/bazel/issues/14237 # TODO(victorgomes): Remove alias once bug is fixed @@ -248,8 +289,10 @@ def v8_library( ) def _torque_impl(ctx): - v8root = 
"." - prefix = ctx.attr.prefix + if ctx.workspace_name == "v8": + v8root = "." + else: + v8root = "external/v8" # Arguments args = [] @@ -301,7 +344,6 @@ _v8_torque = rule( cfg = "exec", ), "args": attr.string_list(), - "v8root": attr.label(default = ":v8_root"), }, ) @@ -313,7 +355,7 @@ def v8_torque(name, noicu_srcs, icu_srcs, args, extras): args = args, extras = extras, tool = select({ - "@config//:v8_target_is_32_bits": ":torque_non_pointer_compression", + "@v8//bazel/config:v8_target_is_32_bits": ":torque_non_pointer_compression", "//conditions:default": ":torque", }), ) @@ -324,32 +366,44 @@ def v8_torque(name, noicu_srcs, icu_srcs, args, extras): args = args, extras = extras, tool = select({ - "@config//:v8_target_is_32_bits": ":torque_non_pointer_compression", + "@v8//bazel/config:v8_target_is_32_bits": ":torque_non_pointer_compression", "//conditions:default": ":torque", }), ) def _v8_target_cpu_transition_impl(settings, attr): + # Check for an existing v8_target_cpu flag. + if "@v8//bazel/config:v8_target_cpu" in settings: + if settings["@v8//bazel/config:v8_target_cpu"] != "none": + return + + # Auto-detect target architecture based on the --cpu flag. mapping = { "haswell": "x64", "k8": "x64", "x86_64": "x64", + "darwin": "x64", "darwin_x86_64": "x64", + "x64_windows": "x64", "x86": "ia32", - "ppc": "ppc64", + "aarch64": "arm64", "arm64-v8a": "arm64", "arm": "arm64", + "darwin_arm64": "arm64", "armeabi-v7a": "arm32", + "s390x": "s390x", + "riscv64": "riscv64", + "ppc": "ppc64le", } v8_target_cpu = mapping[settings["//command_line_option:cpu"]] - return {"@config//:v8_target_cpu": v8_target_cpu} + return {"@v8//bazel/config:v8_target_cpu": v8_target_cpu} # Set the v8_target_cpu to be the correct architecture given the cpu specified # on the command line. 
v8_target_cpu_transition = transition( implementation = _v8_target_cpu_transition_impl, - inputs = ["//command_line_option:cpu"], - outputs = ["@config//:v8_target_cpu"], + inputs = ["@v8//bazel/config:v8_target_cpu", "//command_line_option:cpu"], + outputs = ["@v8//bazel/config:v8_target_cpu"], ) def _mksnapshot(ctx): @@ -453,7 +507,8 @@ def build_config_content(cpu, icu): ("v8_enable_webassembly", "false"), ("v8_control_flow_integrity", "false"), ("v8_enable_single_generation", "false"), - ("v8_enable_virtual_memory_cage", "false"), + ("v8_enable_sandbox", "false"), + ("v8_enable_shared_ro_heap", "false"), ("v8_target_cpu", cpu), ]) diff --git a/deps/v8/bazel/generate-inspector-files.cmd b/deps/v8/bazel/generate-inspector-files.cmd deleted file mode 100644 index 202dd81d7cf48f..00000000000000 --- a/deps/v8/bazel/generate-inspector-files.cmd +++ /dev/null @@ -1,24 +0,0 @@ -REM Copyright 2021 the V8 project authors. All rights reserved. -REM Use of this source code is governed by a BSD-style license that can be -REM found in the LICENSE file. 
- -set BAZEL_OUT=%1 - -REM Bazel nukes all env vars, and we need the following for gn to work -set DEPOT_TOOLS_WIN_TOOLCHAIN=0 -set ProgramFiles(x86)=C:\Program Files (x86) -set windir=C:\Windows - -REM Create a default GN output folder -cmd.exe /S /E:ON /V:ON /D /c gn gen out/inspector - -REM Generate inspector files -cmd.exe /S /E:ON /V:ON /D /c autoninja -C out/inspector gen/src/inspector/protocol/Forward.h - -REM Create directories in bazel output folder -MKDIR -p %BAZEL_OUT%\include\inspector -MKDIR -p %BAZEL_OUT%\src\inspector\protocol - -REM Copy generated files to bazel output folder -COPY out\inspector\gen\include\inspector\* %BAZEL_OUT%\include\inspector\ -COPY out\inspector\gen\src\inspector\protocol\* %BAZEL_OUT%\src\inspector\protocol\ \ No newline at end of file diff --git a/deps/v8/bazel/generate-inspector-files.sh b/deps/v8/bazel/generate-inspector-files.sh deleted file mode 100755 index 7fd4ab2a564a4e..00000000000000 --- a/deps/v8/bazel/generate-inspector-files.sh +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2021 the V8 project authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -BAZEL_OUT=$1 - -# Create a default GN output folder -gn gen out/inspector - -# Generate inspector files -autoninja -C out/inspector src/inspector:inspector - -# Create directories in bazel output folder -mkdir -p $BAZEL_OUT/include/inspector -mkdir -p $BAZEL_OUT/src/inspector/protocol - -# Copy generated files to bazel output folder -cp out/inspector/gen/include/inspector/* $BAZEL_OUT/include/inspector/ -cp out/inspector/gen/src/inspector/protocol/* $BAZEL_OUT/src/inspector/protocol/ diff --git a/deps/v8/bazel/requirements.in b/deps/v8/bazel/requirements.in new file mode 100644 index 00000000000000..7f7afbf3bf54b3 --- /dev/null +++ b/deps/v8/bazel/requirements.in @@ -0,0 +1 @@ +jinja2 diff --git a/deps/v8/bazel/requirements.txt b/deps/v8/bazel/requirements.txt new file mode 100644 index 00000000000000..a9c132f6883ec8 --- /dev/null +++ b/deps/v8/bazel/requirements.txt @@ -0,0 +1,81 @@ +# +# This file is autogenerated by pip-compile with python 3.9 +# To update, run: +# +# pip-compile --generate-hashes requirements.in +# +jinja2==3.0.3 \ + --hash=sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8 \ + --hash=sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7 + # via -r requirements.in +markupsafe==2.0.1 \ + --hash=sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298 \ + --hash=sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64 \ + --hash=sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b \ + --hash=sha256:04635854b943835a6ea959e948d19dcd311762c5c0c6e1f0e16ee57022669194 \ + --hash=sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567 \ + --hash=sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff \ + --hash=sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724 \ + --hash=sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74 \ + 
--hash=sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646 \ + --hash=sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35 \ + --hash=sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6 \ + --hash=sha256:20dca64a3ef2d6e4d5d615a3fd418ad3bde77a47ec8a23d984a12b5b4c74491a \ + --hash=sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6 \ + --hash=sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad \ + --hash=sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26 \ + --hash=sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38 \ + --hash=sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac \ + --hash=sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7 \ + --hash=sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6 \ + --hash=sha256:4296f2b1ce8c86a6aea78613c34bb1a672ea0e3de9c6ba08a960efe0b0a09047 \ + --hash=sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75 \ + --hash=sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f \ + --hash=sha256:4dc8f9fb58f7364b63fd9f85013b780ef83c11857ae79f2feda41e270468dd9b \ + --hash=sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135 \ + --hash=sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8 \ + --hash=sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a \ + --hash=sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a \ + --hash=sha256:5b6d930f030f8ed98e3e6c98ffa0652bdb82601e7a016ec2ab5d7ff23baa78d1 \ + --hash=sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9 \ + --hash=sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864 \ + --hash=sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914 \ + --hash=sha256:6300b8454aa6930a24b9618fbb54b5a68135092bc666f7b06901f897fa5c2fee \ + 
--hash=sha256:63f3268ba69ace99cab4e3e3b5840b03340efed0948ab8f78d2fd87ee5442a4f \ + --hash=sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18 \ + --hash=sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8 \ + --hash=sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2 \ + --hash=sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d \ + --hash=sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b \ + --hash=sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b \ + --hash=sha256:89c687013cb1cd489a0f0ac24febe8c7a666e6e221b783e53ac50ebf68e45d86 \ + --hash=sha256:8d206346619592c6200148b01a2142798c989edcb9c896f9ac9722a99d4e77e6 \ + --hash=sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f \ + --hash=sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb \ + --hash=sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833 \ + --hash=sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28 \ + --hash=sha256:9f02365d4e99430a12647f09b6cc8bab61a6564363f313126f775eb4f6ef798e \ + --hash=sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415 \ + --hash=sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902 \ + --hash=sha256:aca6377c0cb8a8253e493c6b451565ac77e98c2951c45f913e0b52facdcff83f \ + --hash=sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d \ + --hash=sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9 \ + --hash=sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d \ + --hash=sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145 \ + --hash=sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066 \ + --hash=sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c \ + --hash=sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1 \ + 
--hash=sha256:cdfba22ea2f0029c9261a4bd07e830a8da012291fbe44dc794e488b6c9bb353a \ + --hash=sha256:d6c7ebd4e944c85e2c3421e612a7057a2f48d478d79e61800d81468a8d842207 \ + --hash=sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f \ + --hash=sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53 \ + --hash=sha256:deb993cacb280823246a026e3b2d81c493c53de6acfd5e6bfe31ab3402bb37dd \ + --hash=sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134 \ + --hash=sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85 \ + --hash=sha256:f0567c4dc99f264f49fe27da5f735f414c4e7e7dd850cfd8e69f0862d7c74ea9 \ + --hash=sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5 \ + --hash=sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94 \ + --hash=sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509 \ + --hash=sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51 \ + --hash=sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872 + # via jinja2 diff --git a/deps/v8/bazel/v8-non-pointer-compression.bzl b/deps/v8/bazel/v8-non-pointer-compression.bzl index 4f1c6bc003372e..7bb23591ca380c 100644 --- a/deps/v8/bazel/v8-non-pointer-compression.bzl +++ b/deps/v8/bazel/v8-non-pointer-compression.bzl @@ -1,12 +1,12 @@ def _v8_disable_pointer_compression(settings, attr): return { - "//third_party/v8/HEAD:v8_enable_pointer_compression": "False", + "//:v8_enable_pointer_compression": "False", } v8_disable_pointer_compression = transition( implementation = _v8_disable_pointer_compression, inputs = [], - outputs = ["//third_party/v8/HEAD:v8_enable_pointer_compression"], + outputs = ["//:v8_enable_pointer_compression"], ) # The implementation of transition_rule: all this does is copy the @@ -51,7 +51,7 @@ v8_binary_non_pointer_compression = rule( # consequences for your build. The whitelist defaults to "everything". 
# But you can redefine it more strictly if you feel that's prudent. "_allowlist_function_transition": attr.label( - default = "//tools/allowlists/function_transition_allowlist", + default = "@bazel_tools//tools/allowlists/function_transition_allowlist", ), }, # Making this executable means it works with "$ bazel run". diff --git a/deps/v8/gni/OWNERS b/deps/v8/gni/OWNERS index cb04fa0838fbb5..e87e9c95a1cfc8 100644 --- a/deps/v8/gni/OWNERS +++ b/deps/v8/gni/OWNERS @@ -1 +1,5 @@ file:../INFRA_OWNERS + +per-file v8.cmx=victorgomes@chromium.org +per-file release_branch_toggle.gni=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com +per-file release_branch_toggle.gni=lutz@chromium.org \ No newline at end of file diff --git a/deps/v8/gni/release_branch_toggle.gni b/deps/v8/gni/release_branch_toggle.gni new file mode 100644 index 00000000000000..c502c8c62e5836 --- /dev/null +++ b/deps/v8/gni/release_branch_toggle.gni @@ -0,0 +1,7 @@ +# Copyright 2022 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +declare_args() { + is_on_release_branch = true +} \ No newline at end of file diff --git a/deps/v8/gni/snapshot_toolchain.gni b/deps/v8/gni/snapshot_toolchain.gni index feabd079e009ca..39b196521c35ad 100644 --- a/deps/v8/gni/snapshot_toolchain.gni +++ b/deps/v8/gni/snapshot_toolchain.gni @@ -70,6 +70,10 @@ if (v8_snapshot_toolchain == "") { # therefore snapshots will need to be built using native mksnapshot # in combination with qemu v8_snapshot_toolchain = current_toolchain + } else if (host_cpu == "arm64" && current_cpu == "x64") { + # Cross-build from arm64 to intel (likely on an Apple Silicon mac). + v8_snapshot_toolchain = + "//build/toolchain/${host_os}:clang_arm64_v8_$v8_current_cpu" } else if (host_cpu == "x64") { # This is a cross-compile from an x64 host to either a non-Intel target # cpu or a different target OS. 
Clang will always be used by default on the diff --git a/deps/v8/gni/v8.cmx b/deps/v8/gni/v8.cmx index 8cd8b75fdfe37b..45fd74a09faffa 100644 --- a/deps/v8/gni/v8.cmx +++ b/deps/v8/gni/v8.cmx @@ -1,4 +1,11 @@ { + "facets": { + "fuchsia.test": { + "system-services": [ + "fuchsia.kernel.VmexResource" + ] + } + }, "sandbox": { "dev": [ "null", @@ -18,6 +25,7 @@ "fuchsia.device.NameProvider", "fuchsia.fonts.Provider", "fuchsia.intl.PropertyProvider", + "fuchsia.kernel.VmexResource", "fuchsia.logger.Log", "fuchsia.logger.LogSink", "fuchsia.media.Audio", diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni index fe445307f92aa3..7b9da1f06a6539 100644 --- a/deps/v8/gni/v8.gni +++ b/deps/v8/gni/v8.gni @@ -5,6 +5,7 @@ import("//build/config/gclient_args.gni") import("//build/config/sanitizers/sanitizers.gni") import("//build/config/v8_target_cpu.gni") +import("release_branch_toggle.gni") import("split_static_library.gni") declare_args() { @@ -31,10 +32,6 @@ declare_args() { # Support for backtrace_symbols on linux. v8_enable_backtrace = "" - # This flag is deprecated and is now available through the inspector interface - # as an argument to profiler's method `takeHeapSnapshot`. - v8_enable_raw_heap_snapshots = false - # Use external files for startup data blobs: # the JS builtins sources and the start snapshot. v8_use_external_startup_data = "" @@ -73,7 +70,7 @@ declare_args() { v8_enable_webassembly = "" # Enable runtime call stats. - v8_enable_runtime_call_stats = true + v8_enable_runtime_call_stats = !is_on_release_branch # Add fuzzilli fuzzer support. v8_fuzzilli = false @@ -85,6 +82,12 @@ declare_args() { cppgc_is_standalone = false + # Enable object names in cppgc for debug purposes. + cppgc_enable_object_names = false + + # Enable young generation in cppgc. + cppgc_enable_young_generation = false + # Enable advanced BigInt algorithms, costing about 10-30 KB binary size # depending on platform. Disabled on Android to save binary size. 
v8_advanced_bigint_algorithms = !is_android diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS index 0222513df26cd0..535040c539a732 100644 --- a/deps/v8/include/OWNERS +++ b/deps/v8/include/OWNERS @@ -7,9 +7,12 @@ yangguo@chromium.org per-file *DEPS=file:../COMMON_OWNERS per-file v8-internal.h=file:../COMMON_OWNERS -per-file v8-inspector.h=file:../src/inspector/OWNERS -per-file v8-inspector-protocol.h=file:../src/inspector/OWNERS + +per-file v8-debug.h=file:../src/debug/OWNERS + per-file js_protocol.pdl=file:../src/inspector/OWNERS +per-file v8-inspector*=file:../src/inspector/OWNERS +per-file v8-inspector*=file:../src/inspector/OWNERS # Needed by the auto_tag builder per-file v8-version.h=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com diff --git a/deps/v8/include/cppgc/README.md b/deps/v8/include/cppgc/README.md index e454399853a3ca..a7d08f86b32a39 100644 --- a/deps/v8/include/cppgc/README.md +++ b/deps/v8/include/cppgc/README.md @@ -1,16 +1,133 @@ # Oilpan: C++ Garbage Collection Oilpan is an open-source garbage collection library for C++ that can be used stand-alone or in collaboration with V8's JavaScript garbage collector. +Oilpan implements mark-and-sweep garbage collection (GC) with limited compaction (for a subset of objects). **Key properties** + - Trace-based garbage collection; +- Incremental and concurrent marking; +- Incremental and concurrent sweeping; - Precise on-heap memory layout; - Conservative on-stack memory layout; - Allows for collection with and without considering stack; -- Incremental and concurrent marking; -- Incremental and concurrent sweeping; - Non-incremental and non-concurrent compaction for selected spaces; See the [Hello World](https://chromium.googlesource.com/v8/v8/+/main/samples/cppgc/hello-world.cc) example on how to get started using Oilpan to manage C++ code. Oilpan follows V8's project organization, see e.g. 
on how we accept [contributions](https://v8.dev/docs/contribute) and [provide a stable API](https://v8.dev/docs/api). + +## Threading model + +Oilpan features thread-local garbage collection and assumes heaps are not shared among threads. +In other words, objects are accessed and ultimately reclaimed by the garbage collector on the same thread that allocates them. +This allows Oilpan to run garbage collection in parallel with mutators running in other threads. + +References to objects belonging to another thread's heap are modeled using cross-thread roots. +This is even true for on-heap to on-heap references. + +## Heap partitioning + +Oilpan's heaps are partitioned into spaces. +The space for an object is chosen depending on a number of criteria, e.g.: + +- Objects over 64KiB are allocated in a large object space +- Objects can be assigned to a dedicated custom space. + Custom spaces can also be marked as compactable. +- Other objects are allocated in one of the normal page spaces bucketed depending on their size. + +## Precise and conservative garbage collection + +Oilpan supports two kinds of GCs: + +1. **Conservative GC.** +A GC is called conservative when it is executed while the regular native stack is not empty. +In this case, the native stack might contain references to objects in Oilpan's heap, which should be kept alive. +The GC scans the native stack and treats the pointers discovered via the native stack as part of the root set. +This kind of GC is considered imprecise because values on stack other than references may accidentally appear as references to on-heap object, which means these objects will be kept alive despite being in practice unreachable from the application as an actual reference. + +2. **Precise GC.** +A precise GC is triggered at the end of an event loop, which is controlled by an embedder via a platform. +At this point, it is guaranteed that there are no on-stack references pointing to Oilpan's heap. 
+This means there is no risk of confusing other value types with references. +Oilpan has precise knowledge of on-heap object layouts, and so it knows exactly where pointers lie in memory. +Oilpan can just start marking from the regular root set and collect all garbage precisely. + +## Atomic, incremental and concurrent garbage collection + +Oilpan has three modes of operation: + +1. **Atomic GC.** +The entire GC cycle, including all its phases (e.g. see [Marking](#Marking-phase) and [Sweeping](#Sweeping-phase)), are executed back to back in a single pause. +This mode of operation is also known as Stop-The-World (STW) garbage collection. +It results in the most jank (due to a single long pause), but is overall the most efficient (e.g. no need for write barriers). + +2. **Incremental GC.** +Garbage collection work is split up into multiple steps which are interleaved with the mutator, i.e. user code chunked into tasks. +Each step is a small chunk of work that is executed either as dedicated tasks between mutator tasks or, as needed, during mutator tasks. +Using incremental GC introduces the need for write barriers that record changes to the object graph so that a consistent state is observed and no objects are accidentally considered dead and reclaimed. +The incremental steps are followed by a smaller atomic pause to finalize garbage collection. +The smaller pause times, due to smaller chunks of work, helps with reducing jank. + +3. **Concurrent GC.** +This is the most common type of GC. +It builds on top of incremental GC and offloads much of the garbage collection work away from the mutator thread and on to background threads. +Using concurrent GC allows the mutator thread to spend less time on GC and more on the actual mutator. + +## Marking phase + +The marking phase consists of the following steps: + +1. Mark all objects in the root set. + +2. Mark all objects transitively reachable from the root set by calling `Trace()` methods defined on each object. + +3. 
Clear out all weak handles to unreachable objects and run weak callbacks. + +The marking phase can be executed atomically in a stop-the-world manner, in which all 3 steps are executed one after the other. + +Alternatively, it can also be executed incrementally/concurrently. +With incremental/concurrent marking, step 1 is executed in a short pause after which the mutator regains control. +Step 2 is repeatedly executed in an interleaved manner with the mutator. +When the GC is ready to finalize, i.e. step 2 is (almost) finished, another short pause is triggered in which step 2 is finished and step 3 is performed. + +To prevent a user-after-free (UAF) issues it is required for Oilpan to know about all edges in the object graph. +This means that all pointers except on-stack pointers must be wrapped with Oilpan's handles (i.e., Persistent<>, Member<>, WeakMember<>). +Raw pointers to on-heap objects create an edge that Oilpan cannot observe and cause UAF issues +Thus, raw pointers shall not be used to reference on-heap objects (except for raw pointers on native stacks). + +## Sweeping phase + +The sweeping phase consists of the following steps: + +1. Invoke pre-finalizers. +At this point, no destructors have been invoked and no memory has been reclaimed. +Pre-finalizers are allowed to access any other on-heap objects, even those that may get destructed. + +2. Sweeping invokes destructors of the dead (unreachable) objects and reclaims memory to be reused by future allocations. + +Assumptions should not be made about the order and the timing of their execution. +There is no guarantee on the order in which the destructors are invoked. +That's why destructors must not access any other on-heap objects (which might have already been destructed). +If some destructor unavoidably needs to access other on-heap objects, it will have to be converted to a pre-finalizer. +The pre-finalizer is allowed to access other on-heap objects. 
+ +The mutator is resumed before all destructors have ran. +For example, imagine a case where X is a client of Y, and Y holds a list of clients. +If the code relies on X's destructor removing X from the list, there is a risk that Y iterates the list and calls some method of X which may touch other on-heap objects. +This causes a use-after-free. +Care must be taken to make sure that X is explicitly removed from the list before the mutator resumes its execution in a way that doesn't rely on X's destructor (e.g. a pre-finalizer). + +Similar to marking, sweeping can be executed in either an atomic stop-the-world manner or incrementally/concurrently. +With incremental/concurrent sweeping, step 2 is interleaved with mutator. +Incremental/concurrent sweeping can be atomically finalized in case it is needed to trigger another GC cycle. +Even with concurrent sweeping, destructors are guaranteed to run on the thread the object has been allocated on to preserve C++ semantics. + +Notes: + +* Weak processing runs only when the holder object of the WeakMember outlives the pointed object. +If the holder object and the pointed object die at the same time, weak processing doesn't run. +It is wrong to write code assuming that the weak processing always runs. + +* Pre-finalizers are heavy because the thread needs to scan all pre-finalizers at each sweeping phase to determine which pre-finalizers should be invoked (the thread needs to invoke pre-finalizers of dead objects). +Adding pre-finalizers to frequently created objects should be avoided. 
diff --git a/deps/v8/include/cppgc/default-platform.h b/deps/v8/include/cppgc/default-platform.h index 2ccdeddd8376f4..f9af756c39a0a5 100644 --- a/deps/v8/include/cppgc/default-platform.h +++ b/deps/v8/include/cppgc/default-platform.h @@ -6,7 +6,6 @@ #define INCLUDE_CPPGC_DEFAULT_PLATFORM_H_ #include -#include #include "cppgc/platform.h" #include "libplatform/libplatform.h" @@ -64,6 +63,8 @@ class V8_EXPORT DefaultPlatform : public Platform { return v8_platform_->GetTracingController(); } + v8::Platform* GetV8Platform() const { return v8_platform_.get(); } + protected: static constexpr v8::Isolate* kNoIsolate = nullptr; diff --git a/deps/v8/include/cppgc/explicit-management.h b/deps/v8/include/cppgc/explicit-management.h index cdb6af48586e02..0290328dccbab8 100644 --- a/deps/v8/include/cppgc/explicit-management.h +++ b/deps/v8/include/cppgc/explicit-management.h @@ -15,11 +15,27 @@ namespace cppgc { class HeapHandle; +namespace subtle { + +template +void FreeUnreferencedObject(HeapHandle& heap_handle, T& object); +template +bool Resize(T& object, AdditionalBytes additional_bytes); + +} // namespace subtle + namespace internal { -V8_EXPORT void FreeUnreferencedObject(HeapHandle&, void*); -V8_EXPORT bool Resize(void*, size_t); +class ExplicitManagementImpl final { + private: + V8_EXPORT static void FreeUnreferencedObject(HeapHandle&, void*); + V8_EXPORT static bool Resize(void*, size_t); + template + friend void subtle::FreeUnreferencedObject(HeapHandle&, T&); + template + friend bool subtle::Resize(T&, AdditionalBytes); +}; } // namespace internal namespace subtle { @@ -45,7 +61,8 @@ template void FreeUnreferencedObject(HeapHandle& heap_handle, T& object) { static_assert(IsGarbageCollectedTypeV, "Object must be of type GarbageCollected."); - internal::FreeUnreferencedObject(heap_handle, &object); + internal::ExplicitManagementImpl::FreeUnreferencedObject(heap_handle, + &object); } /** @@ -73,7 +90,8 @@ template bool Resize(T& object, AdditionalBytes 
additional_bytes) { static_assert(IsGarbageCollectedTypeV, "Object must be of type GarbageCollected."); - return internal::Resize(&object, sizeof(T) + additional_bytes.value); + return internal::ExplicitManagementImpl::Resize( + &object, sizeof(T) + additional_bytes.value); } } // namespace subtle diff --git a/deps/v8/include/cppgc/garbage-collected.h b/deps/v8/include/cppgc/garbage-collected.h index 75d127ee9c6414..6737c8be49aca4 100644 --- a/deps/v8/include/cppgc/garbage-collected.h +++ b/deps/v8/include/cppgc/garbage-collected.h @@ -62,7 +62,8 @@ class GarbageCollected { // virtual destructor requires an unambiguous, accessible 'operator delete'. void operator delete(void*) { #ifdef V8_ENABLE_CHECKS - internal::Abort(); + internal::Fatal( + "Manually deleting a garbage collected object is not allowed"); #endif // V8_ENABLE_CHECKS } void operator delete[](void*) = delete; diff --git a/deps/v8/include/cppgc/heap-consistency.h b/deps/v8/include/cppgc/heap-consistency.h index 8e603d5d8af2de..54a4dbc21ed2eb 100644 --- a/deps/v8/include/cppgc/heap-consistency.h +++ b/deps/v8/include/cppgc/heap-consistency.h @@ -149,6 +149,19 @@ class HeapConsistency final { internal::WriteBarrier::GenerationalBarrier(params, slot); } + /** + * Generational barrier for source object that may contain outgoing pointers + * to objects in young generation. + * + * \param params The parameters retrieved from `GetWriteBarrierType()`. + * \param inner_pointer Pointer to the source object. 
+ */ + static V8_INLINE void GenerationalBarrierForSourceObject( + const WriteBarrierParams& params, const void* inner_pointer) { + internal::WriteBarrier::GenerationalBarrierForSourceObject(params, + inner_pointer); + } + private: HeapConsistency() = delete; }; diff --git a/deps/v8/include/cppgc/heap.h b/deps/v8/include/cppgc/heap.h index 136c4fb44d08ab..aa3c6f468a21de 100644 --- a/deps/v8/include/cppgc/heap.h +++ b/deps/v8/include/cppgc/heap.h @@ -68,8 +68,8 @@ class V8_EXPORT Heap { */ kAtomic, /** - * Incremental marking, i.e. interleave marking is the rest of the - * application on the same thread. + * Incremental marking interleaves marking with the rest of the application + * workload on the same thread. */ kIncremental, /** @@ -86,6 +86,11 @@ class V8_EXPORT Heap { * Atomic stop-the-world sweeping. All of sweeping is performed at once. */ kAtomic, + /** + * Incremental sweeping interleaves sweeping with the rest of the + * application workload on the same thread. + */ + kIncremental, /** * Incremental and concurrent sweeping. Sweeping is split and interleaved * with the rest of the application. diff --git a/deps/v8/include/cppgc/internal/prefinalizer-handler.h b/deps/v8/include/cppgc/internal/prefinalizer-handler.h deleted file mode 100644 index 64b07ec9112689..00000000000000 --- a/deps/v8/include/cppgc/internal/prefinalizer-handler.h +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef INCLUDE_CPPGC_INTERNAL_PREFINALIZER_HANDLER_H_ -#define INCLUDE_CPPGC_INTERNAL_PREFINALIZER_HANDLER_H_ - -#include "cppgc/heap.h" -#include "cppgc/liveness-broker.h" - -namespace cppgc { -namespace internal { - -class V8_EXPORT PreFinalizerRegistrationDispatcher final { - public: - using PreFinalizerCallback = bool (*)(const LivenessBroker&, void*); - struct PreFinalizer { - void* object; - PreFinalizerCallback callback; - - bool operator==(const PreFinalizer& other) const; - }; - - static void RegisterPrefinalizer(PreFinalizer pre_finalizer); -}; - -} // namespace internal -} // namespace cppgc - -#endif // INCLUDE_CPPGC_INTERNAL_PREFINALIZER_HANDLER_H_ diff --git a/deps/v8/include/cppgc/internal/write-barrier.h b/deps/v8/include/cppgc/internal/write-barrier.h index 67f039c6584513..cdb7ec6f9e7067 100644 --- a/deps/v8/include/cppgc/internal/write-barrier.h +++ b/deps/v8/include/cppgc/internal/write-barrier.h @@ -70,10 +70,6 @@ class V8_EXPORT WriteBarrier final { // Returns the required write barrier for a given `value`. 
static V8_INLINE Type GetWriteBarrierType(const void* value, Params& params); - template - static V8_INLINE Type GetWriteBarrierTypeForExternallyReferencedObject( - const void* value, Params& params, HeapHandleCallback callback); - static V8_INLINE void DijkstraMarkingBarrier(const Params& params, const void* object); static V8_INLINE void DijkstraMarkingBarrierRange( @@ -84,9 +80,13 @@ class V8_EXPORT WriteBarrier final { #if defined(CPPGC_YOUNG_GENERATION) static V8_INLINE void GenerationalBarrier(const Params& params, const void* slot); -#else // !CPPGC_YOUNG_GENERATION + static V8_INLINE void GenerationalBarrierForSourceObject( + const Params& params, const void* inner_pointer); +#else // !CPPGC_YOUNG_GENERATION static V8_INLINE void GenerationalBarrier(const Params& params, const void* slot) {} + static V8_INLINE void GenerationalBarrierForSourceObject( + const Params& params, const void* inner_pointer) {} #endif // CPPGC_YOUNG_GENERATION #if V8_ENABLE_CHECKS @@ -124,8 +124,10 @@ class V8_EXPORT WriteBarrier final { #if defined(CPPGC_YOUNG_GENERATION) static CagedHeapLocalData& GetLocalData(HeapHandle&); static void GenerationalBarrierSlow(const CagedHeapLocalData& local_data, - const AgeTable& ageTable, + const AgeTable& age_table, const void* slot, uintptr_t value_offset); + static void GenerationalBarrierForSourceObjectSlow( + const CagedHeapLocalData& local_data, const void* object); #endif // CPPGC_YOUNG_GENERATION static AtomicEntryFlag incremental_or_concurrent_marking_flag_; @@ -157,13 +159,6 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final { return GetNoSlot(value, params, callback); } - template - static V8_INLINE WriteBarrier::Type GetForExternallyReferenced( - const void* value, WriteBarrier::Params& params, - HeapHandleCallback callback) { - return GetNoSlot(value, params, callback); - } - private: WriteBarrierTypeForCagedHeapPolicy() = delete; @@ -292,15 +287,6 @@ class V8_EXPORT WriteBarrierTypeForNonCagedHeapPolicy final { callback); 
} - template - static V8_INLINE WriteBarrier::Type GetForExternallyReferenced( - const void* value, WriteBarrier::Params& params, - HeapHandleCallback callback) { - // The slot will never be used in `Get()` below. - return Get(nullptr, value, params, - callback); - } - private: template struct ValueModeDispatch; @@ -375,15 +361,6 @@ WriteBarrier::Type WriteBarrier::GetWriteBarrierType( []() {}); } -// static -template -WriteBarrier::Type -WriteBarrier::GetWriteBarrierTypeForExternallyReferencedObject( - const void* value, Params& params, HeapHandleCallback callback) { - return WriteBarrierTypePolicy::GetForExternallyReferenced(value, params, - callback); -} - // static void WriteBarrier::DijkstraMarkingBarrier(const Params& params, const void* object) { @@ -433,6 +410,21 @@ void WriteBarrier::GenerationalBarrier(const Params& params, const void* slot) { GenerationalBarrierSlow(local_data, age_table, slot, params.value_offset); } +// static +void WriteBarrier::GenerationalBarrierForSourceObject( + const Params& params, const void* inner_pointer) { + CheckParams(Type::kGenerational, params); + + const CagedHeapLocalData& local_data = params.caged_heap(); + const AgeTable& age_table = local_data.age_table; + + // Assume that if the first element is in young generation, the whole range is + // in young generation. 
+ if (V8_LIKELY(age_table[params.slot_offset] == AgeTable::Age::kYoung)) return; + + GenerationalBarrierForSourceObjectSlow(local_data, inner_pointer); +} + #endif // !CPPGC_YOUNG_GENERATION } // namespace internal diff --git a/deps/v8/include/cppgc/member.h b/deps/v8/include/cppgc/member.h index 38105b8e4323b9..66a8cfd80294c3 100644 --- a/deps/v8/include/cppgc/member.h +++ b/deps/v8/include/cppgc/member.h @@ -26,7 +26,7 @@ class MemberBase { protected: struct AtomicInitializerTag {}; - MemberBase() = default; + MemberBase() : raw_(nullptr) {} explicit MemberBase(const void* value) : raw_(value) {} MemberBase(const void* value, AtomicInitializerTag) { SetRawAtomic(value); } @@ -46,7 +46,10 @@ class MemberBase { void ClearFromGC() const { raw_ = nullptr; } private: - mutable const void* raw_ = nullptr; + // All constructors initialize `raw_`. Do not add a default value here as it + // results in a non-atomic write on some builds, even when the atomic version + // of the constructor is used. + mutable const void* raw_; }; // The basic class from which all Member classes are 'generated'. 
diff --git a/deps/v8/include/cppgc/persistent.h b/deps/v8/include/cppgc/persistent.h index 182fb08549a86d..244f94c81958b8 100644 --- a/deps/v8/include/cppgc/persistent.h +++ b/deps/v8/include/cppgc/persistent.h @@ -118,10 +118,10 @@ class BasicPersistent final : public PersistentBase, template ::value>> - BasicPersistent(internal::BasicMember - member, - const SourceLocation& loc = SourceLocation::Current()) + BasicPersistent( + const internal::BasicMember& member, + const SourceLocation& loc = SourceLocation::Current()) : BasicPersistent(member.Get(), loc) {} ~BasicPersistent() { Clear(); } @@ -159,9 +159,8 @@ class BasicPersistent final : public PersistentBase, typename MemberWeaknessTag, typename MemberCheckingPolicy, typename = std::enable_if_t::value>> BasicPersistent& operator=( - internal::BasicMember - member) { + const internal::BasicMember& member) { return operator=(member.Get()); } @@ -292,12 +291,12 @@ template -bool operator==(const BasicPersistent& p, - BasicMember - m) { +bool operator==( + const BasicPersistent& + p, + const BasicMember& m) { return p.Get() == m.Get(); } @@ -305,12 +304,12 @@ template -bool operator!=(const BasicPersistent& p, - BasicMember - m) { +bool operator!=( + const BasicPersistent& + p, + const BasicMember& m) { return !(p == m); } @@ -318,12 +317,12 @@ template -bool operator==(BasicMember - m, - const BasicPersistent& p) { +bool operator==( + const BasicMember& m, + const BasicPersistent& + p) { return m.Get() == p.Get(); } @@ -331,12 +330,12 @@ template -bool operator!=(BasicMember - m, - const BasicPersistent& p) { +bool operator!=( + const BasicMember& m, + const BasicPersistent& + p) { return !(m == p); } diff --git a/deps/v8/include/cppgc/platform.h b/deps/v8/include/cppgc/platform.h index 3276a26b6520b6..5d5f8796adf42b 100644 --- a/deps/v8/include/cppgc/platform.h +++ b/deps/v8/include/cppgc/platform.h @@ -7,6 +7,7 @@ #include +#include "cppgc/source-location.h" #include "v8-platform.h" // 
NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory) @@ -145,7 +146,8 @@ V8_EXPORT void ShutdownProcess(); namespace internal { -V8_EXPORT void Abort(); +V8_EXPORT void Fatal(const std::string& reason = std::string(), + const SourceLocation& = SourceLocation::Current()); } // namespace internal diff --git a/deps/v8/include/cppgc/prefinalizer.h b/deps/v8/include/cppgc/prefinalizer.h index 6153b37ff5d2cb..51f2eac8ed4661 100644 --- a/deps/v8/include/cppgc/prefinalizer.h +++ b/deps/v8/include/cppgc/prefinalizer.h @@ -6,23 +6,17 @@ #define INCLUDE_CPPGC_PREFINALIZER_H_ #include "cppgc/internal/compiler-specific.h" -#include "cppgc/internal/prefinalizer-handler.h" #include "cppgc/liveness-broker.h" namespace cppgc { namespace internal { -template -class PrefinalizerRegistration final { +class V8_EXPORT PrefinalizerRegistration final { public: - explicit PrefinalizerRegistration(T* self) { - static_assert(sizeof(&T::InvokePreFinalizer) > 0, - "USING_PRE_FINALIZER(T) must be defined."); + using Callback = bool (*)(const cppgc::LivenessBroker&, void*); - cppgc::internal::PreFinalizerRegistrationDispatcher::RegisterPrefinalizer( - {self, T::InvokePreFinalizer}); - } + PrefinalizerRegistration(void*, Callback); void* operator new(size_t, void* location) = delete; void* operator new(size_t) = delete; @@ -30,6 +24,35 @@ class PrefinalizerRegistration final { } // namespace internal +/** + * Macro must be used in the private section of `Class` and registers a + * prefinalization callback `void Class::PreFinalizer()`. The callback is + * invoked on garbage collection after the collector has found an object to be + * dead. + * + * Callback properties: + * - The callback is invoked before a possible destructor for the corresponding + * object. + * - The callback may access the whole object graph, irrespective of whether + * objects are considered dead or alive. + * - The callback is invoked on the same thread as the object was created on. 
+ * + * Example: + * \code + * class WithPrefinalizer : public GarbageCollected { + * CPPGC_USING_PRE_FINALIZER(WithPrefinalizer, Dispose); + * + * public: + * void Trace(Visitor*) const {} + * void Dispose() { prefinalizer_called = true; } + * ~WithPrefinalizer() { + * // prefinalizer_called == true + * } + * private: + * bool prefinalizer_called = false; + * }; + * \endcode + */ #define CPPGC_USING_PRE_FINALIZER(Class, PreFinalizer) \ public: \ static bool InvokePreFinalizer(const cppgc::LivenessBroker& liveness_broker, \ @@ -43,8 +66,8 @@ class PrefinalizerRegistration final { } \ \ private: \ - CPPGC_NO_UNIQUE_ADDRESS cppgc::internal::PrefinalizerRegistration \ - prefinalizer_dummy_{this}; \ + CPPGC_NO_UNIQUE_ADDRESS cppgc::internal::PrefinalizerRegistration \ + prefinalizer_dummy_{this, Class::InvokePreFinalizer}; \ static_assert(true, "Force semicolon.") } // namespace cppgc diff --git a/deps/v8/include/cppgc/testing.h b/deps/v8/include/cppgc/testing.h index 229ce140f94277..bddd1fc163305b 100644 --- a/deps/v8/include/cppgc/testing.h +++ b/deps/v8/include/cppgc/testing.h @@ -19,8 +19,13 @@ class HeapHandle; namespace testing { /** - * Overrides the state of the stack with the provided value. Takes precedence - * over other parameters that set the stack state. Must no be nested. + * Overrides the state of the stack with the provided value. Parameters passed + * to explicit garbage collection calls still take precedence. Must not be + * nested. + * + * This scope is useful to make the garbage collector consider the stack when + * tasks that invoke garbage collection (through the provided platform) contain + * interesting pointers on its stack. 
*/ class V8_EXPORT V8_NODISCARD OverrideEmbedderStackStateScope final { CPPGC_STACK_ALLOCATED(); @@ -93,6 +98,8 @@ class V8_EXPORT StandaloneTestingHeap final { HeapHandle& heap_handle_; }; +V8_EXPORT bool IsHeapObjectOld(void*); + } // namespace testing } // namespace cppgc diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl index b34c8551ad64bf..09c420e3a63610 100644 --- a/deps/v8/include/js_protocol.pdl +++ b/deps/v8/include/js_protocol.pdl @@ -104,7 +104,9 @@ domain Debugger # Location in the source code. Location location # JavaScript script name or url. - string url + # Deprecated in favor of using the `location.scriptId` to resolve the URL via a previously + # sent `Debugger.scriptParsed` event. + deprecated string url # Scope chain for this call frame. array of Scope scopeChain # `this` object for this call frame. @@ -1550,6 +1552,18 @@ domain Runtime parameters string name + # This method tries to lookup and populate exception details for a + # JavaScript Error object. + # Note that the stackTrace portion of the resulting exceptionDetails will + # only be populated if the Runtime domain was enabled at the time when the + # Error was thrown. + experimental command getExceptionDetails + parameters + # The error object for which to resolve the exception details. + RemoteObjectId errorObjectId + returns + optional ExceptionDetails exceptionDetails + # Notification is issued every time when binding is called. experimental event bindingCalled parameters diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h index fb79bcfe40784c..9ec60c04f9cce6 100644 --- a/deps/v8/include/libplatform/libplatform.h +++ b/deps/v8/include/libplatform/libplatform.h @@ -89,17 +89,6 @@ V8_PLATFORM_EXPORT void RunIdleTasks(v8::Platform* platform, v8::Isolate* isolate, double idle_time_in_seconds); -/** - * Attempts to set the tracing controller for the given platform. 
- * - * The |platform| has to be created using |NewDefaultPlatform|. - * - */ -V8_DEPRECATED("Access the DefaultPlatform directly") -V8_PLATFORM_EXPORT void SetTracingController( - v8::Platform* platform, - v8::platform::tracing::TracingController* tracing_controller); - /** * Notifies the given platform about the Isolate getting deleted soon. Has to be * called for all Isolates which are deleted - unless we're shutting down the diff --git a/deps/v8/include/v8-array-buffer.h b/deps/v8/include/v8-array-buffer.h index 0ce2b653684ea4..e9047b79ce3b1d 100644 --- a/deps/v8/include/v8-array-buffer.h +++ b/deps/v8/include/v8-array-buffer.h @@ -175,8 +175,8 @@ class V8_EXPORT ArrayBuffer : public Object { /** * Convenience allocator. * - * When the virtual memory cage is enabled, this allocator will allocate its - * backing memory inside the cage. Otherwise, it will rely on malloc/free. + * When the sandbox is enabled, this allocator will allocate its backing + * memory inside the sandbox. Otherwise, it will rely on malloc/free. * * Caller takes ownership, i.e. the returned object needs to be freed using * |delete allocator| once it is no longer in use. diff --git a/deps/v8/include/v8-callbacks.h b/deps/v8/include/v8-callbacks.h index b70d59dbeca2a6..70b9c2ae9308a5 100644 --- a/deps/v8/include/v8-callbacks.h +++ b/deps/v8/include/v8-callbacks.h @@ -367,6 +367,20 @@ using HostInitializeImportMetaObjectCallback = void (*)(Local context, Local module, Local meta); +/** + * HostCreateShadowRealmContextCallback is called each time a ShadowRealm is + * being constructed in the initiator_context. + * + * The method combines Context creation and implementation defined abstract + * operation HostInitializeShadowRealm into one. + * + * The embedder should use v8::Context::New or v8::Context:NewFromSnapshot to + * create a new context. If the creation fails, the embedder must propagate + * that exception by returning an empty MaybeLocal. 
+ */ +using HostCreateShadowRealmContextCallback = + MaybeLocal (*)(Local initiator_context); + /** * PrepareStackTraceCallback is called when the stack property of an error is * first accessed. The return value will be used as the stack value. If this diff --git a/deps/v8/include/v8-context.h b/deps/v8/include/v8-context.h index d398ac4b21baad..72dfbaad74d1f9 100644 --- a/deps/v8/include/v8-context.h +++ b/deps/v8/include/v8-context.h @@ -313,17 +313,6 @@ class V8_EXPORT Context : public Data { explicit BackupIncumbentScope(Local backup_incumbent_context); ~BackupIncumbentScope(); - /** - * Returns address that is comparable with JS stack address. Note that JS - * stack may be allocated separately from the native stack. See also - * |TryCatch::JSStackComparableAddressPrivate| for details. - */ - V8_DEPRECATED( - "This is private V8 information that should not be exposed in the API.") - uintptr_t JSStackComparableAddress() const { - return JSStackComparableAddressPrivate(); - } - private: friend class internal::Isolate; @@ -379,7 +368,7 @@ Local Context::GetEmbedderData(int index) { } void* Context::GetAlignedPointerFromEmbedderData(int index) { -#ifndef V8_ENABLE_CHECKS +#if !defined(V8_ENABLE_CHECKS) using A = internal::Address; using I = internal::Internals; A ctx = *reinterpret_cast(this); @@ -387,10 +376,10 @@ void* Context::GetAlignedPointerFromEmbedderData(int index) { I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset); int value_offset = I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index); -#ifdef V8_HEAP_SANDBOX +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS value_offset += I::kEmbedderDataSlotRawPayloadOffset; #endif - internal::Isolate* isolate = I::GetIsolateForHeapSandbox(ctx); + internal::Isolate* isolate = I::GetIsolateForSandbox(ctx); return reinterpret_cast( I::ReadExternalPointerField(isolate, embedder_data, value_offset, internal::kEmbedderDataSlotPayloadTag)); diff --git a/deps/v8/include/v8-cppgc.h 
b/deps/v8/include/v8-cppgc.h index 8ec826a59552b4..201773f59ddd3c 100644 --- a/deps/v8/include/v8-cppgc.h +++ b/deps/v8/include/v8-cppgc.h @@ -12,7 +12,6 @@ #include "cppgc/common.h" #include "cppgc/custom-space.h" #include "cppgc/heap-statistics.h" -#include "cppgc/internal/write-barrier.h" #include "cppgc/visitor.h" #include "v8-internal.h" // NOLINT(build/include_directory) #include "v8-platform.h" // NOLINT(build/include_directory) @@ -78,9 +77,6 @@ struct WrapperDescriptor final { }; struct V8_EXPORT CppHeapCreateParams { - CppHeapCreateParams(const CppHeapCreateParams&) = delete; - CppHeapCreateParams& operator=(const CppHeapCreateParams&) = delete; - std::vector> custom_spaces; WrapperDescriptor wrapper_descriptor; }; @@ -148,6 +144,14 @@ class V8_EXPORT CppHeap { */ void CollectGarbageForTesting(cppgc::EmbedderStackState stack_state); + /** + * Performs a stop-the-world minor garbage collection for testing purposes. + * + * \param stack_state The stack state to assume for the garbage collection. + */ + void CollectGarbageInYoungGenerationForTesting( + cppgc::EmbedderStackState stack_state); + private: CppHeap() = default; @@ -169,140 +173,6 @@ class JSVisitor : public cppgc::Visitor { virtual void Visit(const TracedReferenceBase& ref) {} }; -/** - * **DO NOT USE: Use the appropriate managed types.** - * - * Consistency helpers that aid in maintaining a consistent internal state of - * the garbage collector. - */ -class V8_EXPORT JSHeapConsistency final { - public: - using WriteBarrierParams = cppgc::internal::WriteBarrier::Params; - using WriteBarrierType = cppgc::internal::WriteBarrier::Type; - - /** - * Gets the required write barrier type for a specific write. - * - * Note: Handling for C++ to JS references. - * - * \param ref The reference being written to. - * \param params Parameters that may be used for actual write barrier calls. - * Only filled if return value indicates that a write barrier is needed. 
The - * contents of the `params` are an implementation detail. - * \param callback Callback returning the corresponding heap handle. The - * callback is only invoked if the heap cannot otherwise be figured out. The - * callback must not allocate. - * \returns whether a write barrier is needed and which barrier to invoke. - */ - template - V8_DEPRECATED("Write barriers automatically emitted by TracedReference.") - static V8_INLINE WriteBarrierType - GetWriteBarrierType(const TracedReferenceBase& ref, - WriteBarrierParams& params, - HeapHandleCallback callback) { - if (ref.IsEmpty()) return WriteBarrierType::kNone; - - if (V8_LIKELY(!cppgc::internal::WriteBarrier:: - IsAnyIncrementalOrConcurrentMarking())) { - return cppgc::internal::WriteBarrier::Type::kNone; - } - cppgc::HeapHandle& handle = callback(); - if (!cppgc::subtle::HeapState::IsMarking(handle)) { - return cppgc::internal::WriteBarrier::Type::kNone; - } - params.heap = &handle; -#if V8_ENABLE_CHECKS - params.type = cppgc::internal::WriteBarrier::Type::kMarking; -#endif // !V8_ENABLE_CHECKS - return cppgc::internal::WriteBarrier::Type::kMarking; - } - - /** - * Gets the required write barrier type for a specific write. - * - * Note: Handling for JS to C++ references. - * - * \param wrapper The wrapper that has been written into. - * \param wrapper_index The wrapper index in `wrapper` that has been written - * into. - * \param wrappable The value that was written. - * \param params Parameters that may be used for actual write barrier calls. - * Only filled if return value indicates that a write barrier is needed. The - * contents of the `params` are an implementation detail. - * \param callback Callback returning the corresponding heap handle. The - * callback is only invoked if the heap cannot otherwise be figured out. The - * callback must not allocate. - * \returns whether a write barrier is needed and which barrier to invoke. 
- */ - template - V8_DEPRECATE_SOON( - "Write barriers automatically emitted when using " - "`SetAlignedPointerInInternalFields()`.") - static V8_INLINE WriteBarrierType - GetWriteBarrierType(v8::Local& wrapper, int wrapper_index, - const void* wrappable, WriteBarrierParams& params, - HeapHandleCallback callback) { -#if V8_ENABLE_CHECKS - CheckWrapper(wrapper, wrapper_index, wrappable); -#endif // V8_ENABLE_CHECKS - return cppgc::internal::WriteBarrier:: - GetWriteBarrierTypeForExternallyReferencedObject(wrappable, params, - callback); - } - - /** - * Conservative Dijkstra-style write barrier that processes an object if it - * has not yet been processed. - * - * \param params The parameters retrieved from `GetWriteBarrierType()`. - * \param ref The reference being written to. - */ - V8_DEPRECATED("Write barriers automatically emitted by TracedReference.") - static V8_INLINE void DijkstraMarkingBarrier(const WriteBarrierParams& params, - cppgc::HeapHandle& heap_handle, - const TracedReferenceBase& ref) { - cppgc::internal::WriteBarrier::CheckParams(WriteBarrierType::kMarking, - params); - DijkstraMarkingBarrierSlow(heap_handle, ref); - } - - /** - * Conservative Dijkstra-style write barrier that processes an object if it - * has not yet been processed. - * - * \param params The parameters retrieved from `GetWriteBarrierType()`. - * \param object The pointer to the object. May be an interior pointer to a - * an interface of the actual object. - */ - V8_DEPRECATE_SOON( - "Write barriers automatically emitted when using " - "`SetAlignedPointerInInternalFields()`.") - static V8_INLINE void DijkstraMarkingBarrier(const WriteBarrierParams& params, - cppgc::HeapHandle& heap_handle, - const void* object) { - cppgc::internal::WriteBarrier::DijkstraMarkingBarrier(params, object); - } - - /** - * Generational barrier for maintaining consistency when running with multiple - * generations. - * - * \param params The parameters retrieved from `GetWriteBarrierType()`. 
- * \param ref The reference being written to. - */ - V8_DEPRECATED("Write barriers automatically emitted by TracedReference.") - static V8_INLINE void GenerationalBarrier(const WriteBarrierParams& params, - const TracedReferenceBase& ref) {} - - private: - JSHeapConsistency() = delete; - - static void CheckWrapper(v8::Local&, int, const void*); - - static void DijkstraMarkingBarrierSlow(cppgc::HeapHandle&, - const TracedReferenceBase& ref); -}; - /** * Provided as input to `CppHeap::CollectCustomSpaceStatisticsAtLastGC()`. * diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h index a13ae3f6d6c8c7..52255f3700cb73 100644 --- a/deps/v8/include/v8-debug.h +++ b/deps/v8/include/v8-debug.h @@ -7,8 +7,8 @@ #include -#include "v8-local-handle.h" // NOLINT(build/include_directory) -#include "v8config.h" // NOLINT(build/include_directory) +#include "v8-script.h" // NOLINT(build/include_directory) +#include "v8config.h" // NOLINT(build/include_directory) namespace v8 { @@ -20,13 +20,18 @@ class String; */ class V8_EXPORT StackFrame { public: + /** + * Returns the source location, 0-based, for the associated function call. + */ + Location GetLocation() const; + /** * Returns the number, 1-based, of the line for the associate function call. * This method will return Message::kNoLineNumberInfo if it is unable to * retrieve the line number, or if kLineNumber was not passed as an option * when capturing the StackTrace. */ - int GetLineNumber() const; + int GetLineNumber() const { return GetLocation().GetLineNumber() + 1; } /** * Returns the 1-based column offset on the line for the associated function @@ -35,7 +40,7 @@ class V8_EXPORT StackFrame { * the column number, or if kColumnOffset was not passed as an option when * capturing the StackTrace. */ - int GetColumn() const; + int GetColumn() const { return GetLocation().GetColumnNumber() + 1; } /** * Returns the id of the script for the function for this StackFrame. 
@@ -144,6 +149,18 @@ class V8_EXPORT StackTrace { */ static Local CurrentStackTrace( Isolate* isolate, int frame_limit, StackTraceOptions options = kDetailed); + + /** + * Returns the first valid script name or source URL starting at the top of + * the JS stack. The returned string is either an empty handle if no script + * name/url was found or a non-zero-length string. + * + * This method is equivalent to calling StackTrace::CurrentStackTrace and + * walking the resulting frames from the beginning until a non-empty script + * name/url is found. The difference is that this method won't allocate + * a stack trace. + */ + static Local CurrentScriptNameOrSourceURL(Isolate* isolate); }; } // namespace v8 diff --git a/deps/v8/include/v8-embedder-heap.h b/deps/v8/include/v8-embedder-heap.h index 81390f1a7c5c3c..43f96d7f0a942d 100644 --- a/deps/v8/include/v8-embedder-heap.h +++ b/deps/v8/include/v8-embedder-heap.h @@ -51,7 +51,11 @@ class V8_EXPORT EmbedderRootsHandler { * being treated as roots. */ virtual bool IsRoot(const v8::TracedReference& handle) = 0; - virtual bool IsRoot(const v8::TracedGlobal& handle) = 0; + + V8_DEPRECATED("See v8::TracedGlobal class comment.") + virtual bool IsRoot(const v8::TracedGlobal& handle) { + return true; + } /** * Used in combination with |IsRoot|. Called by V8 when an @@ -88,6 +92,7 @@ class V8_EXPORT EmbedderHeapTracer { class V8_EXPORT TracedGlobalHandleVisitor { public: virtual ~TracedGlobalHandleVisitor() = default; + V8_DEPRECATED("See v8::TracedGlobal class comment.") virtual void VisitTracedGlobalHandle(const TracedGlobal& handle) {} virtual void VisitTracedReference(const TracedReference& handle) {} }; @@ -124,14 +129,6 @@ class V8_EXPORT EmbedderHeapTracer { */ void SetStackStart(void* stack_start); - /** - * Called by the embedder to notify V8 of an empty execution stack. 
- */ - V8_DEPRECATED( - "This call only optimized internal caches which V8 is able to figure out " - "on its own now.") - void NotifyEmptyEmbedderStack(); - /** * Called by v8 to register internal fields of found wrappers. * @@ -197,6 +194,7 @@ class V8_EXPORT EmbedderHeapTracer { */ virtual bool IsRootForNonTracingGC( const v8::TracedReference& handle); + V8_DEPRECATED("See v8::TracedGlobal class comment.") virtual bool IsRootForNonTracingGC(const v8::TracedGlobal& handle); /** @@ -205,14 +203,6 @@ class V8_EXPORT EmbedderHeapTracer { virtual void ResetHandleInNonTracingGC( const v8::TracedReference& handle); - /* - * Called by the embedder to immediately perform a full garbage collection. - * - * Should only be used in testing code. - */ - V8_DEPRECATE_SOON("Use Isolate::RequestGarbageCollectionForTesting instead") - void GarbageCollectionForTesting(EmbedderStackState stack_state); - /* * Called by the embedder to signal newly allocated or freed memory. Not bound * to tracing phases. Embedders should trade off when increments are reported diff --git a/deps/v8/include/v8-embedder-state-scope.h b/deps/v8/include/v8-embedder-state-scope.h index 37e1f2bacb71f8..d8a3b08d5caeae 100644 --- a/deps/v8/include/v8-embedder-state-scope.h +++ b/deps/v8/include/v8-embedder-state-scope.h @@ -19,9 +19,10 @@ class EmbedderState; // A StateTag represents a possible state of the embedder. enum class EmbedderStateTag : uint8_t { + // reserved EMPTY = 0, - // embedder can define any state in between - OTHER = UINT8_MAX, + OTHER = 1, + // embedder can define any state after }; // A stack-allocated class that manages an embedder state on the isolate. 
diff --git a/deps/v8/include/v8-exception.h b/deps/v8/include/v8-exception.h index faa46487f8fb31..64126c420a6928 100644 --- a/deps/v8/include/v8-exception.h +++ b/deps/v8/include/v8-exception.h @@ -169,13 +169,6 @@ class V8_EXPORT TryCatch { */ void SetCaptureMessage(bool value); - V8_DEPRECATED( - "This is private information that should not be exposed by the API") - static void* JSStackComparableAddress(TryCatch* handler) { - if (handler == nullptr) return nullptr; - return reinterpret_cast(handler->JSStackComparableAddressPrivate()); - } - TryCatch(const TryCatch&) = delete; void operator=(const TryCatch&) = delete; diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h index 0c0c1cbd5a5c5a..3403de93eca852 100644 --- a/deps/v8/include/v8-fast-api-calls.h +++ b/deps/v8/include/v8-fast-api-calls.h @@ -532,10 +532,6 @@ class V8_EXPORT CFunction { }; }; -struct ApiObject { - uintptr_t address; -}; - /** * A struct which may be passed to a fast call callback, like so: * \code @@ -660,7 +656,6 @@ struct CTypeInfoTraits {}; V(void, kVoid) \ V(v8::Local, kV8Value) \ V(v8::Local, kV8Value) \ - V(ApiObject, kApiObject) \ V(AnyCType, kAny) // ApiObject was a temporary solution to wrap the pointer to the v8::Value. diff --git a/deps/v8/include/v8-initialization.h b/deps/v8/include/v8-initialization.h index 7a2ae9316a8b0a..99022cec453784 100644 --- a/deps/v8/include/v8-initialization.h +++ b/deps/v8/include/v8-initialization.h @@ -99,8 +99,10 @@ class V8_EXPORT V8 { const int kBuildConfiguration = (internal::PointerCompressionIsEnabled() ? kPointerCompression : 0) | (internal::SmiValuesAre31Bits() ? k31BitSmis : 0) | - (internal::HeapSandboxIsEnabled() ? kHeapSandbox : 0) | - (internal::VirtualMemoryCageIsEnabled() ? kVirtualMemoryCage : 0); + (internal::SandboxedExternalPointersAreEnabled() + ? kSandboxedExternalPointers + : 0) | + (internal::SandboxIsEnabled() ? 
kSandbox : 0); return Initialize(kBuildConfiguration); } @@ -181,64 +183,74 @@ class V8_EXPORT V8 { * V8 was disposed. */ static void DisposePlatform(); - V8_DEPRECATE_SOON("Use DisposePlatform()") + V8_DEPRECATED("Use DisposePlatform()") static void ShutdownPlatform() { DisposePlatform(); } -#ifdef V8_VIRTUAL_MEMORY_CAGE +#ifdef V8_SANDBOX // - // Virtual Memory Cage related API. + // Sandbox related API. // // This API is not yet stable and subject to changes in the future. // /** - * Initializes the virtual memory cage for V8. + * Initializes the V8 sandbox. * * This must be invoked after the platform was initialized but before V8 is - * initialized. The virtual memory cage is torn down during platform shutdown. + * initialized. The sandbox is torn down during platform shutdown. * Returns true on success, false otherwise. * - * TODO(saelo) Once it is no longer optional to create the virtual memory - * cage when compiling with V8_VIRTUAL_MEMORY_CAGE, the cage initialization - * will likely happen as part of V8::Initialize, at which point this function - * should be removed. + * TODO(saelo) Once it is no longer optional to initialize the sandbox when + * compiling with V8_SANDBOX, the sandbox initialization will likely happen + * as part of V8::Initialize, at which point this function should be removed. */ - static bool InitializeVirtualMemoryCage(); + static bool InitializeSandbox(); + V8_DEPRECATE_SOON("Use InitializeSandbox()") + static bool InitializeVirtualMemoryCage() { return InitializeSandbox(); } /** - * Provides access to the virtual memory cage page allocator. + * Provides access to the virtual address subspace backing the sandbox. * - * This allocator allocates pages inside the virtual memory cage. It can for - * example be used to obtain virtual memory for ArrayBuffer backing stores, - * which must be located inside the cage. 
+ * This can be used to allocate pages inside the sandbox, for example to + * obtain virtual memory for ArrayBuffer backing stores, which must be + * located inside the sandbox. * - * It should be assumed that an attacker can corrupt data inside the cage, - * and so in particular the contents of pages returned by this allocator, - * arbitrarily and concurrently. Due to this, it is recommended to to only - * place pure data buffers in pages obtained through this allocator. + * It should be assumed that an attacker can corrupt data inside the sandbox, + * and so in particular the contents of pages allocagted in this virtual + * address space, arbitrarily and concurrently. Due to this, it is + * recommended to to only place pure data buffers in them. * - * This function must only be called after initializing the virtual memory - * cage and V8. + * This function must only be called after initializing the sandbox. */ + static VirtualAddressSpace* GetSandboxAddressSpace(); + V8_DEPRECATE_SOON("Use GetSandboxAddressSpace()") static PageAllocator* GetVirtualMemoryCagePageAllocator(); /** - * Returns the size of the virtual memory cage in bytes. + * Returns the size of the sandbox in bytes. * - * If the cage has not been initialized, or if the initialization failed, + * If the sandbox has not been initialized, or if the initialization failed, * this returns zero. */ - static size_t GetVirtualMemoryCageSizeInBytes(); + static size_t GetSandboxSizeInBytes(); + V8_DEPRECATE_SOON("Use GetSandboxSizeInBytes()") + static size_t GetVirtualMemoryCageSizeInBytes() { + return GetSandboxSizeInBytes(); + } /** - * Returns whether the virtual memory cage is configured securely. + * Returns whether the sandbox is configured securely. * - * If V8 cannot create a proper virtual memory cage, it will fall back to - * creating a cage that doesn't have the desired security properties but at - * least still allows V8 to function. 
This API can be used to determine if - * such an insecure cage is being used, in which case it will return false. + * If V8 cannot create a proper sandbox, it will fall back to creating a + * sandbox that doesn't have the desired security properties but at least + * still allows V8 to function. This API can be used to determine if such an + * insecure sandbox is being used, in which case it will return false. */ - static bool IsUsingSecureVirtualMemoryCage(); + static bool IsSandboxConfiguredSecurely(); + V8_DEPRECATE_SOON("Use IsSandboxConfiguredSecurely()") + static bool IsUsingSecureVirtualMemoryCage() { + return IsSandboxConfiguredSecurely(); + } #endif /** @@ -274,8 +286,8 @@ class V8_EXPORT V8 { enum BuildConfigurationFeatures { kPointerCompression = 1 << 0, k31BitSmis = 1 << 1, - kHeapSandbox = 1 << 2, - kVirtualMemoryCage = 1 << 3, + kSandboxedExternalPointers = 1 << 2, + kSandbox = 1 << 3, }; /** diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h index 2a258d505ac3bc..edd968c766d82d 100644 --- a/deps/v8/include/v8-inspector.h +++ b/deps/v8/include/v8-inspector.h @@ -23,6 +23,10 @@ class Value; namespace v8_inspector { +namespace internal { +class V8DebuggerId; +} // namespace internal + namespace protocol { namespace Debugger { namespace API { @@ -106,6 +110,30 @@ class V8_EXPORT V8ContextInfo { V8ContextInfo& operator=(const V8ContextInfo&) = delete; }; +// This debugger id tries to be unique by generating two random +// numbers, which should most likely avoid collisions. +// Debugger id has a 1:1 mapping to context group. It is used to +// attribute stack traces to a particular debugging, when doing any +// cross-debugger operations (e.g. async step in). +// See also Runtime.UniqueDebuggerId in the protocol. 
+class V8_EXPORT V8DebuggerId { + public: + V8DebuggerId() = default; + V8DebuggerId(const V8DebuggerId&) = default; + V8DebuggerId& operator=(const V8DebuggerId&) = default; + + std::unique_ptr toString() const; + bool isValid() const; + std::pair pair() const; + + private: + friend class internal::V8DebuggerId; + explicit V8DebuggerId(std::pair); + + int64_t m_first = 0; + int64_t m_second = 0; +}; + class V8_EXPORT V8StackTrace { public: virtual StringView firstNonEmptySourceURL() const = 0; @@ -276,6 +304,7 @@ class V8_EXPORT V8Inspector { virtual void contextDestroyed(v8::Local) = 0; virtual void resetContextGroup(int contextGroupId) = 0; virtual v8::MaybeLocal contextById(int contextId) = 0; + virtual V8DebuggerId uniqueDebuggerId(int contextId) = 0; // Various instrumentation. virtual void idleStarted() = 0; diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index f49b54557c628a..196518a2db31f1 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -29,6 +29,13 @@ class Isolate; typedef uintptr_t Address; static const Address kNullAddress = 0; +constexpr int KB = 1024; +constexpr int MB = KB * 1024; +constexpr int GB = MB * 1024; +#ifdef V8_TARGET_ARCH_X64 +constexpr size_t TB = size_t{GB} * 1024; +#endif + /** * Configuration of tagging scheme. */ @@ -109,6 +116,11 @@ struct SmiTagging<8> { }; #ifdef V8_COMPRESS_POINTERS +// See v8:7703 or src/common/ptr-compr-inl.h for details about pointer +// compression. 
+constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32; +constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32; + static_assert( kApiSystemPointerSize == kApiInt64Size, "Pointer compression can be enabled only for 64-bit architectures"); @@ -121,36 +133,6 @@ constexpr bool PointerCompressionIsEnabled() { return kApiTaggedSize != kApiSystemPointerSize; } -constexpr bool HeapSandboxIsEnabled() { -#ifdef V8_HEAP_SANDBOX - return true; -#else - return false; -#endif -} - -using ExternalPointer_t = Address; - -// If the heap sandbox is enabled, these tag values will be ORed with the -// external pointers in the external pointer table to prevent use of pointers of -// the wrong type. When a pointer is loaded, it is ANDed with the inverse of the -// expected type's tag. The tags are constructed in a way that guarantees that a -// failed type check will result in one or more of the top bits of the pointer -// to be set, rendering the pointer inacessible. This construction allows -// performing the type check and removing GC marking bits from the pointer at -// the same time. -enum ExternalPointerTag : uint64_t { - kExternalPointerNullTag = 0x0000000000000000, - kExternalStringResourceTag = 0x00ff000000000000, // 0b000000011111111 - kExternalStringResourceDataTag = 0x017f000000000000, // 0b000000101111111 - kForeignForeignAddressTag = 0x01bf000000000000, // 0b000000110111111 - kNativeContextMicrotaskQueueTag = 0x01df000000000000, // 0b000000111011111 - kEmbedderDataSlotPayloadTag = 0x01ef000000000000, // 0b000000111101111 - kCodeEntryPointTag = 0x01f7000000000000, // 0b000000111110111 -}; - -constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000; - #ifdef V8_31BIT_SMIS_ON_64BIT_ARCH using PlatformSmiTagging = SmiTagging; #else @@ -171,6 +153,164 @@ V8_INLINE static constexpr internal::Address IntToSmi(int value) { kSmiTag; } +/* + * Sandbox related types, constants, and functions. 
+ */ +constexpr bool SandboxIsEnabled() { +#ifdef V8_SANDBOX + return true; +#else + return false; +#endif +} + +constexpr bool SandboxedExternalPointersAreEnabled() { +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS + return true; +#else + return false; +#endif +} + +// SandboxedPointers are guaranteed to point into the sandbox. This is achieved +// for example by storing them as offset rather than as raw pointers. +using SandboxedPointer_t = Address; + +// ExternalPointers point to objects located outside the sandbox. When sandboxed +// external pointers are enabled, these are stored in an external pointer table +// and referenced from HeapObjects through indices. +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS +using ExternalPointer_t = uint32_t; +#else +using ExternalPointer_t = Address; +#endif + +#ifdef V8_SANDBOX_IS_AVAILABLE + +// Size of the sandbox, excluding the guard regions surrounding it. +constexpr size_t kSandboxSizeLog2 = 40; // 1 TB +constexpr size_t kSandboxSize = 1ULL << kSandboxSizeLog2; + +// Required alignment of the sandbox. For simplicity, we require the +// size of the guard regions to be a multiple of this, so that this specifies +// the alignment of the sandbox including and excluding surrounding guard +// regions. The alignment requirement is due to the pointer compression cage +// being located at the start of the sandbox. +constexpr size_t kSandboxAlignment = kPtrComprCageBaseAlignment; + +// Sandboxed pointers are stored inside the heap as offset from the sandbox +// base shifted to the left. This way, it is guaranteed that the offset is +// smaller than the sandbox size after shifting it to the right again. This +// constant specifies the shift amount. +constexpr uint64_t kSandboxedPointerShift = 64 - kSandboxSizeLog2; + +// Size of the guard regions surrounding the sandbox. This assumes a worst-case +// scenario of a 32-bit unsigned index used to access an array of 64-bit +// values. 
+constexpr size_t kSandboxGuardRegionSize = 32ULL * GB; + +static_assert((kSandboxGuardRegionSize % kSandboxAlignment) == 0, + "The size of the guard regions around the sandbox must be a " + "multiple of its required alignment."); + +// Minimum size of the sandbox, excluding the guard regions surrounding it. If +// the virtual memory reservation for the sandbox fails, its size is currently +// halved until either the reservation succeeds or the minimum size is reached. +// A minimum of 32GB allows the 4GB pointer compression region as well as the +// ArrayBuffer partition and two 10GB Wasm memory cages to fit into the +// sandbox. 32GB should also be the minimum possible size of the userspace +// address space as there are some machine configurations with only 36 virtual +// address bits. +constexpr size_t kSandboxMinimumSize = 32ULL * GB; + +static_assert(kSandboxMinimumSize <= kSandboxSize, + "The minimal size of the sandbox must be smaller or equal to the " + "regular size."); + +// On OSes where reserving virtual memory is too expensive to reserve the +// entire address space backing the sandbox, notably Windows pre 8.1, we create +// a partially reserved sandbox that doesn't actually reserve most of the +// memory, and so doesn't have the desired security properties as unrelated +// memory allocations could end up inside of it, but which still ensures that +// objects that should be located inside the sandbox are allocated within +// kSandboxSize bytes from the start of the sandbox. The minimum size of the +// region that is actually reserved for such a sandbox is specified by this +// constant and should be big enough to contain the pointer compression cage as +// well as the ArrayBuffer partition. 
+constexpr size_t kSandboxMinimumReservationSize = 8ULL * GB; + +static_assert(kSandboxMinimumSize > kPtrComprCageReservationSize, + "The sandbox must be larger than the pointer compression cage " + "contained within it."); +static_assert(kSandboxMinimumReservationSize > kPtrComprCageReservationSize, + "The minimum reservation size for a sandbox must be larger than " + "the pointer compression cage contained within it."); + +// For now, even if the sandbox is enabled, we still allow backing stores to be +// allocated outside of it as fallback. This will simplify the initial rollout. +// However, if sandboxed pointers are also enabled, we must always place +// backing stores inside the sandbox as they will be referenced through them. +#ifdef V8_SANDBOXED_POINTERS +constexpr bool kAllowBackingStoresOutsideSandbox = false; +#else +constexpr bool kAllowBackingStoresOutsideSandbox = true; +#endif // V8_SANDBOXED_POINTERS + +// The size of the virtual memory reservation for an external pointer table. +// This determines the maximum number of entries in a table. Using a maximum +// size allows omitting bounds checks on table accesses if the indices are +// guaranteed (e.g. through shifting) to be below the maximum index. This +// value must be a power of two. +static const size_t kExternalPointerTableReservationSize = 128 * MB; + +// The maximum number of entries in an external pointer table. +static const size_t kMaxSandboxedExternalPointers = + kExternalPointerTableReservationSize / kApiSystemPointerSize; + +// The external pointer table indices stored in HeapObjects as external +// pointers are shifted to the left by this amount to guarantee that they are +// smaller than the maximum table size. 
+static const uint32_t kExternalPointerIndexShift = 8; +static_assert((1 << (32 - kExternalPointerIndexShift)) == + kMaxSandboxedExternalPointers, + "kExternalPointerTableReservationSize and " + "kExternalPointerIndexShift don't match"); + +#endif // V8_SANDBOX_IS_AVAILABLE + +// If sandboxed external pointers are enabled, these tag values will be ORed +// with the external pointers in the external pointer table to prevent use of +// pointers of the wrong type. When a pointer is loaded, it is ANDed with the +// inverse of the expected type's tag. The tags are constructed in a way that +// guarantees that a failed type check will result in one or more of the top +// bits of the pointer to be set, rendering the pointer inaccessible. Besides +// the type tag bits (48 through 62), the tags also have the GC mark bit (63) +// set, so that the mark bit is automatically set when a pointer is written +// into the external pointer table (in which case it is clearly alive) and is +// cleared when the pointer is loaded. The exception to this is the free entry +// tag, which doesn't have the mark bit set, as the entry is not alive. This +// construction allows performing the type check and removing GC marking bits +// (the MSB) from the pointer at the same time. +// Note: this scheme assumes a 48-bit address space and will likely break if +// more virtual address bits are used. 
+constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000; +constexpr uint64_t kExternalPointerTagShift = 48; +#define MAKE_TAG(v) (static_cast(v) << kExternalPointerTagShift) +// clang-format off +enum ExternalPointerTag : uint64_t { + kExternalPointerNullTag = MAKE_TAG(0b0000000000000000), + kExternalPointerFreeEntryTag = MAKE_TAG(0b0111111110000000), + kExternalStringResourceTag = MAKE_TAG(0b1000000011111111), + kExternalStringResourceDataTag = MAKE_TAG(0b1000000101111111), + kForeignForeignAddressTag = MAKE_TAG(0b1000000110111111), + kNativeContextMicrotaskQueueTag = MAKE_TAG(0b1000000111011111), + kEmbedderDataSlotPayloadTag = MAKE_TAG(0b1000000111101111), + kCodeEntryPointTag = MAKE_TAG(0b1000000111110111), + kExternalObjectValueTag = MAKE_TAG(0b1000000111111011), +}; +// clang-format on +#undef MAKE_TAG + // Converts encoded external pointer to address. V8_EXPORT Address DecodeExternalPointerImpl(const Isolate* isolate, ExternalPointer_t pointer, @@ -214,7 +354,7 @@ class Internals { static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize; static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize; static const int kEmbedderDataSlotSize = kApiSystemPointerSize; -#ifdef V8_HEAP_SANDBOX +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS static const int kEmbedderDataSlotRawPayloadOffset = kApiTaggedSize; #endif static const int kNativeContextEmbedderDataOffset = 6 * kApiTaggedSize; @@ -250,10 +390,10 @@ class Internals { kIsolateLongTaskStatsCounterOffset + kApiSizetSize; static const int kExternalPointerTableBufferOffset = 0; - static const int kExternalPointerTableLengthOffset = - kExternalPointerTableBufferOffset + kApiSystemPointerSize; static const int kExternalPointerTableCapacityOffset = - kExternalPointerTableLengthOffset + kApiInt32Size; + kExternalPointerTableBufferOffset + kApiSystemPointerSize; + static const int kExternalPointerTableFreelistHeadOffset = + kExternalPointerTableCapacityOffset + kApiInt32Size; static const int 
kUndefinedValueRootIndex = 4; static const int kTheHoleValueRootIndex = 5; @@ -432,9 +572,9 @@ class Internals { #endif } - V8_INLINE static internal::Isolate* GetIsolateForHeapSandbox( + V8_INLINE static internal::Isolate* GetIsolateForSandbox( internal::Address obj) { -#ifdef V8_HEAP_SANDBOX +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS return internal::IsolateFromNeverReadOnlySpaceObject(obj); #else // Not used in non-sandbox mode. @@ -445,7 +585,7 @@ class Internals { V8_INLINE static Address DecodeExternalPointer( const Isolate* isolate, ExternalPointer_t encoded_pointer, ExternalPointerTag tag) { -#ifdef V8_HEAP_SANDBOX +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS return internal::DecodeExternalPointerImpl(isolate, encoded_pointer, tag); #else return encoded_pointer; @@ -455,7 +595,7 @@ class Internals { V8_INLINE static internal::Address ReadExternalPointerField( internal::Isolate* isolate, internal::Address heap_object_ptr, int offset, ExternalPointerTag tag) { -#ifdef V8_HEAP_SANDBOX +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS internal::ExternalPointer_t encoded_value = ReadRawField(heap_object_ptr, offset); // We currently have to treat zero as nullptr in embedder slots. @@ -467,10 +607,6 @@ class Internals { } #ifdef V8_COMPRESS_POINTERS - // See v8:7703 or src/ptr-compr.* for details about pointer compression. - static constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32; - static constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32; - V8_INLINE static internal::Address GetPtrComprCageBaseFromOnHeapAddress( internal::Address addr) { return addr & -static_cast(kPtrComprCageBaseAlignment); @@ -486,100 +622,6 @@ class Internals { #endif // V8_COMPRESS_POINTERS }; -constexpr bool VirtualMemoryCageIsEnabled() { -#ifdef V8_VIRTUAL_MEMORY_CAGE - return true; -#else - return false; -#endif -} - -// CagedPointers are guaranteed to point into the virtual memory cage. 
This is -// achieved for example by storing them as offset from the cage base rather -// than as raw pointers. -using CagedPointer_t = Address; - -#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE - -#define GB (1ULL << 30) -#define TB (1ULL << 40) - -// Size of the virtual memory cage, excluding the guard regions surrounding it. -constexpr size_t kVirtualMemoryCageSizeLog2 = 40; // 1 TB -constexpr size_t kVirtualMemoryCageSize = 1ULL << kVirtualMemoryCageSizeLog2; - -// Required alignment of the virtual memory cage. For simplicity, we require the -// size of the guard regions to be a multiple of this, so that this specifies -// the alignment of the cage including and excluding surrounding guard regions. -// The alignment requirement is due to the pointer compression cage being -// located at the start of the virtual memory cage. -constexpr size_t kVirtualMemoryCageAlignment = - Internals::kPtrComprCageBaseAlignment; - -// Caged pointers are stored inside the heap as offset from the cage base -// shifted to the left. This way, it is guaranteed that the offset is smaller -// than the cage size after shifting it to the right again. This constant -// specifies the shift amount. -constexpr uint64_t kCagedPointerShift = 64 - kVirtualMemoryCageSizeLog2; - -// Size of the guard regions surrounding the virtual memory cage. This assumes a -// worst-case scenario of a 32-bit unsigned index being used to access an array -// of 64-bit values. -constexpr size_t kVirtualMemoryCageGuardRegionSize = 32ULL * GB; - -static_assert((kVirtualMemoryCageGuardRegionSize % - kVirtualMemoryCageAlignment) == 0, - "The size of the virtual memory cage guard region must be a " - "multiple of its required alignment."); - -// Minimum size of the virtual memory cage, excluding the guard regions -// surrounding it. If the cage reservation fails, its size is currently halved -// until either the reservation succeeds or the minimum size is reached. 
A -// minimum of 32GB allows the 4GB pointer compression region as well as the -// ArrayBuffer partition and two 10GB WASM memory cages to fit into the cage. -// 32GB should also be the minimum possible size of the userspace address space -// as there are some machine configurations with only 36 virtual address bits. -constexpr size_t kVirtualMemoryCageMinimumSize = 32ULL * GB; - -static_assert(kVirtualMemoryCageMinimumSize <= kVirtualMemoryCageSize, - "The minimal size of the virtual memory cage must be smaller or " - "equal to the regular size."); - -// On OSes where reservation virtual memory is too expensive to create a real -// cage, notably Windows pre 8.1, we create a fake cage that doesn't actually -// reserve most of the memory, and so doesn't have the desired security -// properties, but still ensures that objects that should be located inside the -// cage are allocated within kVirtualMemoryCageSize bytes from the start of the -// cage, and so appear to be inside the cage. The minimum size of the virtual -// memory range that is actually reserved for a fake cage is specified by this -// constant and should be big enough to contain the pointer compression region -// as well as the ArrayBuffer partition. -constexpr size_t kFakeVirtualMemoryCageMinReservationSize = 8ULL * GB; - -static_assert(kVirtualMemoryCageMinimumSize > - Internals::kPtrComprCageReservationSize, - "The virtual memory cage must be larger than the pointer " - "compression cage contained within it."); -static_assert(kFakeVirtualMemoryCageMinReservationSize > - Internals::kPtrComprCageReservationSize, - "The reservation for a fake virtual memory cage must be larger " - "than the pointer compression cage contained within it."); - -// For now, even if the virtual memory cage is enabled, we still allow backing -// stores to be allocated outside of it as fallback. This will simplify the -// initial rollout. 
However, if the heap sandbox is also enabled, we already use -// the "enforcing mode" of the virtual memory cage. This is useful for testing. -#ifdef V8_HEAP_SANDBOX -constexpr bool kAllowBackingStoresOutsideCage = false; -#else -constexpr bool kAllowBackingStoresOutsideCage = true; -#endif // V8_HEAP_SANDBOX - -#undef GB -#undef TB - -#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE - // Only perform cast check for types derived from v8::Data since // other types do not implement the Cast method. template @@ -608,6 +650,10 @@ V8_INLINE void PerformCastCheck(T* data) { // how static casts work with std::shared_ptr. class BackingStoreBase {}; +// The maximum value in enum GarbageCollectionReason, defined in heap.h. +// This is needed for histograms sampling garbage collection reasons. +constexpr int kGarbageCollectionReasonMaxValue = 25; + } // namespace internal } // namespace v8 diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h index 2fc7daf40ba946..2849d7cae1e9e8 100644 --- a/deps/v8/include/v8-isolate.h +++ b/deps/v8/include/v8-isolate.h @@ -10,7 +10,6 @@ #include #include -#include #include "cppgc/common.h" #include "v8-array-buffer.h" // NOLINT(build/include_directory) @@ -225,6 +224,7 @@ class V8_EXPORT Isolate { /** * Explicitly specify a startup snapshot blob. The embedder owns the blob. + * The embedder *must* ensure that the snapshot is from a trusted source. */ StartupData* snapshot_blob = nullptr; @@ -282,6 +282,12 @@ class V8_EXPORT Isolate { int embedder_wrapper_type_index = -1; int embedder_wrapper_object_index = -1; + /** + * Callbacks to invoke in case of fatal or OOM errors. + */ + FatalErrorCallback fatal_error_callback = nullptr; + OOMErrorCallback oom_error_callback = nullptr; + /** * The following parameter is experimental and may change significantly. * This is currently for internal testing. 
@@ -523,6 +529,8 @@ class V8_EXPORT Isolate { kWasmMultiValue = 110, kWasmExceptionHandling = 111, kInvalidatedMegaDOMProtector = 112, + kFunctionPrototypeArguments = 113, + kFunctionPrototypeCaller = 114, // If you add new values here, you'll also need to update Chromium's: // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to @@ -628,7 +636,7 @@ class V8_EXPORT Isolate { * This specifies the callback called by the upcoming dynamic * import() language feature to load modules. */ - V8_DEPRECATE_SOON("Use HostImportModuleDynamicallyCallback") + V8_DEPRECATED("Use HostImportModuleDynamicallyCallback") void SetHostImportModuleDynamicallyCallback( HostImportModuleDynamicallyWithImportAssertionsCallback callback); void SetHostImportModuleDynamicallyCallback( @@ -641,6 +649,13 @@ class V8_EXPORT Isolate { void SetHostInitializeImportMetaObjectCallback( HostInitializeImportMetaObjectCallback callback); + /** + * This specifies the callback called by the upcoming ShadowRealm + * construction language feature to retrieve host created globals. + */ + void SetHostCreateShadowRealmContextCallback( + HostCreateShadowRealmContextCallback callback); + /** * This specifies the callback called when the stack property of Error * is accessed. diff --git a/deps/v8/include/v8-locker.h b/deps/v8/include/v8-locker.h index 88ce4beb6219fb..7ca5bf6e421e41 100644 --- a/deps/v8/include/v8-locker.h +++ b/deps/v8/include/v8-locker.h @@ -127,6 +127,7 @@ class V8_EXPORT Locker { * The current implementation is quite confusing and leads to unexpected * results if anybody uses v8::Locker in the current process. 
*/ + V8_DEPRECATE_SOON("This method will be removed.") static bool WasEverUsed(); V8_DEPRECATED("Use WasEverUsed instead") static bool IsActive(); diff --git a/deps/v8/include/v8-message.h b/deps/v8/include/v8-message.h index d771a49ff9cb3f..a13276412a8152 100644 --- a/deps/v8/include/v8-message.h +++ b/deps/v8/include/v8-message.h @@ -61,31 +61,6 @@ class ScriptOriginOptions { */ class V8_EXPORT ScriptOrigin { public: - #if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */ - V8_DEPRECATED("Use constructor with primitive C++ types") - #endif - ScriptOrigin( - Local resource_name, Local resource_line_offset, - Local resource_column_offset, - Local resource_is_shared_cross_origin = Local(), - Local script_id = Local(), - Local source_map_url = Local(), - Local resource_is_opaque = Local(), - Local is_wasm = Local(), - Local is_module = Local(), - Local host_defined_options = Local()); - #if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */ - V8_DEPRECATED("Use constructor that takes an isolate") - #endif - explicit ScriptOrigin(Local resource_name, - int resource_line_offset = 0, - int resource_column_offset = 0, - bool resource_is_shared_cross_origin = false, - int script_id = -1, - Local source_map_url = Local(), - bool resource_is_opaque = false, bool is_wasm = false, - bool is_module = false, - Local host_defined_options = Local()); V8_INLINE ScriptOrigin(Isolate* isolate, Local resource_name, int resource_line_offset = 0, int resource_column_offset = 0, @@ -108,12 +83,6 @@ class V8_EXPORT ScriptOrigin { } V8_INLINE Local ResourceName() const; - V8_DEPRECATED("Use getter with primitive C++ types.") - V8_INLINE Local ResourceLineOffset() const; - V8_DEPRECATED("Use getter with primitive C++ types.") - V8_INLINE Local ResourceColumnOffset() const; - V8_DEPRECATED("Use getter with primitive C++ types.") - V8_INLINE Local ScriptID() const; V8_INLINE int LineOffset() const; V8_INLINE int ColumnOffset() const; V8_INLINE int ScriptId() const; @@ 
-220,8 +189,6 @@ class V8_EXPORT Message { bool IsSharedCrossOrigin() const; bool IsOpaque() const; - V8_DEPRECATED("Use the version that takes a std::ostream&.") - static void PrintCurrentStackTrace(Isolate* isolate, FILE* out); static void PrintCurrentStackTrace(Isolate* isolate, std::ostream& out); static const int kNoLineNumberInfo = 0; @@ -232,18 +199,6 @@ class V8_EXPORT Message { Local ScriptOrigin::ResourceName() const { return resource_name_; } -Local ScriptOrigin::ResourceLineOffset() const { - return v8::Integer::New(isolate_, resource_line_offset_); -} - -Local ScriptOrigin::ResourceColumnOffset() const { - return v8::Integer::New(isolate_, resource_column_offset_); -} - -Local ScriptOrigin::ScriptID() const { - return v8::Integer::New(isolate_, script_id_); -} - Local ScriptOrigin::GetHostDefinedOptions() const { return host_defined_options_; } diff --git a/deps/v8/include/v8-metrics.h b/deps/v8/include/v8-metrics.h index 62738442f7ce8c..01bc538e22c769 100644 --- a/deps/v8/include/v8-metrics.h +++ b/deps/v8/include/v8-metrics.h @@ -21,6 +21,7 @@ class Isolate; namespace metrics { struct GarbageCollectionPhases { + int64_t total_wall_clock_duration_in_us = -1; int64_t compact_wall_clock_duration_in_us = -1; int64_t mark_wall_clock_duration_in_us = -1; int64_t sweep_wall_clock_duration_in_us = -1; @@ -34,6 +35,7 @@ struct GarbageCollectionSizes { }; struct GarbageCollectionFullCycle { + int reason = -1; GarbageCollectionPhases total; GarbageCollectionPhases total_cpp; GarbageCollectionPhases main_thread; @@ -73,6 +75,7 @@ struct GarbageCollectionFullMainThreadBatchedIncrementalSweep { }; struct GarbageCollectionYoungCycle { + int reason = -1; int64_t total_wall_clock_duration_in_us = -1; int64_t main_thread_wall_clock_duration_in_us = -1; double collection_rate_in_percent; @@ -230,6 +233,8 @@ struct V8_EXPORT LongTaskStats { int64_t gc_full_atomic_wall_clock_duration_us = 0; int64_t gc_full_incremental_wall_clock_duration_us = 0; int64_t 
gc_young_wall_clock_duration_us = 0; + // Only collected with --slow-histograms + int64_t v8_execute_us = 0; }; } // namespace metrics diff --git a/deps/v8/include/v8-object.h b/deps/v8/include/v8-object.h index e047c413ac2f70..11ff03dd204664 100644 --- a/deps/v8/include/v8-object.h +++ b/deps/v8/include/v8-object.h @@ -594,7 +594,7 @@ class V8_EXPORT Object : public Value { /** * Returns the context in which the object was created. */ - V8_DEPRECATE_SOON("Use MaybeLocal GetCreationContext()") + V8_DEPRECATED("Use MaybeLocal GetCreationContext()") Local CreationContext(); MaybeLocal GetCreationContext(); @@ -735,7 +735,7 @@ Local Object::GetInternalField(int index) { } void* Object::GetAlignedPointerFromInternalField(int index) { -#ifndef V8_ENABLE_CHECKS +#if !defined(V8_ENABLE_CHECKS) using A = internal::Address; using I = internal::Internals; A obj = *reinterpret_cast(this); @@ -744,10 +744,10 @@ void* Object::GetAlignedPointerFromInternalField(int index) { auto instance_type = I::GetInstanceType(obj); if (v8::internal::CanHaveInternalField(instance_type)) { int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index); -#ifdef V8_HEAP_SANDBOX +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS offset += I::kEmbedderDataSlotRawPayloadOffset; #endif - internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj); + internal::Isolate* isolate = I::GetIsolateForSandbox(obj); A value = I::ReadExternalPointerField( isolate, obj, offset, internal::kEmbedderDataSlotPayloadTag); return reinterpret_cast(value); diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h index 9e226331f830ed..91b3fd9cc3f9a3 100644 --- a/deps/v8/include/v8-platform.h +++ b/deps/v8/include/v8-platform.h @@ -401,6 +401,8 @@ class PageAllocator { // this is used to set the MAP_JIT flag on Apple Silicon. // TODO(jkummerow): Remove this when Wasm has a platform-independent // w^x implementation. 
+ // TODO(saelo): Remove this once all JIT pages are allocated through the + // VirtualAddressSpace API. kNoAccessWillJitLater }; @@ -510,8 +512,59 @@ class PageAllocator { virtual bool CanAllocateSharedPages() { return false; } }; +// Opaque type representing a handle to a shared memory region. +using PlatformSharedMemoryHandle = intptr_t; +static constexpr PlatformSharedMemoryHandle kInvalidSharedMemoryHandle = -1; + +// Conversion routines from the platform-dependent shared memory identifiers +// into the opaque PlatformSharedMemoryHandle type. These use the underlying +// types (e.g. unsigned int) instead of the typedef'd ones (e.g. mach_port_t) +// to avoid pulling in large OS header files into this header file. Instead, +// the users of these routines are expected to include the respective OS +// headers in addition to this one. +#if V8_OS_MACOS +// Convert between a shared memory handle and a mach_port_t referencing a memory +// entry object. +inline PlatformSharedMemoryHandle SharedMemoryHandleFromMachMemoryEntry( + unsigned int port) { + return static_cast(port); +} +inline unsigned int MachMemoryEntryFromSharedMemoryHandle( + PlatformSharedMemoryHandle handle) { + return static_cast(handle); +} +#elif V8_OS_FUCHSIA +// Convert between a shared memory handle and a zx_handle_t to a VMO. +inline PlatformSharedMemoryHandle SharedMemoryHandleFromVMO(uint32_t handle) { + return static_cast(handle); +} +inline uint32_t VMOFromSharedMemoryHandle(PlatformSharedMemoryHandle handle) { + return static_cast(handle); +} +#elif V8_OS_WIN +// Convert between a shared memory handle and a Windows HANDLE to a file mapping +// object. +inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileMapping( + void* handle) { + return reinterpret_cast(handle); +} +inline void* FileMappingFromSharedMemoryHandle( + PlatformSharedMemoryHandle handle) { + return reinterpret_cast(handle); +} +#else +// Convert between a shared memory handle and a file descriptor. 
+inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileDescriptor(int fd) { + return static_cast(fd); +} +inline int FileDescriptorFromSharedMemoryHandle( + PlatformSharedMemoryHandle handle) { + return static_cast(handle); +} +#endif + /** - * Page permissions. + * Possible permissions for memory pages. */ enum class PagePermissions { kNoAccess, @@ -528,17 +581,21 @@ enum class PagePermissions { * sub-spaces and (private or shared) memory pages can be allocated, freed, and * modified. This interface is meant to eventually replace the PageAllocator * interface, and can be used as an alternative in the meantime. + * + * This API is not yet stable and may change without notice! */ class VirtualAddressSpace { public: using Address = uintptr_t; VirtualAddressSpace(size_t page_size, size_t allocation_granularity, - Address base, size_t size) + Address base, size_t size, + PagePermissions max_page_permissions) : page_size_(page_size), allocation_granularity_(allocation_granularity), base_(base), - size_(size) {} + size_(size), + max_page_permissions_(max_page_permissions) {} virtual ~VirtualAddressSpace() = default; @@ -575,6 +632,14 @@ class VirtualAddressSpace { */ size_t size() const { return size_; } + /** + * The maximum page permissions that pages allocated inside this space can + * obtain. + * + * \returns the maximum page permissions. + */ + PagePermissions max_page_permissions() const { return max_page_permissions_; } + /** * Sets the random seed so that GetRandomPageAddress() will generate * repeatable sequences of random addresses. @@ -598,6 +663,7 @@ class VirtualAddressSpace { * given address first. If that fails, the allocation is attempted to be * placed elsewhere, possibly nearby, but that is not guaranteed. Specifying * zero for the hint always causes this function to choose a random address. + * The hint, if specified, must be aligned to the specified alignment. * * \param size The size of the allocation in bytes. 
Must be a multiple of the * allocation_granularity(). @@ -618,16 +684,16 @@ class VirtualAddressSpace { /** * Frees previously allocated pages. * + * This function will terminate the process on failure as this implies a bug + * in the client. As such, there is no return value. + * * \param address The start address of the pages to free. This address must - * have been obtains from a call to AllocatePages. + * have been obtained through a call to AllocatePages. * * \param size The size in bytes of the region to free. This must match the * size passed to AllocatePages when the pages were allocated. - * - * \returns true on success, false otherwise. */ - virtual V8_WARN_UNUSED_RESULT bool FreePages(Address address, - size_t size) = 0; + virtual void FreePages(Address address, size_t size) = 0; /** * Sets permissions of all allocated pages in the given range. @@ -645,6 +711,77 @@ class VirtualAddressSpace { virtual V8_WARN_UNUSED_RESULT bool SetPagePermissions( Address address, size_t size, PagePermissions permissions) = 0; + /** + * Creates a guard region at the specified address. + * + * Guard regions are guaranteed to cause a fault when accessed and generally + * do not count towards any memory consumption limits. Further, allocating + * guard regions can usually not fail in subspaces if the region does not + * overlap with another region, subspace, or page allocation. + * + * \param address The start address of the guard region. Must be aligned to + * the allocation_granularity(). + * + * \param size The size of the guard region in bytes. Must be a multiple of + * the allocation_granularity(). + * + * \returns true on success, false otherwise. + */ + virtual V8_WARN_UNUSED_RESULT bool AllocateGuardRegion(Address address, + size_t size) = 0; + + /** + * Frees an existing guard region. + * + * This function will terminate the process on failure as this implies a bug + * in the client. As such, there is no return value. 
+ * + * \param address The start address of the guard region to free. This address + * must have previously been used as address parameter in a successful + * invocation of AllocateGuardRegion. + * + * \param size The size in bytes of the guard region to free. This must match + * the size passed to AllocateGuardRegion when the region was created. + */ + virtual void FreeGuardRegion(Address address, size_t size) = 0; + + /** + * Allocates shared memory pages with the given permissions. + * + * \param hint Placement hint. See AllocatePages. + * + * \param size The size of the allocation in bytes. Must be a multiple of the + * allocation_granularity(). + * + * \param permissions The page permissions of the newly allocated pages. + * + * \param handle A platform-specific handle to a shared memory object. See + * the SharedMemoryHandleFromX routines above for ways to obtain these. + * + * \param offset The offset in the shared memory object at which the mapping + * should start. Must be a multiple of the allocation_granularity(). + * + * \returns the start address of the allocated pages on success, zero on + * failure. + */ + virtual V8_WARN_UNUSED_RESULT Address + AllocateSharedPages(Address hint, size_t size, PagePermissions permissions, + PlatformSharedMemoryHandle handle, uint64_t offset) = 0; + + /** + * Frees previously allocated shared pages. + * + * This function will terminate the process on failure as this implies a bug + * in the client. As such, there is no return value. + * + * \param address The start address of the pages to free. This address must + * have been obtained through a call to AllocateSharedPages. + * + * \param size The size in bytes of the region to free. This must match the + * size passed to AllocateSharedPages when the pages were allocated. + */ + virtual void FreeSharedPages(Address address, size_t size) = 0; + /** * Whether this instance can allocate subspaces or not. 
* @@ -668,14 +805,14 @@ class VirtualAddressSpace { * \param alignment The alignment of the subspace in bytes. Must be a multiple * of the allocation_granularity() and should be a power of two. * - * \param max_permissions The maximum permissions that pages allocated in the - * subspace can obtain. + * \param max_page_permissions The maximum permissions that pages allocated in + * the subspace can obtain. * * \returns a new subspace or nullptr on failure. */ virtual std::unique_ptr AllocateSubspace( Address hint, size_t size, size_t alignment, - PagePermissions max_permissions) = 0; + PagePermissions max_page_permissions) = 0; // // TODO(v8) maybe refactor the methods below before stabilizing the API. For @@ -715,6 +852,7 @@ class VirtualAddressSpace { const size_t allocation_granularity_; const Address base_; const size_t size_; + const PagePermissions max_page_permissions_; }; /** diff --git a/deps/v8/include/v8-primitive.h b/deps/v8/include/v8-primitive.h index 11c01876c723cb..1b6de16686b10d 100644 --- a/deps/v8/include/v8-primitive.h +++ b/deps/v8/include/v8-primitive.h @@ -585,8 +585,6 @@ class V8_EXPORT Symbol : public Name { /** * Returns the description string of the symbol, or undefined if none. 
*/ - V8_DEPRECATED("Use Symbol::Description(isolate)") - Local Description() const; Local Description(Isolate* isolate) const; /** @@ -787,7 +785,7 @@ String::ExternalStringResource* String::GetExternalStringResource() const { ExternalStringResource* result; if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) { - internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj); + internal::Isolate* isolate = I::GetIsolateForSandbox(obj); A value = I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset, internal::kExternalStringResourceTag); @@ -811,7 +809,7 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBase( ExternalStringResourceBase* resource; if (type == I::kExternalOneByteRepresentationTag || type == I::kExternalTwoByteRepresentationTag) { - internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj); + internal::Isolate* isolate = I::GetIsolateForSandbox(obj); A value = I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset, internal::kExternalStringResourceTag); diff --git a/deps/v8/include/v8-script.h b/deps/v8/include/v8-script.h index 770c796396149a..88252ac1897318 100644 --- a/deps/v8/include/v8-script.h +++ b/deps/v8/include/v8-script.h @@ -47,7 +47,7 @@ class V8_EXPORT ScriptOrModule { * The options that were passed by the embedder as HostDefinedOptions to * the ScriptOrigin. */ - V8_DEPRECATE_SOON("Use HostDefinedOptions") + V8_DEPRECATED("Use HostDefinedOptions") Local GetHostDefinedOptions(); Local HostDefinedOptions(); }; @@ -172,29 +172,6 @@ class V8_EXPORT Module : public Data { */ Local GetException() const; - /** - * Returns the number of modules requested by this module. - */ - V8_DEPRECATED("Use Module::GetModuleRequests() and FixedArray::Length().") - int GetModuleRequestsLength() const; - - /** - * Returns the ith module specifier in this module. - * i must be < GetModuleRequestsLength() and >= 0. 
- */ - V8_DEPRECATED( - "Use Module::GetModuleRequests() and ModuleRequest::GetSpecifier().") - Local GetModuleRequest(int i) const; - - /** - * Returns the source location (line number and column number) of the ith - * module specifier's first occurrence in this module. - */ - V8_DEPRECATED( - "Use Module::GetModuleRequests(), ModuleRequest::GetSourceOffset(), and " - "Module::SourceOffsetToLocation().") - Location GetModuleRequestLocation(int i) const; - /** * Returns the ModuleRequests for this module. */ @@ -211,9 +188,6 @@ class V8_EXPORT Module : public Data { */ int GetIdentityHash() const; - using ResolveCallback = - MaybeLocal (*)(Local context, Local specifier, - Local referrer); using ResolveModuleCallback = MaybeLocal (*)( Local context, Local specifier, Local import_assertions, Local referrer); @@ -225,11 +199,6 @@ class V8_EXPORT Module : public Data { * instantiation. (In the case where the callback throws an exception, that * exception is propagated.) */ - V8_DEPRECATED( - "Use the version of InstantiateModule that takes a ResolveModuleCallback " - "parameter") - V8_WARN_UNUSED_RESULT Maybe InstantiateModule(Local context, - ResolveCallback callback); V8_WARN_UNUSED_RESULT Maybe InstantiateModule( Local context, ResolveModuleCallback callback); @@ -407,6 +376,7 @@ class V8_EXPORT ScriptCompiler { class Source { public: // Source takes ownership of both CachedData and CodeCacheConsumeTask. + // The caller *must* ensure that the cached data is from a trusted source. V8_INLINE Source(Local source_string, const ScriptOrigin& origin, CachedData* cached_data = nullptr, ConsumeCodeCacheTask* consume_cache_task = nullptr); @@ -473,18 +443,6 @@ class V8_EXPORT ScriptCompiler { * V8 has parsed the data it received so far. */ virtual size_t GetMoreData(const uint8_t** src) = 0; - - /** - * [DEPRECATED]: No longer used, will be removed soon. 
- */ - V8_DEPRECATED("Not used") - virtual bool SetBookmark() { return false; } - - /** - * [DEPRECATED]: No longer used, will be removed soon. - */ - V8_DEPRECATED("Not used") - virtual void ResetToBookmark() {} }; /** @@ -692,6 +650,7 @@ class V8_EXPORT ScriptCompiler { * It is possible to specify multiple context extensions (obj in the above * example). */ + V8_DEPRECATED("Use CompileFunction") static V8_WARN_UNUSED_RESULT MaybeLocal CompileFunctionInContext( Local context, Source* source, size_t arguments_count, Local arguments[], size_t context_extension_count, diff --git a/deps/v8/include/v8-snapshot.h b/deps/v8/include/v8-snapshot.h index ed02598c36bc70..2400357cf6e069 100644 --- a/deps/v8/include/v8-snapshot.h +++ b/deps/v8/include/v8-snapshot.h @@ -5,8 +5,6 @@ #ifndef INCLUDE_V8_SNAPSHOT_H_ #define INCLUDE_V8_SNAPSHOT_H_ -#include - #include "v8-internal.h" // NOLINT(build/include_directory) #include "v8-local-handle.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory) diff --git a/deps/v8/include/v8-traced-handle.h b/deps/v8/include/v8-traced-handle.h index 7db34a970c8d35..2dcd1d1fb925d7 100644 --- a/deps/v8/include/v8-traced-handle.h +++ b/deps/v8/include/v8-traced-handle.h @@ -11,10 +11,8 @@ #include #include -#include #include #include -#include #include "v8-internal.h" // NOLINT(build/include_directory) #include "v8-local-handle.h" // NOLINT(build/include_directory) @@ -199,6 +197,21 @@ class BasicTracedReference : public TracedReferenceBase { /** * A traced handle with destructor that clears the handle. For more details see * BasicTracedReference. + * + * This type is being deprecated and embedders are encouraged to use + * `v8::TracedReference` in combination with `v8::CppHeap`. 
If this is not + * possible, the following provides feature parity: + * + * \code + * template + * struct TracedGlobalPolyfill { + * v8::TracedReference traced_reference; + * v8::Global weak_reference_for_callback; + * }; + * \endcode + * + * In this example, `weak_reference_for_callback` can be used to emulate + * `SetFinalizationCallback()`. */ template class TracedGlobal : public BasicTracedReference { @@ -213,6 +226,7 @@ class TracedGlobal : public BasicTracedReference { /** * An empty TracedGlobal without storage cell. */ + V8_DEPRECATED("See class comment.") TracedGlobal() : BasicTracedReference() {} /** @@ -222,6 +236,7 @@ class TracedGlobal : public BasicTracedReference { * pointing to the same object. */ template + V8_DEPRECATED("See class comment.") TracedGlobal(Isolate* isolate, Local that) : BasicTracedReference() { this->val_ = this->New(isolate, that.val_, &this->val_, @@ -492,18 +507,20 @@ V8_INLINE bool operator!=(const v8::Local& lhs, template template -void TracedGlobal::Reset(Isolate* isolate, const Local& other) { +void TracedReference::Reset(Isolate* isolate, const Local& other) { static_assert(std::is_base_of::value, "type check"); - Reset(); + this->Reset(); if (other.IsEmpty()) return; - this->val_ = this->New(isolate, other.val_, &this->val_, - internal::GlobalHandleDestructionMode::kWithDestructor, - internal::GlobalHandleStoreMode::kAssigningStore); + this->SetSlotThreadSafe( + this->New(isolate, other.val_, &this->val_, + internal::GlobalHandleDestructionMode::kWithoutDestructor, + internal::GlobalHandleStoreMode::kAssigningStore)); } template template -TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) noexcept { +TracedReference& TracedReference::operator=( + TracedReference&& rhs) noexcept { static_assert(std::is_base_of::value, "type check"); *this = std::move(rhs.template As()); return *this; @@ -511,14 +528,16 @@ TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) noexcept { template template -TracedGlobal& 
TracedGlobal::operator=(const TracedGlobal& rhs) { +TracedReference& TracedReference::operator=( + const TracedReference& rhs) { static_assert(std::is_base_of::value, "type check"); *this = rhs.template As(); return *this; } template -TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) noexcept { +TracedReference& TracedReference::operator=( + TracedReference&& rhs) noexcept { if (this != &rhs) { internal::MoveTracedGlobalReference( reinterpret_cast(&rhs.val_), @@ -528,7 +547,7 @@ TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) noexcept { } template -TracedGlobal& TracedGlobal::operator=(const TracedGlobal& rhs) { +TracedReference& TracedReference::operator=(const TracedReference& rhs) { if (this != &rhs) { this->Reset(); if (rhs.val_ != nullptr) { @@ -540,22 +559,36 @@ TracedGlobal& TracedGlobal::operator=(const TracedGlobal& rhs) { return *this; } +void TracedReferenceBase::SetWrapperClassId(uint16_t class_id) { + using I = internal::Internals; + if (IsEmpty()) return; + internal::Address* obj = reinterpret_cast(val_); + uint8_t* addr = reinterpret_cast(obj) + I::kNodeClassIdOffset; + *reinterpret_cast(addr) = class_id; +} + +uint16_t TracedReferenceBase::WrapperClassId() const { + using I = internal::Internals; + if (IsEmpty()) return 0; + internal::Address* obj = reinterpret_cast(val_); + uint8_t* addr = reinterpret_cast(obj) + I::kNodeClassIdOffset; + return *reinterpret_cast(addr); +} + template template -void TracedReference::Reset(Isolate* isolate, const Local& other) { +void TracedGlobal::Reset(Isolate* isolate, const Local& other) { static_assert(std::is_base_of::value, "type check"); - this->Reset(); + Reset(); if (other.IsEmpty()) return; - this->SetSlotThreadSafe( - this->New(isolate, other.val_, &this->val_, - internal::GlobalHandleDestructionMode::kWithoutDestructor, - internal::GlobalHandleStoreMode::kAssigningStore)); + this->val_ = this->New(isolate, other.val_, &this->val_, + internal::GlobalHandleDestructionMode::kWithDestructor, 
+ internal::GlobalHandleStoreMode::kAssigningStore); } template template -TracedReference& TracedReference::operator=( - TracedReference&& rhs) noexcept { +TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) noexcept { static_assert(std::is_base_of::value, "type check"); *this = std::move(rhs.template As()); return *this; @@ -563,16 +596,14 @@ TracedReference& TracedReference::operator=( template template -TracedReference& TracedReference::operator=( - const TracedReference& rhs) { +TracedGlobal& TracedGlobal::operator=(const TracedGlobal& rhs) { static_assert(std::is_base_of::value, "type check"); *this = rhs.template As(); return *this; } template -TracedReference& TracedReference::operator=( - TracedReference&& rhs) noexcept { +TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) noexcept { if (this != &rhs) { internal::MoveTracedGlobalReference( reinterpret_cast(&rhs.val_), @@ -582,7 +613,7 @@ TracedReference& TracedReference::operator=( } template -TracedReference& TracedReference::operator=(const TracedReference& rhs) { +TracedGlobal& TracedGlobal::operator=(const TracedGlobal& rhs) { if (this != &rhs) { this->Reset(); if (rhs.val_ != nullptr) { @@ -594,22 +625,6 @@ TracedReference& TracedReference::operator=(const TracedReference& rhs) { return *this; } -void TracedReferenceBase::SetWrapperClassId(uint16_t class_id) { - using I = internal::Internals; - if (IsEmpty()) return; - internal::Address* obj = reinterpret_cast(val_); - uint8_t* addr = reinterpret_cast(obj) + I::kNodeClassIdOffset; - *reinterpret_cast(addr) = class_id; -} - -uint16_t TracedReferenceBase::WrapperClassId() const { - using I = internal::Internals; - if (IsEmpty()) return 0; - internal::Address* obj = reinterpret_cast(val_); - uint8_t* addr = reinterpret_cast(obj) + I::kNodeClassIdOffset; - return *reinterpret_cast(addr); -} - template void TracedGlobal::SetFinalizationCallback( void* parameter, typename WeakCallbackInfo::Callback callback) { diff --git 
a/deps/v8/include/v8-value-serializer-version.h b/deps/v8/include/v8-value-serializer-version.h index c72911c64dc571..25eb19ca757d89 100644 --- a/deps/v8/include/v8-value-serializer-version.h +++ b/deps/v8/include/v8-value-serializer-version.h @@ -17,7 +17,7 @@ namespace v8 { -constexpr uint32_t CurrentValueSerializerFormatVersion() { return 13; } +constexpr uint32_t CurrentValueSerializerFormatVersion() { return 15; } } // namespace v8 diff --git a/deps/v8/include/v8-value-serializer.h b/deps/v8/include/v8-value-serializer.h index 574567bd5a759c..078f367c646d06 100644 --- a/deps/v8/include/v8-value-serializer.h +++ b/deps/v8/include/v8-value-serializer.h @@ -67,6 +67,23 @@ class V8_EXPORT ValueSerializer { virtual Maybe GetWasmModuleTransferId( Isolate* isolate, Local module); + + /** + * Returns whether shared values are supported. GetSharedValueId is only + * called if SupportsSharedValues() returns true. + */ + virtual bool SupportsSharedValues() const; + + /** + * Called when the ValueSerializer serializes a value that is shared across + * Isolates. The embedder must return an ID for the object. This function + * must be idempotent for the same object. When deserializing, the ID will + * be passed to ValueDeserializer::Delegate::GetSharedValueFromId as + * |shared_value_id|. + */ + virtual Maybe GetSharedValueId(Isolate* isolate, + Local shared_value); + /** * Allocates memory for the buffer of at least the size provided. The actual * size (which may be greater or equal) is written to |actual_size|. 
If no @@ -166,17 +183,30 @@ class V8_EXPORT ValueDeserializer { /** * Get a WasmModuleObject given a transfer_id previously provided - * by ValueSerializer::GetWasmModuleTransferId + * by ValueSerializer::Delegate::GetWasmModuleTransferId */ virtual MaybeLocal GetWasmModuleFromId( Isolate* isolate, uint32_t transfer_id); /** * Get a SharedArrayBuffer given a clone_id previously provided - * by ValueSerializer::GetSharedArrayBufferId + * by ValueSerializer::Delegate::GetSharedArrayBufferId */ virtual MaybeLocal GetSharedArrayBufferFromId( Isolate* isolate, uint32_t clone_id); + + /** + * Returns whether shared values are supported. GetSharedValueFromId is only + * called if SupportsSharedValues() returns true. + */ + virtual bool SupportsSharedValues() const; + + /** + * Get a value shared across Isolates given a shared_value_id provided by + * ValueSerializer::Delegate::GetSharedValueId. + */ + virtual MaybeLocal GetSharedValueFromId(Isolate* isolate, + uint32_t shared_value_id); }; ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size); diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index 1b2795a877f826..4e3fa99ea67917 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -8,10 +8,10 @@ // These macros define the version number for the current version. // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. -#define V8_MAJOR_VERSION 9 -#define V8_MINOR_VERSION 8 -#define V8_BUILD_NUMBER 177 -#define V8_PATCH_LEVEL 9 +#define V8_MAJOR_VERSION 10 +#define V8_MINOR_VERSION 1 +#define V8_BUILD_NUMBER 124 +#define V8_PATCH_LEVEL 6 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) 
diff --git a/deps/v8/include/v8-weak-callback-info.h b/deps/v8/include/v8-weak-callback-info.h index ff3c08238e3ae3..f1677e9da065ec 100644 --- a/deps/v8/include/v8-weak-callback-info.h +++ b/deps/v8/include/v8-weak-callback-info.h @@ -51,12 +51,26 @@ class WeakCallbackInfo { void* embedder_fields_[kEmbedderFieldsInWeakCallback]; }; -// kParameter will pass a void* parameter back to the callback, kInternalFields -// will pass the first two internal fields back to the callback, kFinalizer -// will pass a void* parameter back, but is invoked before the object is -// actually collected, so it can be resurrected. In the last case, it is not -// possible to request a second pass callback. -enum class WeakCallbackType { kParameter, kInternalFields, kFinalizer }; +/** + * Weakness type for weak handles. + */ +enum class WeakCallbackType { + /** + * Passes a user-defined void* parameter back to the callback. + */ + kParameter, + /** + * Passes the first two internal fields of the object back to the callback. + */ + kInternalFields, + /** + * Passes a user-defined void* parameter back to the callback. Will do so + * before the object is actually reclaimed, allowing it to be resurrected. In + * this case it is not possible to set a second-pass callback. + */ + kFinalizer V8_ENUM_DEPRECATE_SOON("Resurrecting finalizers are deprecated " + "and will not be supported going forward.") +}; template void* WeakCallbackInfo::GetInternalField(int index) const { diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index dd91f880b72063..1067d3eb243efb 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -19,7 +19,6 @@ #include #include -#include #include "cppgc/common.h" #include "v8-array-buffer.h" // NOLINT(build/include_directory) diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h index 1242d4289ceb93..77fd65c6c5b7d8 100644 --- a/deps/v8/include/v8config.h +++ b/deps/v8/include/v8config.h @@ -65,13 +65,14 @@ path. 
Add it with -I to the command line // Operating system detection (host) // // V8_OS_ANDROID - Android -// V8_OS_BSD - BSDish (Mac OS X, Net/Free/Open/DragonFlyBSD) +// V8_OS_BSD - BSDish (macOS, Net/Free/Open/DragonFlyBSD) // V8_OS_CYGWIN - Cygwin // V8_OS_DRAGONFLYBSD - DragonFlyBSD // V8_OS_FREEBSD - FreeBSD // V8_OS_FUCHSIA - Fuchsia -// V8_OS_LINUX - Linux -// V8_OS_MACOSX - Mac OS X +// V8_OS_LINUX - Linux (Android, ChromeOS, Linux, ...) +// V8_OS_DARWIN - Darwin (macOS, iOS) +// V8_OS_MACOS - macOS // V8_OS_IOS - iOS // V8_OS_NETBSD - NetBSD // V8_OS_OPENBSD - OpenBSD @@ -89,13 +90,14 @@ path. Add it with -I to the command line # define V8_OS_STRING "android" #elif defined(__APPLE__) -# define V8_OS_BSD 1 -# define V8_OS_MACOSX 1 # define V8_OS_POSIX 1 +# define V8_OS_BSD 1 +# define V8_OS_DARWIN 1 # if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE # define V8_OS_IOS 1 # define V8_OS_STRING "ios" # else +# define V8_OS_MACOS 1 # define V8_OS_STRING "macos" # endif // defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE @@ -169,7 +171,7 @@ path. Add it with -I to the command line // V8_TARGET_OS_FUCHSIA // V8_TARGET_OS_IOS // V8_TARGET_OS_LINUX -// V8_TARGET_OS_MACOSX +// V8_TARGET_OS_MACOS // V8_TARGET_OS_WIN // // If not set explicitly, these fall back to corresponding V8_OS_ values. @@ -181,7 +183,7 @@ path. Add it with -I to the command line && !defined(V8_TARGET_OS_FUCHSIA) \ && !defined(V8_TARGET_OS_IOS) \ && !defined(V8_TARGET_OS_LINUX) \ - && !defined(V8_TARGET_OS_MACOSX) \ + && !defined(V8_TARGET_OS_MACOS) \ && !defined(V8_TARGET_OS_WIN) # error No known target OS defined. # endif @@ -192,7 +194,7 @@ path. Add it with -I to the command line || defined(V8_TARGET_OS_FUCHSIA) \ || defined(V8_TARGET_OS_IOS) \ || defined(V8_TARGET_OS_LINUX) \ - || defined(V8_TARGET_OS_MACOSX) \ + || defined(V8_TARGET_OS_MACOS) \ || defined(V8_TARGET_OS_WIN) # error A target OS is defined but V8_HAVE_TARGET_OS is unset. # endif @@ -214,8 +216,8 @@ path. 
Add it with -I to the command line # define V8_TARGET_OS_LINUX #endif -#ifdef V8_OS_MACOSX -# define V8_TARGET_OS_MACOSX +#ifdef V8_OS_MACOS +# define V8_TARGET_OS_MACOS #endif #ifdef V8_OS_WIN @@ -232,7 +234,7 @@ path. Add it with -I to the command line # define V8_TARGET_OS_STRING "ios" #elif defined(V8_TARGET_OS_LINUX) # define V8_TARGET_OS_STRING "linux" -#elif defined(V8_TARGET_OS_MACOSX) +#elif defined(V8_TARGET_OS_MACOS) # define V8_TARGET_OS_STRING "macos" #elif defined(V8_TARGET_OS_WINDOWS) # define V8_TARGET_OS_STRING "windows" @@ -578,17 +580,15 @@ V8 shared library set USING_V8_SHARED. #endif // V8_OS_WIN -// The virtual memory cage is available (i.e. defined) when pointer compression -// is enabled, but it is only used when V8_VIRTUAL_MEMORY_CAGE is enabled as -// well. This allows better test coverage of the cage. +// The sandbox is available (i.e. defined) when pointer compression +// is enabled, but it is only used when V8_SANDBOX is enabled as +// well. This allows better test coverage of the sandbox. #if defined(V8_COMPRESS_POINTERS) -#define V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE +#define V8_SANDBOX_IS_AVAILABLE #endif -// CagedPointers are currently only used if the heap sandbox is enabled. -// In the future, they will be enabled when the virtual memory cage is enabled. -#if defined(V8_HEAP_SANDBOX) -#define V8_CAGED_POINTERS +#if defined(V8_SANDBOX) && !defined(V8_SANDBOX_IS_AVAILABLE) +#error Inconsistent configuration: sandbox is enabled but not available #endif // From C++17 onwards, static constexpr member variables are defined to be diff --git a/deps/v8/infra/mb/PRESUBMIT.py b/deps/v8/infra/mb/PRESUBMIT.py index 39d15e80b43ff3..85e257c2d4cd1c 100644 --- a/deps/v8/infra/mb/PRESUBMIT.py +++ b/deps/v8/infra/mb/PRESUBMIT.py @@ -7,6 +7,10 @@ # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. 
+# This line is 'magic' in that git-cl looks for it to decide whether to +# use Python3 instead of Python2 when running the code in this file. +USE_PYTHON3 = True + def _CommonChecks(input_api, output_api): results = [] diff --git a/deps/v8/infra/mb/gn_isolate_map.pyl b/deps/v8/infra/mb/gn_isolate_map.pyl index 6aa0de2358abf0..a5031f1a203a43 100644 --- a/deps/v8/infra/mb/gn_isolate_map.pyl +++ b/deps/v8/infra/mb/gn_isolate_map.pyl @@ -60,7 +60,7 @@ "type": "script", }, "run-gcmole": { - "label": "//tools/gcmole:v8_run_gcmole", + "label": "//tools/gcmole:v8_gcmole_files", "type": "script", }, "run-num-fuzzer": { diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index 82964dd7d46598..e20cd6e7bd7183 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -116,6 +116,7 @@ 'V8 Linux64 - pointer compression - builder': 'release_x64_pointer_compression', 'V8 Linux64 - pointer compression without dchecks': 'release_x64_pointer_compression_without_dchecks', + 'V8 Linux64 - python3 - builder': 'release_x64', 'V8 Linux64 - arm64 - sim - pointer compression - builder': 'release_simulate_arm64_pointer_compression', 'V8 Linux64 gcc - debug builder': 'debug_x64_gcc', @@ -169,7 +170,6 @@ }, 'client.v8.perf' : { 'V8 Arm - builder - perf': 'official_arm', - 'V8 Arm64 - builder - perf': 'official_arm64', 'V8 Android Arm - builder - perf': 'official_android_arm', 'V8 Android Arm64 - builder - perf': 'official_android_arm64', 'V8 Linux - builder - perf': 'official_x86', @@ -238,6 +238,7 @@ 'v8_linux64_nodcheck_rel_ng': 'release_x64', 'v8_linux64_perfetto_dbg_ng': 'debug_x64_perfetto', 'v8_linux64_pointer_compression_rel_ng': 'release_x64_pointer_compression', + 'v8_linux64_python3_rel_ng': 'release_x64', 'v8_linux64_rel_ng': 'release_x64_test_features_trybot', 'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap', 'v8_linux64_single_generation_dbg_ng': 'debug_x64_single_generation', @@ -470,8 +471,6 @@ # Official 
configs for arm 'official_arm': [ 'release_bot', 'arm', 'hard_float', 'official', 'disable_pgo'], - 'official_arm64': [ - 'release_bot', 'arm64', 'hard_float', 'official', 'disable_pgo'], 'official_android_arm': [ 'release_bot', 'arm', 'android', 'minimal_symbols', 'android_strip_outputs', 'official', 'disable_pgo'], @@ -525,7 +524,7 @@ 'release_x64_minimal_symbols': [ 'release_bot', 'x64', 'minimal_symbols'], 'release_x64_pointer_compression': [ - 'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks', + 'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_enable_javascript_promise_hooks', 'v8_disable_pointer_compression'], 'release_x64_pointer_compression_without_dchecks': [ 'release_bot', 'x64', 'v8_disable_pointer_compression'], @@ -574,13 +573,14 @@ 'debug_x64_fuchsia': [ 'debug_bot', 'x64', 'fuchsia'], 'debug_x64_gcc': [ - 'debug_bot_no_goma', 'x64', 'gcc', 'lld', 'v8_check_header_includes'], + 'debug_bot_no_goma', 'x64', 'gcc', 'lld', 'no_custom_libcxx', + 'v8_check_header_includes'], 'debug_x64_header_includes': [ 'debug_bot', 'x64', 'v8_check_header_includes'], 'debug_x64_heap_sandbox': [ - 'debug_bot', 'x64', 'v8_enable_heap_sandbox'], + 'debug_bot', 'x64', 'v8_enable_sandbox_future'], 'debug_x64_heap_sandbox_arm64_sim': [ - 'debug_bot', 'simulate_arm64', 'v8_enable_heap_sandbox'], + 'debug_bot', 'simulate_arm64', 'v8_enable_sandbox_future'], 'debug_x64_minimal_symbols': [ 'debug_bot', 'x64', 'minimal_symbols'], 'debug_x64_non_default_cppgc': [ @@ -619,10 +619,11 @@ 'release_bot', 'x86', 'asan', 'lsan', 'symbolized', 'v8_verify_heap'], 'release_x86_gcc': [ - 'release_bot_no_goma', 'x86', 'gcc', 'lld', 'v8_check_header_includes'], - 'release_x86_gcc_minimal_symbols': [ - 'release_bot_no_goma', 'x86', 'gcc', 'lld', 'minimal_symbols', + 'release_bot_no_goma', 'x86', 'gcc', 'lld', 'no_custom_libcxx', 'v8_check_header_includes'], + 'release_x86_gcc_minimal_symbols': [ + 'release_bot_no_goma', 'x86', 'gcc', 'lld', 
'no_custom_libcxx', + 'minimal_symbols', 'v8_check_header_includes'], 'release_x86_gcmole': [ 'release_bot', 'x86', 'gcmole'], 'release_x86_gcmole_trybot': [ @@ -720,7 +721,7 @@ 'disable_concurrent_marking': { # Disable concurrent marking and atomic object field writes in order to - # increase the TSAN coverage for background tasks. We need to keep the + # increase the TSAN coverage for background tasks. We need to keep the # atomic marking state enabled because that is needed for the concurrent # write-barrier used by background compilation. 'gn_args': 'v8_enable_concurrent_marking=false ' @@ -922,8 +923,8 @@ 'gn_args': 'v8_enable_runtime_call_stats=false', }, - 'v8_enable_heap_sandbox': { - 'gn_args': 'v8_enable_heap_sandbox=true', + 'v8_enable_sandbox_future': { + 'gn_args': 'v8_enable_sandbox_future=true', }, 'v8_enable_lite_mode': { @@ -934,6 +935,10 @@ 'gn_args': 'v8_enable_slow_dchecks=true', }, + 'v8_enable_javascript_promise_hooks': { + 'gn_args': 'v8_enable_javascript_promise_hooks=true', + }, + 'v8_enable_google_benchmark': { 'gn_args': 'v8_enable_google_benchmark=true', }, @@ -950,7 +955,7 @@ 'gn_args': 'v8_enable_pointer_compression=false', }, 'v8_enable_single_generation': { - 'gn_args': 'v8_enable_single_generation=true ' + 'gn_args': 'v8_enable_single_generation=true ' 'v8_disable_write_barriers=true', }, 'v8_enable_test_features': { diff --git a/deps/v8/infra/testing/PRESUBMIT.py b/deps/v8/infra/testing/PRESUBMIT.py index 46ae05163d51d1..2e93155d8132fc 100644 --- a/deps/v8/infra/testing/PRESUBMIT.py +++ b/deps/v8/infra/testing/PRESUBMIT.py @@ -11,10 +11,9 @@ import ast import os -try: - basestring # Python 2 -except NameError: # Python 3 - basestring = str +# This line is 'magic' in that git-cl looks for it to decide whether to +# use Python3 instead of Python2 when running the code in this file. 
+USE_PYTHON3 = True SUPPORTED_BUILDER_SPEC_KEYS = [ 'swarming_dimensions', @@ -58,14 +57,14 @@ def check_keys(error_msg, src_dict, supported_keys): errors = [] for key in src_dict.keys(): if key not in supported_keys: - errors += error_msg('Key "%s" must be one of %s' % (key, supported_keys)) + errors += error_msg(f'Key "{key}" must be one of {supported_keys}') return errors def _check_properties(error_msg, src_dict, prop_name, supported_keys): properties = src_dict.get(prop_name, {}) if not isinstance(properties, dict): - return error_msg('Value for %s must be a dict' % prop_name) + return error_msg(f'Value for {prop_name} must be a dict') return check_keys(error_msg, properties, supported_keys) @@ -77,11 +76,11 @@ def _check_int_range(error_msg, src_dict, prop_name, lower_bound=None, try: value = int(src_dict[prop_name]) except ValueError: - return error_msg('If specified, %s must be an int' % prop_name) + return error_msg(f'If specified, {prop_name} must be an int') if lower_bound is not None and value < lower_bound: - return error_msg('If specified, %s must be >=%d' % (prop_name, lower_bound)) + return error_msg(f'If specified, {prop_name} must be >={lower_bound}') if upper_bound is not None and value > upper_bound: - return error_msg('If specified, %s must be <=%d' % (prop_name, upper_bound)) + return error_msg(f'If specified, {prop_name} must be <={upper_bound}') return [] @@ -120,7 +119,7 @@ def _check_test(error_msg, test): test_args = test.get('test_args', []) if not isinstance(test_args, list): errors += error_msg('If specified, test_args must be a list of arguments') - if not all(isinstance(x, basestring) for x in test_args): + if not all(isinstance(x, str) for x in test_args): errors += error_msg('If specified, all test_args must be strings') # Limit shards to 14 to avoid erroneous resource exhaustion. 
@@ -128,7 +127,7 @@ def _check_test(error_msg, test): error_msg, test, 'shards', lower_bound=1, upper_bound=14) variant = test.get('variant', 'default') - if not variant or not isinstance(variant, basestring): + if not variant or not isinstance(variant, str): errors += error_msg('If specified, variant must be a non-empty string') return errors @@ -136,23 +135,23 @@ def _check_test(error_msg, test): def _check_test_spec(file_path, raw_pyl): def error_msg(msg): - return ['Error in %s:\n%s' % (file_path, msg)] + return [f'Error in {file_path}:\n{msg}'] try: # Eval python literal file. full_test_spec = ast.literal_eval(raw_pyl) except SyntaxError as e: - return error_msg('Pyl parsing failed with:\n%s' % e) + return error_msg(f'Pyl parsing failed with:\n{e}') if not isinstance(full_test_spec, dict): return error_msg('Test spec must be a dict') errors = [] - for buildername, builder_spec in full_test_spec.iteritems(): + for buildername, builder_spec in full_test_spec.items(): def error_msg(msg): - return ['Error in %s for builder %s:\n%s' % (file_path, buildername, msg)] + return [f'Error in {file_path} for builder {buildername}:\n{msg}'] - if not isinstance(buildername, basestring) or not buildername: + if not isinstance(buildername, str) or not buildername: errors += error_msg('Buildername must be a non-empty string') if not isinstance(builder_spec, dict) or not builder_spec: diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index db7566addd132b..519adbdb076c34 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -361,7 +361,6 @@ {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, {'name': 'v8testing', 'variant': 'stress_concurrent_allocation'}, {'name': 'v8testing', 'variant': 'stress_concurrent_inlining'}, - {'name': 'v8testing', 'variant': 'no_concurrent_inlining'}, ], }, 'v8_linux64_dict_tracking_dbg_ng_triggered': { @@ -474,7 +473,6 @@ {'name': 'v8testing', 'variant': 
'extra'}, {'name': 'v8testing', 'variant': 'no_lfa'}, {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, - {'name': 'v8testing', 'variant': 'no_concurrent_inlining'}, ], }, 'v8_linux64_perfetto_dbg_ng_triggered': { @@ -493,6 +491,15 @@ {'name': 'v8testing', 'shards': 3}, ], }, + 'v8_linux64_python3_rel_ng_triggered': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + {'name': 'v8testing', 'shards': 2}, + {'name': 'gcmole'}, + ], + }, 'v8_linux64_single_generation_dbg_ng_triggered': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', @@ -520,7 +527,6 @@ {'name': 'v8testing', 'variant': 'no_lfa'}, {'name': 'v8testing', 'variant': 'slow_path'}, {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, - {'name': 'v8testing', 'variant': 'no_concurrent_inlining'}, ], }, 'v8_linux64_tsan_rel_ng_triggered': { @@ -822,7 +828,6 @@ 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Mac-10.15', - 'gpu': 'none', }, 'tests': [ {'name': 'v8testing', 'shards': 8}, @@ -833,7 +838,6 @@ 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Mac-10.15', - 'gpu': 'none', }, 'tests': [ {'name': 'v8testing', 'shards': 8}, @@ -844,7 +848,6 @@ 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Mac-10.15', - 'gpu': 'none', }, 'tests': [ {'name': 'v8testing', 'shards': 8}, @@ -882,7 +885,7 @@ {'name': 'mozilla', 'variant': 'extra'}, {'name': 'optimize_for_size'}, {'name': 'test262', 'shards': 2}, - {'name': 'test262', 'variant': 'extra'}, + {'name': 'test262', 'variant': 'extra', 'shards': 2}, {'name': 'v8initializers'}, {'name': 'v8testing'}, {'name': 'v8testing', 'suffix': 'isolates', 'test_args': ['--isolates']}, @@ -1148,7 +1151,7 @@ {'name': 'perf_integration'}, {'name': 'test262', 'shards': 2}, {'name': 'test262', 'variant': 'assert_types'}, - {'name': 'test262', 'variant': 'extra'}, + {'name': 'test262', 'variant': 'extra', 'shards': 2}, {'name': 'v8initializers'}, {'name': 'v8testing'}, {'name': 'v8testing', 'variant': 'assert_types'}, @@ 
-1156,7 +1159,6 @@ {'name': 'v8testing', 'variant': 'minor_mc'}, {'name': 'v8testing', 'variant': 'no_lfa'}, {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, - {'name': 'v8testing', 'variant': 'no_concurrent_inlining'}, # Noavx. { 'name': 'mozilla', @@ -1208,7 +1210,7 @@ {'name': 'mozilla'}, {'name': 'mozilla', 'variant': 'extra'}, {'name': 'optimize_for_size'}, - {'name': 'test262', 'shards': 5}, + {'name': 'test262', 'shards': 7}, {'name': 'test262', 'variant': 'extra', 'shards': 5}, {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, @@ -1218,7 +1220,6 @@ {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, {'name': 'v8testing', 'variant': 'stress_concurrent_allocation'}, {'name': 'v8testing', 'variant': 'stress_concurrent_inlining'}, - {'name': 'v8testing', 'variant': 'no_concurrent_inlining'}, # Noavx. { 'name': 'mozilla', @@ -1368,6 +1369,15 @@ {'name': 'v8testing', 'shards': 2}, ], }, + 'V8 Linux64 - python3': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + {'name': 'v8testing', 'shards': 2}, + {'name': 'gcmole'}, + ], + }, 'V8 Linux64 - shared': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', @@ -1391,7 +1401,7 @@ 'os': 'Ubuntu-18.04', }, 'tests': [ - {'name': 'test262', 'shards': 5}, + {'name': 'test262', 'shards': 7}, {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, @@ -1571,23 +1581,21 @@ 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Mac-10.15', - 'gpu': 'none', }, 'swarming_task_attrs': { 'expiration': 14400, - 'hard_timeout': 3600, + 'hard_timeout': 7200, 'priority': 35, }, 'tests': [ - {'name': 'v8testing', 'shards': 8}, - {'name': 'v8testing', 'variant': 'future', 'shards': 2}, + {'name': 'v8testing', 'shards': 10}, + {'name': 'v8testing', 'variant': 'future', 'shards': 4}, ], }, 'V8 Mac - arm64 - sim - release': { 'swarming_dimensions' : { 'cpu': 
'x86-64', 'os': 'Mac-10.15', - 'gpu': 'none', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1841,7 +1849,8 @@ 'name': 'test262', 'suffix': 'novfp3', 'variant': 'default', - 'test_args': ['--novfp3'] + 'test_args': ['--novfp3'], + 'shards': 2 }, { 'name': 'v8testing', diff --git a/deps/v8/samples/cppgc/hello-world.cc b/deps/v8/samples/cppgc/hello-world.cc index 86b0afe92f63df..fe0d002ab44760 100644 --- a/deps/v8/samples/cppgc/hello-world.cc +++ b/deps/v8/samples/cppgc/hello-world.cc @@ -2,17 +2,21 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include -#include -#include -#include -#include -#include - #include #include #include +#include "include/cppgc/allocation.h" +#include "include/cppgc/default-platform.h" +#include "include/cppgc/garbage-collected.h" +#include "include/cppgc/heap.h" +#include "include/cppgc/member.h" +#include "include/cppgc/visitor.h" + +#if !CPPGC_IS_STANDALONE +#include "include/v8-initialization.h" +#endif // !CPPGC_IS_STANDALONE + /** * This sample program shows how to set up a stand-alone cppgc heap. */ @@ -45,6 +49,12 @@ int main(int argc, char* argv[]) { // Create a default platform that is used by cppgc::Heap for execution and // backend allocation. auto cppgc_platform = std::make_shared(); +#if !CPPGC_IS_STANDALONE + // When initializing a stand-alone cppgc heap in a regular V8 build, the + // internal V8 platform will be reused. Reusing the V8 platform requires + // initializing it properly. + v8::V8::InitializePlatform(cppgc_platform->GetV8Platform()); +#endif // !CPPGC_IS_STANDALONE // Initialize the process. This must happen before any cppgc::Heap::Create() // calls. 
cppgc::DefaultPlatform::InitializeProcess(cppgc_platform.get()); diff --git a/deps/v8/samples/hello-world.cc b/deps/v8/samples/hello-world.cc index 557ba63e0fd85e..3ca9ff68024566 100644 --- a/deps/v8/samples/hello-world.cc +++ b/deps/v8/samples/hello-world.cc @@ -20,6 +20,12 @@ int main(int argc, char* argv[]) { v8::V8::InitializeExternalStartupData(argv[0]); std::unique_ptr platform = v8::platform::NewDefaultPlatform(); v8::V8::InitializePlatform(platform.get()); +#ifdef V8_SANDBOX + if (!v8::V8::InitializeSandbox()) { + fprintf(stderr, "Error initializing the V8 sandbox\n"); + return 1; + } +#endif v8::V8::Initialize(); // Create a new Isolate and make it the current one. diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc index 28b6f119c3acd2..1e9498038836d8 100644 --- a/deps/v8/samples/process.cc +++ b/deps/v8/samples/process.cc @@ -703,6 +703,12 @@ int main(int argc, char* argv[]) { v8::V8::InitializeExternalStartupData(argv[0]); std::unique_ptr platform = v8::platform::NewDefaultPlatform(); v8::V8::InitializePlatform(platform.get()); +#ifdef V8_SANDBOX + if (!v8::V8::InitializeSandbox()) { + fprintf(stderr, "Error initializing the V8 sandbox\n"); + return 1; + } +#endif v8::V8::Initialize(); map options; string file; diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc index 9a2c8c3f544bf1..7e6be6f8723d19 100644 --- a/deps/v8/samples/shell.cc +++ b/deps/v8/samples/shell.cc @@ -73,6 +73,12 @@ int main(int argc, char* argv[]) { v8::V8::InitializeExternalStartupData(argv[0]); std::unique_ptr platform = v8::platform::NewDefaultPlatform(); v8::V8::InitializePlatform(platform.get()); +#ifdef V8_SANDBOX + if (!v8::V8::InitializeSandbox()) { + fprintf(stderr, "Error initializing the V8 sandbox\n"); + return 1; + } +#endif v8::V8::Initialize(); v8::V8::SetFlagsFromCommandLine(&argc, argv, true); v8::Isolate::CreateParams create_params; diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS index 341435e28d3bac..9846e7695c125b 100644 --- 
a/deps/v8/src/DEPS +++ b/deps/v8/src/DEPS @@ -36,6 +36,7 @@ include_rules = [ "+src/heap/parked-scope.h", "+src/heap/read-only-heap-inl.h", "+src/heap/read-only-heap.h", + "+src/heap/reference-summarizer.h", "+src/heap/safepoint.h", "+src/heap/base/stack.h", "+src/heap/conservative-stack-visitor.h", @@ -50,6 +51,9 @@ include_rules = [ "+src/interpreter/interpreter.h", "+src/interpreter/interpreter-generator.h", "+src/interpreter/setup-interpreter.h", + "-src/maglev", + "+src/maglev/maglev.h", + "+src/maglev/maglev-concurrent-dispatcher.h", "-src/regexp", "+src/regexp/regexp.h", "+src/regexp/regexp-flags.h", diff --git a/deps/v8/src/api/OWNERS b/deps/v8/src/api/OWNERS index 519588070b000f..eb4a9242713213 100644 --- a/deps/v8/src/api/OWNERS +++ b/deps/v8/src/api/OWNERS @@ -6,3 +6,6 @@ leszeks@chromium.org mlippautz@chromium.org mslekova@chromium.org verwaest@chromium.org + +# For v8-debug.h implementations. +per-file api.cc=file:../debug/OWNERS diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h index 17f8bd94bc2035..975976ae096ae2 100644 --- a/deps/v8/src/api/api-inl.h +++ b/deps/v8/src/api/api-inl.h @@ -15,7 +15,6 @@ #include "src/objects/foreign-inl.h" #include "src/objects/js-weak-refs.h" #include "src/objects/objects-inl.h" -#include "src/objects/stack-frame-info.h" namespace v8 { @@ -317,6 +316,22 @@ inline bool V8_EXPORT TryToCopyAndConvertArrayToCppBuffer(Local src, namespace internal { +void HandleScopeImplementer::EnterContext(Context context) { + DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity()); + DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size()); + DCHECK(context.IsNativeContext()); + entered_contexts_.push_back(context); + is_microtask_context_.push_back(0); +} + +void HandleScopeImplementer::EnterMicrotaskContext(Context context) { + DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity()); + DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size()); + 
DCHECK(context.IsNativeContext()); + entered_contexts_.push_back(context); + is_microtask_context_.push_back(1); +} + Handle HandleScopeImplementer::LastEnteredContext() { DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity()); DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size()); diff --git a/deps/v8/src/api/api-macros.h b/deps/v8/src/api/api-macros.h index 9b339321e7a430..07b2e2d0f20acb 100644 --- a/deps/v8/src/api/api-macros.h +++ b/deps/v8/src/api/api-macros.h @@ -22,12 +22,17 @@ * Exceptions should be handled either by invoking one of the * RETURN_ON_FAILED_EXECUTION* macros. * + * API methods that are part of the debug interface should use + * + * PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE + * + * in a similar fashion to ENTER_V8. + * * Don't use macros with DO_NOT_USE in their name. * - * TODO(jochen): Document debugger specific macros. - * TODO(jochen): Document LOG_API and other RuntimeCallStats macros. - * TODO(jochen): All API methods should invoke one of the ENTER_V8* macros. - * TODO(jochen): Remove calls form API methods to DO_NOT_USE macros. + * TODO(cbruni): Document LOG_API and other RuntimeCallStats macros. + * TODO(verwaest): All API methods should invoke one of the ENTER_V8* macros. + * TODO(verwaest): Remove calls form API methods to DO_NOT_USE macros. 
*/ #define LOG_API(isolate, class_name, function_name) \ diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index a71dd1670ce623..f0f1355f590b37 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -44,6 +44,7 @@ #include "src/common/globals.h" #include "src/compiler-dispatcher/lazy-compile-dispatcher.h" #include "src/date/date.h" +#include "src/objects/primitive-heap-object.h" #if V8_ENABLE_WEBASSEMBLY #include "src/debug/debug-wasm-objects.h" #endif // V8_ENABLE_WEBASSEMBLY @@ -56,7 +57,6 @@ #include "src/execution/isolate-inl.h" #include "src/execution/messages.h" #include "src/execution/microtask-queue.h" -#include "src/execution/runtime-profiler.h" #include "src/execution/simulator.h" #include "src/execution/v8threads.h" #include "src/execution/vm-state-inl.h" @@ -100,7 +100,6 @@ #include "src/objects/shared-function-info.h" #include "src/objects/slots.h" #include "src/objects/smi.h" -#include "src/objects/stack-frame-info-inl.h" #include "src/objects/synthetic-module-inl.h" #include "src/objects/templates.h" #include "src/objects/value-serializer.h" @@ -115,8 +114,8 @@ #include "src/profiler/tick-sample.h" #include "src/regexp/regexp-utils.h" #include "src/runtime/runtime.h" -#include "src/security/external-pointer.h" -#include "src/security/vm-cage.h" +#include "src/sandbox/external-pointer.h" +#include "src/sandbox/sandbox.h" #include "src/snapshot/code-serializer.h" #include "src/snapshot/embedded/embedded-data.h" #include "src/snapshot/snapshot.h" @@ -140,7 +139,7 @@ #include "src/wasm/wasm-serialization.h" #endif // V8_ENABLE_WEBASSEMBLY -#if V8_OS_LINUX || V8_OS_MACOSX || V8_OS_FREEBSD +#if V8_OS_LINUX || V8_OS_DARWIN || V8_OS_FREEBSD #include #include "include/v8-wasm-trap-handler-posix.h" #include "src/trap-handler/handler-inside-posix.h" @@ -193,37 +192,6 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate, return origin; } -ScriptOrigin::ScriptOrigin( - Local resource_name, Local line_offset, - Local 
column_offset, Local is_shared_cross_origin, - Local script_id, Local source_map_url, - Local is_opaque, Local is_wasm, Local is_module, - Local host_defined_options) - : ScriptOrigin( - Isolate::GetCurrent(), resource_name, - line_offset.IsEmpty() ? 0 : static_cast(line_offset->Value()), - column_offset.IsEmpty() ? 0 - : static_cast(column_offset->Value()), - !is_shared_cross_origin.IsEmpty() && is_shared_cross_origin->IsTrue(), - static_cast(script_id.IsEmpty() ? -1 : script_id->Value()), - source_map_url, !is_opaque.IsEmpty() && is_opaque->IsTrue(), - !is_wasm.IsEmpty() && is_wasm->IsTrue(), - !is_module.IsEmpty() && is_module->IsTrue(), host_defined_options) {} - -ScriptOrigin::ScriptOrigin(Local resource_name, int line_offset, - int column_offset, bool is_shared_cross_origin, - int script_id, Local source_map_url, - bool is_opaque, bool is_wasm, bool is_module, - Local host_defined_options) - : isolate_(Isolate::GetCurrent()), - resource_name_(resource_name), - resource_line_offset_(line_offset), - resource_column_offset_(column_offset), - options_(is_shared_cross_origin, is_opaque, is_wasm, is_module), - script_id_(script_id), - source_map_url_(source_map_url), - host_defined_options_(host_defined_options) {} - Local ScriptOrigin::HostDefinedOptions() const { // TODO(cbruni, chromium:1244145): remove once migrated to the context. Utils::ApiCheck(!host_defined_options_->IsFixedArray(), @@ -389,11 +357,11 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) { namespace { -#ifdef V8_HEAP_SANDBOX -// ArrayBufferAllocator to use when the heap sandbox is enabled, in which case -// all ArrayBuffer backing stores need to be allocated inside the virtual -// memory cage. Note, the current implementation is extremely inefficient as it -// uses the BoundedPageAllocator. 
In the future, we'll need a proper allocator +#ifdef V8_SANDBOXED_POINTERS +// ArrayBufferAllocator to use when sandboxed pointers are used in which case +// all ArrayBuffer backing stores need to be allocated inside the sandbox. +// Note, the current implementation is extremely inefficient as it uses the +// BoundedPageAllocator. In the future, we'll need a proper allocator // implementation. class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator { public: @@ -461,7 +429,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator { return new_data; } }; -#endif // V8_HEAP_SANDBOX +#endif // V8_SANDBOXED_POINTERS struct SnapshotCreatorData { explicit SnapshotCreatorData(Isolate* isolate) @@ -961,7 +929,7 @@ void HandleScope::Initialize(Isolate* isolate) { // We make an exception if the serializer is enabled, which means that the // Isolate is exclusively used to create a snapshot. Utils::ApiCheck( - !v8::Locker::WasEverUsed() || + !internal_isolate->was_locker_ever_used() || internal_isolate->thread_manager()->IsLockedByCurrentThread() || internal_isolate->serializer_enabled(), "HandleScope::HandleScope", @@ -2088,8 +2056,8 @@ MaybeLocal Script::Run(Local context, ENTER_V8(isolate, context, Script, Run, MaybeLocal(), InternalEscapableScope); i::TimerEventScope timer_scope(isolate); - i::NestedTimedHistogramScope execute_timer( - isolate->counters()->execute_precise()); + i::NestedTimedHistogramScope execute_timer(isolate->counters()->execute(), + isolate); i::AggregatingHistogramTimerScope histogram_timer( isolate->counters()->compile_lazy()); auto fun = i::Handle::cast(Utils::OpenHandle(this)); @@ -2118,8 +2086,10 @@ MaybeLocal Script::Run(Local context, handle(fun->shared().script(), isolate); if (maybe_script->IsScript() && i::Script::cast(*maybe_script).type() == i::Script::TYPE_WEB_SNAPSHOT) { - i::WebSnapshotDeserializer deserializer(v8_isolate); - deserializer.UseWebSnapshot(i::Handle::cast(maybe_script)); + i::WebSnapshotDeserializer 
deserializer( + reinterpret_cast(v8_isolate), + i::Handle::cast(maybe_script)); + deserializer.Deserialize(); RETURN_ON_FAILED_EXECUTION(Value); Local result = v8::Undefined(v8_isolate); RETURN_ESCAPED(result); @@ -2292,56 +2262,6 @@ Local Module::GetException() const { return ToApiHandle(i::handle(self->GetException(), isolate)); } -int Module::GetModuleRequestsLength() const { - i::Module self = *Utils::OpenHandle(this); - if (self.IsSyntheticModule()) return 0; - ASSERT_NO_SCRIPT_NO_EXCEPTION(self.GetIsolate()); - return i::SourceTextModule::cast(self).info().module_requests().length(); -} - -Local Module::GetModuleRequest(int i) const { - Utils::ApiCheck(i >= 0, "v8::Module::GetModuleRequest", - "index must be positive"); - i::Handle self = Utils::OpenHandle(this); - Utils::ApiCheck(self->IsSourceTextModule(), "v8::Module::GetModuleRequest", - "Expected SourceTextModule"); - i::Isolate* isolate = self->GetIsolate(); - ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate); - i::Handle module_requests( - i::Handle::cast(self)->info().module_requests(), - isolate); - Utils::ApiCheck(i < module_requests->length(), "v8::Module::GetModuleRequest", - "index is out of bounds"); - i::Handle module_request( - i::ModuleRequest::cast(module_requests->get(i)), isolate); - return ToApiHandle(i::handle(module_request->specifier(), isolate)); -} - -Location Module::GetModuleRequestLocation(int i) const { - Utils::ApiCheck(i >= 0, "v8::Module::GetModuleRequest", - "index must be positive"); - i::Handle self = Utils::OpenHandle(this); - i::Isolate* isolate = self->GetIsolate(); - ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate); - i::HandleScope scope(isolate); - Utils::ApiCheck(self->IsSourceTextModule(), - "Module::GetModuleRequestLocation", - "Expected SourceTextModule"); - i::Handle module_requests( - i::Handle::cast(self)->info().module_requests(), - isolate); - Utils::ApiCheck(i < module_requests->length(), "v8::Module::GetModuleRequest", - "index is out of bounds"); - i::Handle module_request( - 
i::ModuleRequest::cast(module_requests->get(i)), isolate); - int position = module_request->position(); - i::Handle script( - i::Handle::cast(self)->GetScript(), isolate); - i::Script::PositionInfo info; - i::Script::GetPositionInfo(script, position, &info, i::Script::WITH_OFFSET); - return v8::Location(info.line, info.column); -} - Local Module::GetModuleRequests() const { i::Handle self = Utils::OpenHandle(this); if (self->IsSyntheticModule()) { @@ -2425,19 +2345,6 @@ bool Module::IsSyntheticModule() const { int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); } -Maybe Module::InstantiateModule(Local context, - Module::ResolveCallback callback) { - auto isolate = reinterpret_cast(context->GetIsolate()); - ENTER_V8(isolate, context, Module, InstantiateModule, Nothing(), - i::HandleScope); - ResolveModuleCallback callback_with_import_assertions = nullptr; - has_pending_exception = - !i::Module::Instantiate(isolate, Utils::OpenHandle(this), context, - callback_with_import_assertions, callback); - RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); - return Just(true); -} - Maybe Module::InstantiateModule(Local context, Module::ResolveModuleCallback callback) { auto isolate = reinterpret_cast(context->GetIsolate()); @@ -2455,8 +2362,8 @@ MaybeLocal Module::Evaluate(Local context) { ENTER_V8(isolate, context, Module, Evaluate, MaybeLocal(), InternalEscapableScope); i::TimerEventScope timer_scope(isolate); - i::NestedTimedHistogramScope execute_timer( - isolate->counters()->execute_precise()); + i::NestedTimedHistogramScope execute_timer(isolate->counters()->execute(), + isolate); i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy()); i::Handle self = Utils::OpenHandle(this); @@ -2987,7 +2894,7 @@ MaybeLocal v8::TryCatch::StackTrace(Local context, PREPARE_FOR_EXECUTION(context, TryCatch, StackTrace, Value); auto obj = i::Handle::cast(i_exception); i::Handle name = isolate->factory()->stack_string(); - Maybe maybe = 
i::JSReceiver::HasProperty(obj, name); + Maybe maybe = i::JSReceiver::HasProperty(isolate, obj, name); has_pending_exception = maybe.IsNothing(); RETURN_ON_FAILED_EXECUTION(Value); if (!maybe.FromJust()) return v8::Local(); @@ -3064,6 +2971,7 @@ ScriptOrigin Message::GetScriptOrigin() const { void ScriptOrigin::VerifyHostDefinedOptions() const { // TODO(cbruni, chromium:1244145): Remove checks once we allow arbitrary // host-defined options. + USE(isolate_); if (host_defined_options_.IsEmpty()) return; Utils::ApiCheck(host_defined_options_->IsFixedArray(), "ScriptOrigin()", "Host-defined options has to be a PrimitiveArray"); @@ -3210,14 +3118,6 @@ MaybeLocal Message::GetSourceLine(Local context) const { RETURN_ESCAPED(Utils::ToLocal(self->GetSourceLine())); } -void Message::PrintCurrentStackTrace(Isolate* isolate, FILE* out) { - i::Isolate* i_isolate = reinterpret_cast(isolate); - ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - std::ostringstream stack_trace_stream; - i_isolate->PrintCurrentStackTrace(stack_trace_stream); - i::PrintF(out, "%s", stack_trace_stream.str().c_str()); -} - void Message::PrintCurrentStackTrace(Isolate* isolate, std::ostream& out) { i::Isolate* i_isolate = reinterpret_cast(isolate); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); @@ -3229,9 +3129,9 @@ void Message::PrintCurrentStackTrace(Isolate* isolate, std::ostream& out) { Local StackTrace::GetFrame(Isolate* v8_isolate, uint32_t index) const { i::Isolate* isolate = reinterpret_cast(v8_isolate); - i::Handle frame( + i::Handle info( i::StackFrameInfo::cast(Utils::OpenHandle(this)->get(index)), isolate); - return Utils::StackFrameToLocal(frame); + return Utils::StackFrameToLocal(info); } int StackTrace::GetFrameCount() const { @@ -3244,89 +3144,97 @@ Local StackTrace::CurrentStackTrace(Isolate* isolate, i::Isolate* i_isolate = reinterpret_cast(isolate); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); i::Handle stackTrace = - i_isolate->CaptureCurrentStackTrace(frame_limit, options); + 
i_isolate->CaptureDetailedStackTrace(frame_limit, options); return Utils::StackTraceToLocal(stackTrace); } -// --- S t a c k F r a m e --- - -int StackFrame::GetLineNumber() const { - return i::StackFrameInfo::GetLineNumber(Utils::OpenHandle(this)); +Local StackTrace::CurrentScriptNameOrSourceURL(Isolate* v8_isolate) { + i::Isolate* isolate = reinterpret_cast(v8_isolate); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); + i::Handle name_or_source_url = + isolate->CurrentScriptNameOrSourceURL(); + return Utils::ToLocal(name_or_source_url); } -int StackFrame::GetColumn() const { - return i::StackFrameInfo::GetColumnNumber(Utils::OpenHandle(this)); +// --- S t a c k F r a m e --- + +Location StackFrame::GetLocation() const { + i::Handle self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); + i::Handle script(self->script(), isolate); + i::Script::PositionInfo info; + CHECK(i::Script::GetPositionInfo(script, + i::StackFrameInfo::GetSourcePosition(self), + &info, i::Script::WITH_OFFSET)); + if (script->HasSourceURLComment()) { + info.line -= script->line_offset(); + if (info.line == 0) { + info.column -= script->column_offset(); + } + } + return {info.line, info.column}; } int StackFrame::GetScriptId() const { - return Utils::OpenHandle(this)->GetScriptId(); + return Utils::OpenHandle(this)->script().id(); } Local StackFrame::GetScriptName() const { - auto self = Utils::OpenHandle(this); - auto isolate = self->GetIsolate(); - i::Handle name(self->GetScriptName(), isolate); + i::Handle self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); + i::Handle name(self->script().name(), isolate); if (!name->IsString()) return {}; - return Local::Cast(Utils::ToLocal(name)); + return Utils::ToLocal(i::Handle::cast(name)); } Local StackFrame::GetScriptNameOrSourceURL() const { - auto self = Utils::OpenHandle(this); - auto isolate = self->GetIsolate(); - i::Handle name_or_url(self->GetScriptNameOrSourceURL(), isolate); - if 
(!name_or_url->IsString()) return {}; - return Local::Cast(Utils::ToLocal(name_or_url)); + i::Handle self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); + i::Handle name_or_source_url(self->script().GetNameOrSourceURL(), + isolate); + if (!name_or_source_url->IsString()) return {}; + return Utils::ToLocal(i::Handle::cast(name_or_source_url)); } Local StackFrame::GetScriptSource() const { - auto self = Utils::OpenHandle(this); - auto isolate = self->GetIsolate(); - i::Handle source(self->GetScriptSource(), isolate); + i::Handle self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); + if (!self->script().HasValidSource()) return {}; + i::Handle source(self->script().source(), isolate); if (!source->IsString()) return {}; - return Local::Cast(Utils::ToLocal(source)); + return Utils::ToLocal(i::Handle::cast(source)); } Local StackFrame::GetScriptSourceMappingURL() const { - auto self = Utils::OpenHandle(this); - auto isolate = self->GetIsolate(); - i::Handle sourceMappingURL(self->GetScriptSourceMappingURL(), - isolate); - if (!sourceMappingURL->IsString()) return {}; - return Local::Cast(Utils::ToLocal(sourceMappingURL)); + i::Handle self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); + i::Handle source_mapping_url(self->script().source_mapping_url(), + isolate); + if (!source_mapping_url->IsString()) return {}; + return Utils::ToLocal(i::Handle::cast(source_mapping_url)); } Local StackFrame::GetFunctionName() const { - auto self = Utils::OpenHandle(this); -#if V8_ENABLE_WEBASSEMBLY - if (self->IsWasm()) { - auto isolate = self->GetIsolate(); - auto instance = handle(self->GetWasmInstance(), isolate); - auto func_index = self->GetWasmFunctionIndex(); - return Utils::ToLocal( - i::GetWasmFunctionDebugName(isolate, instance, func_index)); - } -#endif // V8_ENABLE_WEBASSEMBLY - auto name = i::StackFrameInfo::GetFunctionName(self); - if (!name->IsString()) return {}; - return 
Local::Cast(Utils::ToLocal(name)); + i::Handle self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); + i::Handle name(self->function_name(), isolate); + if (name->length() == 0) return {}; + return Utils::ToLocal(name); } -bool StackFrame::IsEval() const { return Utils::OpenHandle(this)->IsEval(); } +bool StackFrame::IsEval() const { + i::Handle self = Utils::OpenHandle(this); + return self->script().compilation_type() == i::Script::COMPILATION_TYPE_EVAL; +} bool StackFrame::IsConstructor() const { - return Utils::OpenHandle(this)->IsConstructor(); + return Utils::OpenHandle(this)->is_constructor(); } -bool StackFrame::IsWasm() const { -#if V8_ENABLE_WEBASSEMBLY - return Utils::OpenHandle(this)->IsWasm(); -#else - return false; -#endif // V8_ENABLE_WEBASSEMBLY -} +bool StackFrame::IsWasm() const { return !IsUserJavaScript(); } bool StackFrame::IsUserJavaScript() const { - return Utils::OpenHandle(this)->IsUserJavaScript(); + return Utils::OpenHandle(this)->script().IsUserJavaScript(); } // --- J S O N --- @@ -3391,6 +3299,17 @@ Maybe ValueSerializer::Delegate::GetWasmModuleTransferId( return Nothing(); } +bool ValueSerializer::Delegate::SupportsSharedValues() const { return false; } + +Maybe ValueSerializer::Delegate::GetSharedValueId( + Isolate* v8_isolate, Local shared_value) { + i::Isolate* isolate = reinterpret_cast(v8_isolate); + isolate->ScheduleThrow(*isolate->factory()->NewError( + isolate->error_function(), i::MessageTemplate::kDataCloneError, + Utils::OpenHandle(*shared_value))); + return Nothing(); +} + void* ValueSerializer::Delegate::ReallocateBufferMemory(void* old_buffer, size_t size, size_t* actual_size) { @@ -3480,6 +3399,17 @@ MaybeLocal ValueDeserializer::Delegate::GetWasmModuleFromId( return MaybeLocal(); } +bool ValueDeserializer::Delegate::SupportsSharedValues() const { return false; } + +MaybeLocal ValueDeserializer::Delegate::GetSharedValueFromId( + Isolate* v8_isolate, uint32_t shared_value_id) { + i::Isolate* isolate 
= reinterpret_cast(v8_isolate); + isolate->ScheduleThrow(*isolate->factory()->NewError( + isolate->error_function(), + i::MessageTemplate::kDataCloneDeserializationError)); + return MaybeLocal(); +} + MaybeLocal ValueDeserializer::Delegate::GetSharedArrayBufferFromId(Isolate* v8_isolate, uint32_t id) { @@ -3703,15 +3633,7 @@ bool Value::IsBoolean() const { return Utils::OpenHandle(this)->IsBoolean(); } bool Value::IsExternal() const { i::Object obj = *Utils::OpenHandle(this); - if (!obj.IsHeapObject()) return false; - i::HeapObject heap_obj = i::HeapObject::cast(obj); - // Check the instance type is JS_OBJECT (instance type of Externals) before - // attempting to get the Isolate since that guarantees the object is writable - // and GetIsolate will work. - if (heap_obj.map().instance_type() != i::JS_OBJECT_TYPE) return false; - i::Isolate* isolate = i::JSObject::cast(heap_obj).GetIsolate(); - ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate); - return heap_obj.IsExternal(isolate); + return obj.IsJSExternalObject(); } bool Value::IsInt32() const { @@ -4652,14 +4574,16 @@ Maybe v8::Object::SetPrototype(Local context, // We do not allow exceptions thrown while setting the prototype // to propagate outside. 
TryCatch try_catch(reinterpret_cast(isolate)); - auto result = i::JSProxy::SetPrototype(i::Handle::cast(self), - value_obj, false, i::kThrowOnError); + auto result = + i::JSProxy::SetPrototype(isolate, i::Handle::cast(self), + value_obj, false, i::kThrowOnError); has_pending_exception = result.IsNothing(); RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); } else { ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); - auto result = i::JSObject::SetPrototype(i::Handle::cast(self), - value_obj, false, i::kThrowOnError); + auto result = + i::JSObject::SetPrototype(isolate, i::Handle::cast(self), + value_obj, false, i::kThrowOnError); if (result.IsNothing()) { isolate->clear_pending_exception(); return Nothing(); @@ -4739,7 +4663,10 @@ MaybeLocal v8::Object::ObjectProtoToString(Local context) { Local v8::Object::GetConstructorName() { auto self = Utils::OpenHandle(this); - i::Handle name = i::JSReceiver::GetConstructorName(self); + // TODO(v8:12547): Support shared objects. + DCHECK(!self->InSharedHeap()); + i::Handle name = + i::JSReceiver::GetConstructorName(self->GetIsolate(), self); return Utils::ToLocal(name); } @@ -4807,12 +4734,12 @@ Maybe v8::Object::Has(Local context, Local key) { // Check if the given key is an array index. uint32_t index = 0; if (key_obj->ToArrayIndex(&index)) { - maybe = i::JSReceiver::HasElement(self, index); + maybe = i::JSReceiver::HasElement(isolate, self, index); } else { // Convert the key to a name - possibly by calling back into JavaScript. 
i::Handle name; if (i::Object::ToName(isolate, key_obj).ToHandle(&name)) { - maybe = i::JSReceiver::HasProperty(self, name); + maybe = i::JSReceiver::HasProperty(isolate, self, name); } } has_pending_exception = maybe.IsNothing(); @@ -4838,7 +4765,7 @@ Maybe v8::Object::Has(Local context, uint32_t index) { auto isolate = reinterpret_cast(context->GetIsolate()); ENTER_V8(isolate, context, Object, Has, Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); - auto maybe = i::JSReceiver::HasElement(self, index); + auto maybe = i::JSReceiver::HasElement(isolate, self, index); has_pending_exception = maybe.IsNothing(); RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); return maybe; @@ -4941,7 +4868,7 @@ Maybe v8::Object::HasOwnProperty(Local context, i::HandleScope); auto self = Utils::OpenHandle(this); auto key_val = Utils::OpenHandle(*key); - auto result = i::JSReceiver::HasOwnProperty(self, key_val); + auto result = i::JSReceiver::HasOwnProperty(isolate, self, key_val); has_pending_exception = result.IsNothing(); RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); return result; @@ -4952,7 +4879,7 @@ Maybe v8::Object::HasOwnProperty(Local context, uint32_t index) { ENTER_V8(isolate, context, Object, HasOwnProperty, Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); - auto result = i::JSReceiver::HasOwnProperty(self, index); + auto result = i::JSReceiver::HasOwnProperty(isolate, self, index); has_pending_exception = result.IsNothing(); RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); return result; @@ -4967,7 +4894,7 @@ Maybe v8::Object::HasRealNamedProperty(Local context, if (!self->IsJSObject()) return Just(false); auto key_val = Utils::OpenHandle(*key); auto result = i::JSObject::HasRealNamedProperty( - i::Handle::cast(self), key_val); + isolate, i::Handle::cast(self), key_val); has_pending_exception = result.IsNothing(); RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); return result; @@ -4981,7 +4908,7 @@ Maybe v8::Object::HasRealIndexedProperty(Local context, auto 
self = Utils::OpenHandle(this); if (!self->IsJSObject()) return Just(false); auto result = i::JSObject::HasRealElementProperty( - i::Handle::cast(self), index); + isolate, i::Handle::cast(self), index); has_pending_exception = result.IsNothing(); RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); return result; @@ -4996,7 +4923,7 @@ Maybe v8::Object::HasRealNamedCallbackProperty(Local context, if (!self->IsJSObject()) return Just(false); auto key_val = Utils::OpenHandle(*key); auto result = i::JSObject::HasRealNamedCallbackProperty( - i::Handle::cast(self), key_val); + isolate, i::Handle::cast(self), key_val); has_pending_exception = result.IsNothing(); RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); return result; @@ -5165,7 +5092,8 @@ bool v8::Object::IsConstructor() const { bool v8::Object::IsApiWrapper() const { auto self = i::Handle::cast(Utils::OpenHandle(this)); - return self->IsApiWrapper(); + // Objects with embedder fields can wrap API objects. + return self->MayHaveEmbedderFields(); } bool v8::Object::IsUndetectable() const { @@ -5181,8 +5109,8 @@ MaybeLocal Object::CallAsFunction(Local context, ENTER_V8(isolate, context, Object, CallAsFunction, MaybeLocal(), InternalEscapableScope); i::TimerEventScope timer_scope(isolate); - i::NestedTimedHistogramScope execute_timer( - isolate->counters()->execute_precise()); + i::NestedTimedHistogramScope execute_timer(isolate->counters()->execute(), + isolate); auto self = Utils::OpenHandle(this); auto recv_obj = Utils::OpenHandle(*recv); STATIC_ASSERT(sizeof(v8::Local) == sizeof(i::Handle)); @@ -5201,8 +5129,8 @@ MaybeLocal Object::CallAsConstructor(Local context, int argc, ENTER_V8(isolate, context, Object, CallAsConstructor, MaybeLocal(), InternalEscapableScope); i::TimerEventScope timer_scope(isolate); - i::NestedTimedHistogramScope execute_timer( - isolate->counters()->execute_precise()); + i::NestedTimedHistogramScope execute_timer(isolate->counters()->execute(), + isolate); auto self = Utils::OpenHandle(this); 
STATIC_ASSERT(sizeof(v8::Local) == sizeof(i::Handle)); i::Handle* args = reinterpret_cast*>(argv); @@ -5240,8 +5168,8 @@ MaybeLocal Function::NewInstanceWithSideEffectType( ENTER_V8(isolate, context, Function, NewInstance, MaybeLocal(), InternalEscapableScope); i::TimerEventScope timer_scope(isolate); - i::NestedTimedHistogramScope execute_timer( - isolate->counters()->execute_precise()); + i::NestedTimedHistogramScope execute_timer(isolate->counters()->execute(), + isolate); auto self = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Local) == sizeof(i::Handle)); bool should_set_has_no_side_effect = @@ -5291,8 +5219,8 @@ MaybeLocal Function::Call(Local context, ENTER_V8(isolate, context, Function, Call, MaybeLocal(), InternalEscapableScope); i::TimerEventScope timer_scope(isolate); - i::NestedTimedHistogramScope execute_timer( - isolate->counters()->execute_precise()); + i::NestedTimedHistogramScope execute_timer(isolate->counters()->execute(), + isolate); auto self = Utils::OpenHandle(this); Utils::ApiCheck(!self.is_null(), "v8::Function::Call", "Function to be called is a null pointer"); @@ -5843,7 +5771,7 @@ String::ExternalStringResource* String::GetExternalStringResourceSlow() const { } if (i::StringShape(str).IsExternalTwoByte()) { - internal::Isolate* isolate = I::GetIsolateForHeapSandbox(str.ptr()); + internal::Isolate* isolate = I::GetIsolateForSandbox(str.ptr()); internal::Address value = I::ReadExternalPointerField( isolate, str.ptr(), I::kStringResourceOffset, internal::kExternalStringResourceTag); @@ -5887,7 +5815,7 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBaseSlow( *encoding_out = static_cast(type & I::kStringEncodingMask); if (i::StringShape(str).IsExternalOneByte() || i::StringShape(str).IsExternalTwoByte()) { - internal::Isolate* isolate = I::GetIsolateForHeapSandbox(string); + internal::Isolate* isolate = I::GetIsolateForSandbox(string); internal::Address value = I::ReadExternalPointerField(isolate, string, 
I::kStringResourceOffset, internal::kExternalStringResourceTag); @@ -5911,28 +5839,6 @@ v8::String::GetExternalOneByteStringResource() const { return nullptr; } -Local Symbol::Description() const { - i::Handle sym = Utils::OpenHandle(this); - - i::Isolate* isolate; - if (!i::GetIsolateFromHeapObject(*sym, &isolate)) { - // Symbol is in RO_SPACE, which means that its description is also in - // RO_SPACE. Since RO_SPACE objects are immovable we can use the - // Handle(Address*) constructor with the address of the description - // field in the Symbol object without needing an isolate. - DCHECK(!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL); -#ifndef V8_COMPRESS_POINTERS_IN_SHARED_CAGE - i::Handle ro_description(reinterpret_cast( - sym->GetFieldAddress(i::Symbol::kDescriptionOffset))); - return Utils::ToLocal(ro_description); -#else - isolate = reinterpret_cast(Isolate::GetCurrent()); -#endif - } - - return Description(reinterpret_cast(isolate)); -} - Local Symbol::Description(Isolate* isolate) const { i::Handle sym = Utils::OpenHandle(this); i::Handle description(sym->description(), @@ -6032,18 +5938,36 @@ void v8::Object::SetAlignedPointerInInternalField(int index, void* value) { i::Handle obj = Utils::OpenHandle(this); const char* location = "v8::Object::SetAlignedPointerInInternalField()"; if (!InternalFieldOK(obj, index, location)) return; + + i::DisallowGarbageCollection no_gc; + + // There's no need to invalidate slots as embedder fields are always + // tagged. 
+ obj->GetHeap()->NotifyObjectLayoutChange(*obj, no_gc, + i::InvalidateRecordedSlots::kNo); + Utils::ApiCheck(i::EmbedderDataSlot(i::JSObject::cast(*obj), index) .store_aligned_pointer(obj->GetIsolate(), value), location, "Unaligned pointer"); DCHECK_EQ(value, GetAlignedPointerFromInternalField(index)); internal::WriteBarrier::MarkingFromInternalFields(i::JSObject::cast(*obj)); + +#ifdef VERIFY_HEAP + obj->GetHeap()->VerifyObjectLayoutChange(*obj, obj->map()); +#endif // VERIFY_HEAP } void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[], void* values[]) { i::Handle obj = Utils::OpenHandle(this); - const char* location = "v8::Object::SetAlignedPointerInInternalFields()"; + i::DisallowGarbageCollection no_gc; + // There's no need to invalidate slots as embedder fields are always + // tagged. + obj->GetHeap()->NotifyObjectLayoutChange(*obj, no_gc, + i::InvalidateRecordedSlots::kNo); + + const char* location = "v8::Object::SetAlignedPointerInInternalFields()"; i::JSObject js_obj = i::JSObject::cast(*obj); int nof_embedder_fields = js_obj.GetEmbedderFieldCount(); for (int i = 0; i < argc; i++) { @@ -6059,15 +5983,10 @@ void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[], DCHECK_EQ(value, GetAlignedPointerFromInternalField(index)); } internal::WriteBarrier::MarkingFromInternalFields(js_obj); -} -static void* ExternalValue(i::Object obj) { - // Obscure semantics for undefined, but somehow checked in our unit tests... 
- if (obj.IsUndefined()) { - return nullptr; - } - i::Object foreign = i::JSObject::cast(obj).GetEmbedderField(0); - return reinterpret_cast(i::Foreign::cast(foreign).foreign_address()); +#ifdef VERIFY_HEAP + obj->GetHeap()->VerifyObjectLayoutChange(*obj, obj->map()); +#endif // VERIFY_HEAP } // --- E n v i r o n m e n t --- @@ -6076,10 +5995,8 @@ void v8::V8::InitializePlatform(Platform* platform) { i::V8::InitializePlatform(platform); } -#ifdef V8_VIRTUAL_MEMORY_CAGE -bool v8::V8::InitializeVirtualMemoryCage() { - return i::V8::InitializeVirtualMemoryCage(); -} +#ifdef V8_SANDBOX +bool v8::V8::InitializeSandbox() { return i::V8::InitializeSandbox(); } #endif void v8::V8::DisposePlatform() { i::V8::DisposePlatform(); } @@ -6103,30 +6020,31 @@ bool v8::V8::Initialize(const int build_config) { kEmbedderSmiValueSize, internal::kSmiValueSize); } - const bool kEmbedderHeapSandbox = (build_config & kHeapSandbox) != 0; - if (kEmbedderHeapSandbox != V8_HEAP_SANDBOX_BOOL) { + const bool kEmbedderSandboxedExternalPointers = + (build_config & kSandboxedExternalPointers) != 0; + if (kEmbedderSandboxedExternalPointers != + V8_SANDBOXED_EXTERNAL_POINTERS_BOOL) { FATAL( "Embedder-vs-V8 build configuration mismatch. On embedder side " - "heap sandbox is %s while on V8 side it's %s.", - kEmbedderHeapSandbox ? "ENABLED" : "DISABLED", - V8_HEAP_SANDBOX_BOOL ? "ENABLED" : "DISABLED"); + "sandboxed external pointers is %s while on V8 side it's %s.", + kEmbedderSandboxedExternalPointers ? "ENABLED" : "DISABLED", + V8_SANDBOXED_EXTERNAL_POINTERS_BOOL ? "ENABLED" : "DISABLED"); } - const bool kEmbedderVirtualMemoryCage = - (build_config & kVirtualMemoryCage) != 0; - if (kEmbedderVirtualMemoryCage != V8_VIRTUAL_MEMORY_CAGE_BOOL) { + const bool kEmbedderSandbox = (build_config & kSandbox) != 0; + if (kEmbedderSandbox != V8_SANDBOX_BOOL) { FATAL( "Embedder-vs-V8 build configuration mismatch. 
On embedder side " - "virtual memory cage is %s while on V8 side it's %s.", - kEmbedderVirtualMemoryCage ? "ENABLED" : "DISABLED", - V8_VIRTUAL_MEMORY_CAGE_BOOL ? "ENABLED" : "DISABLED"); + "sandbox is %s while on V8 side it's %s.", + kEmbedderSandbox ? "ENABLED" : "DISABLED", + V8_SANDBOX_BOOL ? "ENABLED" : "DISABLED"); } i::V8::Initialize(); return true; } -#if V8_OS_LINUX || V8_OS_MACOSX +#if V8_OS_LINUX || V8_OS_DARWIN bool TryHandleWebAssemblyTrapPosix(int sig_code, siginfo_t* info, void* context) { #if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED @@ -6239,31 +6157,38 @@ void v8::V8::InitializeExternalStartupDataFromFile(const char* snapshot_blob) { const char* v8::V8::GetVersion() { return i::Version::GetVersion(); } -#ifdef V8_VIRTUAL_MEMORY_CAGE +#ifdef V8_SANDBOX +VirtualAddressSpace* v8::V8::GetSandboxAddressSpace() { + Utils::ApiCheck(i::GetProcessWideSandbox()->is_initialized(), + "v8::V8::GetSandboxAddressSpace", + "The sandbox must be initialized first."); + return i::GetProcessWideSandbox()->address_space(); +} + PageAllocator* v8::V8::GetVirtualMemoryCagePageAllocator() { - Utils::ApiCheck(i::GetProcessWideVirtualMemoryCage()->is_initialized(), + Utils::ApiCheck(i::GetProcessWideSandbox()->is_initialized(), "v8::V8::GetVirtualMemoryCagePageAllocator", - "The virtual memory cage must be initialized first."); - return i::GetProcessWideVirtualMemoryCage()->page_allocator(); + "The sandbox must be initialized first."); + return i::GetProcessWideSandbox()->page_allocator(); } -size_t v8::V8::GetVirtualMemoryCageSizeInBytes() { - if (!i::GetProcessWideVirtualMemoryCage()->is_initialized()) { +size_t v8::V8::GetSandboxSizeInBytes() { + if (!i::GetProcessWideSandbox()->is_initialized()) { return 0; } else { - return i::GetProcessWideVirtualMemoryCage()->size(); + return i::GetProcessWideSandbox()->size(); } } -bool v8::V8::IsUsingSecureVirtualMemoryCage() { - Utils::ApiCheck(i::GetProcessWideVirtualMemoryCage()->is_initialized(), - 
"v8::V8::IsUsingSecureVirtualMemoryCage", - "The virtual memory cage must be initialized first."); - // TODO(saelo) For now, we only treat a fake cage as insecure. Once we use - // caged pointers that assume that the cage has a constant size, we'll also - // treat cages smaller than the default size as insecure because caged - // pointers can then access memory outside of them. - return !i::GetProcessWideVirtualMemoryCage()->is_fake_cage(); +bool v8::V8::IsSandboxConfiguredSecurely() { + Utils::ApiCheck(i::GetProcessWideSandbox()->is_initialized(), + "v8::V8::IsSandoxConfiguredSecurely", + "The sandbox must be initialized first."); + // TODO(saelo) For now, we only treat a partially reserved sandbox as + // insecure. Once we use sandboxed pointers, which assume that the sandbox + // has a fixed size, we'll also treat sandboxes with a smaller size as + // insecure because these pointers can then access memory outside of them. + return !i::GetProcessWideSandbox()->is_partially_reserved(); } #endif @@ -6418,7 +6343,7 @@ Local NewContext( // TODO(jkummerow): This is for crbug.com/713699. Remove it if it doesn't // fail. // Sanity-check that the isolate is initialized and usable. 
- CHECK(isolate->builtins()->code(i::Builtin::kIllegal).IsCode()); + CHECK(isolate->builtins()->code(i::Builtin::kIllegal).IsCodeT()); TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.NewContext"); LOG_API(isolate, Context, New); @@ -6602,6 +6527,7 @@ void v8::Context::SetPromiseHooks(Local init_hook, Local before_hook, Local after_hook, Local resolve_hook) { +#ifdef V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS i::Handle context = Utils::OpenHandle(this); i::Isolate* isolate = context->GetIsolate(); @@ -6635,6 +6561,10 @@ void v8::Context::SetPromiseHooks(Local init_hook, context->native_context().set_promise_hook_before_function(*before); context->native_context().set_promise_hook_after_function(*after); context->native_context().set_promise_hook_resolve_function(*resolve); +#else // V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS + Utils::ApiCheck(false, "v8::Context::SetPromiseHook", + "V8 was compiled without JavaScript Promise hooks"); +#endif // V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS } MaybeLocal metrics::Recorder::GetContext( @@ -6786,6 +6716,11 @@ bool FunctionTemplate::IsLeafTemplateForApiObject( Local v8::External::New(Isolate* isolate, void* value) { STATIC_ASSERT(sizeof(value) == sizeof(i::Address)); + // Nullptr is not allowed here because serialization/deserialization of + // nullptr external api references is not possible as nullptr is used as an + // external_references table terminator, see v8::SnapshotCreator() + // constructors. 
+ DCHECK_NOT_NULL(value); i::Isolate* i_isolate = reinterpret_cast(isolate); LOG_API(i_isolate, External, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); @@ -6794,7 +6729,8 @@ Local v8::External::New(Isolate* isolate, void* value) { } void* External::Value() const { - return ExternalValue(*Utils::OpenHandle(this)); + auto self = Utils::OpenHandle(this); + return i::JSExternalObject::cast(*self).value(); } // anonymous namespace for string creation helper functions @@ -8585,10 +8521,11 @@ void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) { void Isolate::RequestGarbageCollectionForTesting( GarbageCollectionType type, EmbedderHeapTracer::EmbedderStackState stack_state) { + base::Optional stack_scope; if (type == kFullGarbageCollection) { - reinterpret_cast(this) - ->heap() - ->SetEmbedderStackStateForNextFinalization(stack_state); + stack_scope.emplace(reinterpret_cast(this)->heap(), + i::EmbedderStackStateScope::kExplicitInvocation, + stack_state); } RequestGarbageCollectionForTesting(type); } @@ -8636,6 +8573,15 @@ void Isolate::Initialize(Isolate* isolate, } else { i_isolate->set_snapshot_blob(i::Snapshot::DefaultSnapshotBlob()); } + + if (params.fatal_error_callback) { + isolate->SetFatalErrorHandler(params.fatal_error_callback); + } + + if (params.oom_error_callback) { + isolate->SetOOMErrorHandler(params.oom_error_callback); + } + if (params.counter_lookup_callback) { isolate->SetCounterFunction(params.counter_lookup_callback); } @@ -8664,7 +8610,7 @@ void Isolate::Initialize(Isolate* isolate, params.experimental_attach_to_shared_isolate)); } - // TODO(jochen): Once we got rid of Isolate::Current(), we can remove this. + // TODO(v8:2487): Once we got rid of Isolate::Current(), we can remove this. 
Isolate::Scope isolate_scope(isolate); if (i_isolate->snapshot_blob() == nullptr) { FATAL( @@ -8775,6 +8721,12 @@ void Isolate::SetHostInitializeImportMetaObjectCallback( isolate->SetHostInitializeImportMetaObjectCallback(callback); } +void Isolate::SetHostCreateShadowRealmContextCallback( + HostCreateShadowRealmContextCallback callback) { + i::Isolate* isolate = reinterpret_cast(this); + isolate->SetHostCreateShadowRealmContextCallback(callback); +} + void Isolate::SetPrepareStackTraceCallback(PrepareStackTraceCallback callback) { i::Isolate* isolate = reinterpret_cast(this); isolate->SetPrepareStackTraceCallback(callback); @@ -9265,7 +9217,7 @@ void Isolate::IsolateInBackgroundNotification() { void Isolate::MemoryPressureNotification(MemoryPressureLevel level) { i::Isolate* isolate = reinterpret_cast(this); bool on_isolate_thread = - v8::Locker::WasEverUsed() + isolate->was_locker_ever_used() ? isolate->thread_manager()->IsLockedByCurrentThread() : i::ThreadId::Current() == isolate->thread_id(); isolate->heap()->MemoryPressureNotification(level, on_isolate_thread); @@ -9346,7 +9298,7 @@ JSEntryStubs Isolate::GetJSEntryStubs() { {i::Builtin::kJSRunMicrotasksEntry, &entry_stubs.js_run_microtasks_entry_stub}}}; for (auto& pair : stubs) { - i::Code js_entry = isolate->builtins()->code(pair.first); + i::Code js_entry = FromCodeT(isolate->builtins()->code(pair.first)); pair.second->code.start = reinterpret_cast(js_entry.InstructionStart()); pair.second->code.length_in_bytes = js_entry.InstructionSize(); @@ -10272,14 +10224,6 @@ void EmbedderHeapTracer::SetStackStart(void* stack_start) { stack_start); } -void EmbedderHeapTracer::NotifyEmptyEmbedderStack() { - CHECK(isolate_); - reinterpret_cast(isolate_) - ->heap() - ->local_embedder_heap_tracer() - ->NotifyEmptyEmbedderStack(); -} - void EmbedderHeapTracer::FinalizeTracing() { if (isolate_) { i::Isolate* isolate = reinterpret_cast(isolate_); @@ -10290,19 +10234,6 @@ void EmbedderHeapTracer::FinalizeTracing() { } } 
-void EmbedderHeapTracer::GarbageCollectionForTesting( - EmbedderStackState stack_state) { - CHECK(isolate_); - Utils::ApiCheck(i::FLAG_expose_gc, - "v8::EmbedderHeapTracer::GarbageCollectionForTesting", - "Must use --expose-gc"); - i::Heap* const heap = reinterpret_cast(isolate_)->heap(); - heap->SetEmbedderStackStateForNextFinalization(stack_state); - heap->PreciseCollectAllGarbage(i::Heap::kNoGCFlags, - i::GarbageCollectionReason::kTesting, - kGCCallbackFlagForced); -} - void EmbedderHeapTracer::IncreaseAllocatedSize(size_t bytes) { if (isolate_) { i::LocalEmbedderHeapTracer* const tracer = diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h index 320346b22fae05..c238ffb15329c1 100644 --- a/deps/v8/src/api/api.h +++ b/deps/v8/src/api/api.h @@ -41,6 +41,7 @@ class JSFinalizationRegistry; namespace debug { class AccessorPair; class GeneratorObject; +class ScriptSource; class Script; class EphemeronTable; } // namespace debug @@ -134,6 +135,7 @@ class RegisteredExtension { V(StackFrame, StackFrameInfo) \ V(Proxy, JSProxy) \ V(debug::GeneratorObject, JSGeneratorObject) \ + V(debug::ScriptSource, HeapObject) \ V(debug::Script, Script) \ V(debug::EphemeronTable, EphemeronHashTable) \ V(debug::AccessorPair, AccessorPair) \ @@ -467,13 +469,6 @@ bool HandleScopeImplementer::HasSavedContexts() { return !saved_contexts_.empty(); } -void HandleScopeImplementer::EnterContext(Context context) { - DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity()); - DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size()); - entered_contexts_.push_back(context); - is_microtask_context_.push_back(0); -} - void HandleScopeImplementer::LeaveContext() { DCHECK(!entered_contexts_.empty()); DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity()); @@ -486,13 +481,6 @@ bool HandleScopeImplementer::LastEnteredContextWas(Context context) { return !entered_contexts_.empty() && entered_contexts_.back() == context; } -void 
HandleScopeImplementer::EnterMicrotaskContext(Context context) { - DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity()); - DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size()); - entered_contexts_.push_back(context); - is_microtask_context_.push_back(1); -} - // If there's a spare block, use it for growing the current scope. internal::Address* HandleScopeImplementer::GetSpareOrNewBlock() { internal::Address* block = diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc index 8791e4eae2010e..a1b58f2d4326af 100644 --- a/deps/v8/src/asmjs/asm-js.cc +++ b/deps/v8/src/asmjs/asm-js.cc @@ -42,10 +42,11 @@ Handle StdlibMathMember(Isolate* isolate, Handle stdlib, Handle name) { Handle math_name( isolate->factory()->InternalizeString(base::StaticCharVector("Math"))); - Handle math = JSReceiver::GetDataProperty(stdlib, math_name); + Handle math = JSReceiver::GetDataProperty(isolate, stdlib, math_name); if (!math->IsJSReceiver()) return isolate->factory()->undefined_value(); Handle math_receiver = Handle::cast(math); - Handle value = JSReceiver::GetDataProperty(math_receiver, name); + Handle value = + JSReceiver::GetDataProperty(isolate, math_receiver, name); return value; } @@ -55,13 +56,13 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle stdlib, if (members.contains(wasm::AsmJsParser::StandardMember::kInfinity)) { members.Remove(wasm::AsmJsParser::StandardMember::kInfinity); Handle name = isolate->factory()->Infinity_string(); - Handle value = JSReceiver::GetDataProperty(stdlib, name); + Handle value = JSReceiver::GetDataProperty(isolate, stdlib, name); if (!value->IsNumber() || !std::isinf(value->Number())) return false; } if (members.contains(wasm::AsmJsParser::StandardMember::kNaN)) { members.Remove(wasm::AsmJsParser::StandardMember::kNaN); Handle name = isolate->factory()->NaN_string(); - Handle value = JSReceiver::GetDataProperty(stdlib, name); + Handle value = JSReceiver::GetDataProperty(isolate, stdlib, name); 
if (!value->IsNaN()) return false; } #define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \ @@ -77,7 +78,7 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle stdlib, return false; \ } \ DCHECK_EQ(shared.GetCode(), \ - isolate->builtins()->codet(Builtin::kMath##FName)); \ + isolate->builtins()->code(Builtin::kMath##FName)); \ } STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC) #undef STDLIB_MATH_FUNC @@ -91,16 +92,16 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle stdlib, } STDLIB_MATH_VALUE_LIST(STDLIB_MATH_CONST) #undef STDLIB_MATH_CONST -#define STDLIB_ARRAY_TYPE(fname, FName) \ - if (members.contains(wasm::AsmJsParser::StandardMember::k##FName)) { \ - members.Remove(wasm::AsmJsParser::StandardMember::k##FName); \ - *is_typed_array = true; \ - Handle name(isolate->factory()->InternalizeString( \ - base::StaticCharVector(#FName))); \ - Handle value = JSReceiver::GetDataProperty(stdlib, name); \ - if (!value->IsJSFunction()) return false; \ - Handle func = Handle::cast(value); \ - if (!func.is_identical_to(isolate->fname())) return false; \ +#define STDLIB_ARRAY_TYPE(fname, FName) \ + if (members.contains(wasm::AsmJsParser::StandardMember::k##FName)) { \ + members.Remove(wasm::AsmJsParser::StandardMember::k##FName); \ + *is_typed_array = true; \ + Handle name(isolate->factory()->InternalizeString( \ + base::StaticCharVector(#FName))); \ + Handle value = JSReceiver::GetDataProperty(isolate, stdlib, name); \ + if (!value->IsJSFunction()) return false; \ + Handle func = Handle::cast(value); \ + if (!func.is_identical_to(isolate->fname())) return false; \ } STDLIB_ARRAY_TYPE(int8_array_fun, Int8Array) STDLIB_ARRAY_TYPE(uint8_array_fun, Uint8Array) diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc index 3ff2a44201321d..6849c9ea5dbf39 100644 --- a/deps/v8/src/asmjs/asm-parser.cc +++ b/deps/v8/src/asmjs/asm-parser.cc @@ -760,7 +760,7 @@ void AsmJsParser::ValidateFunction() { ValidateFunctionParams(¶ms); // Check against limit on 
number of parameters. - if (params.size() >= kV8MaxWasmFunctionParams) { + if (params.size() > kV8MaxWasmFunctionParams) { FAIL("Number of parameters exceeds internal limit"); } @@ -2246,6 +2246,9 @@ AsmType* AsmJsParser::ValidateCall() { // also determined the complete function type and can perform checking against // the expected type or update the expected type in case of first occurrence. if (function_info->kind == VarKind::kImportedFunction) { + if (param_types.size() > kV8MaxWasmFunctionParams) { + FAILn("Number of parameters exceeds internal limit"); + } for (auto t : param_specific_types) { if (!t->IsA(AsmType::Extern())) { FAILn("Imported function args must be type extern"); diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h index 1aa63658175af7..05105be91d1fe6 100644 --- a/deps/v8/src/asmjs/asm-parser.h +++ b/deps/v8/src/asmjs/asm-parser.h @@ -6,7 +6,6 @@ #define V8_ASMJS_ASM_PARSER_H_ #include -#include #include "src/asmjs/asm-scanner.h" #include "src/asmjs/asm-types.h" diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS index 13586e139c7710..069e31491c5183 100644 --- a/deps/v8/src/ast/OWNERS +++ b/deps/v8/src/ast/OWNERS @@ -1,4 +1,3 @@ -gsathya@chromium.org leszeks@chromium.org marja@chromium.org verwaest@chromium.org diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc index 4dab59fdae26e2..a93c7fd09184d5 100644 --- a/deps/v8/src/ast/ast-value-factory.cc +++ b/deps/v8/src/ast/ast-value-factory.cc @@ -84,7 +84,7 @@ template EXPORT_TEMPLATE_DEFINE( bool AstRawString::AsArrayIndex(uint32_t* index) const { // The StringHasher will set up the hash. Bail out early if we know it // can't be convertible to an array index. 
- if ((raw_hash_field_ & Name::kIsNotIntegerIndexMask) != 0) return false; + if (!IsIntegerIndex()) return false; if (length() <= Name::kMaxCachedArrayIndexLength) { *index = Name::ArrayIndexValueBits::decode(raw_hash_field_); return true; @@ -97,7 +97,7 @@ bool AstRawString::AsArrayIndex(uint32_t* index) const { } bool AstRawString::IsIntegerIndex() const { - return (raw_hash_field_ & Name::kIsNotIntegerIndexMask) == 0; + return Name::IsIntegerIndex(raw_hash_field_); } bool AstRawString::IsOneByteEqualTo(const char* data) const { @@ -353,16 +353,18 @@ const AstRawString* AstValueFactory::GetString( } AstConsString* AstValueFactory::NewConsString() { - return zone()->New(); + return single_parse_zone()->New(); } AstConsString* AstValueFactory::NewConsString(const AstRawString* str) { - return NewConsString()->AddString(zone(), str); + return NewConsString()->AddString(single_parse_zone(), str); } AstConsString* AstValueFactory::NewConsString(const AstRawString* str1, const AstRawString* str2) { - return NewConsString()->AddString(zone(), str1)->AddString(zone(), str2); + return NewConsString() + ->AddString(single_parse_zone(), str1) + ->AddString(single_parse_zone(), str2); } template @@ -395,9 +397,9 @@ const AstRawString* AstValueFactory::GetString( [&]() { // Copy literal contents for later comparison. 
int length = literal_bytes.length(); - byte* new_literal_bytes = zone()->NewArray(length); + byte* new_literal_bytes = ast_raw_string_zone()->NewArray(length); memcpy(new_literal_bytes, literal_bytes.begin(), length); - AstRawString* new_string = zone()->New( + AstRawString* new_string = ast_raw_string_zone()->New( is_one_byte, base::Vector(new_literal_bytes, length), raw_hash_field); CHECK_NOT_NULL(new_string); diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h index d036d99604eab3..b0c380ee602dcf 100644 --- a/deps/v8/src/ast/ast-value-factory.h +++ b/deps/v8/src/ast/ast-value-factory.h @@ -80,7 +80,7 @@ class AstRawString final : public ZoneObject { uint32_t Hash() const { // Hash field must be computed. DCHECK_EQ(raw_hash_field_ & Name::kHashNotComputedMask, 0); - return raw_hash_field_ >> Name::kHashShift; + return Name::HashBits::decode(raw_hash_field_); } // This function can be called after internalizing. @@ -311,24 +311,40 @@ class AstValueFactory { public: AstValueFactory(Zone* zone, const AstStringConstants* string_constants, uint64_t hash_seed) + : AstValueFactory(zone, zone, string_constants, hash_seed) {} + + AstValueFactory(Zone* ast_raw_string_zone, Zone* single_parse_zone, + const AstStringConstants* string_constants, + uint64_t hash_seed) : string_table_(string_constants->string_table()), strings_(nullptr), strings_end_(&strings_), string_constants_(string_constants), empty_cons_string_(nullptr), - zone_(zone), + ast_raw_string_zone_(ast_raw_string_zone), + single_parse_zone_(single_parse_zone), hash_seed_(hash_seed) { - DCHECK_NOT_NULL(zone_); + DCHECK_NOT_NULL(ast_raw_string_zone_); + DCHECK_NOT_NULL(single_parse_zone_); DCHECK_EQ(hash_seed, string_constants->hash_seed()); std::fill(one_character_strings_, one_character_strings_ + arraysize(one_character_strings_), nullptr); - empty_cons_string_ = NewConsString(); + + // Allocate the empty ConsString in the AstRawString Zone instead of the + // single parse 
Zone like other ConsStrings, because unlike those it can be + // reused across parses. + empty_cons_string_ = ast_raw_string_zone_->New(); + } + + Zone* ast_raw_string_zone() const { + DCHECK_NOT_NULL(ast_raw_string_zone_); + return ast_raw_string_zone_; } - Zone* zone() const { - DCHECK_NOT_NULL(zone_); - return zone_; + Zone* single_parse_zone() const { + DCHECK_NOT_NULL(single_parse_zone_); + return single_parse_zone_; } const AstRawString* GetOneByteString(base::Vector literal) { @@ -394,7 +410,8 @@ class AstValueFactory { static const int kMaxOneCharStringValue = 128; const AstRawString* one_character_strings_[kMaxOneCharStringValue]; - Zone* zone_; + Zone* ast_raw_string_zone_; + Zone* single_parse_zone_; uint64_t hash_seed_; }; diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc index ac89df574d832b..804a6840c18ee3 100644 --- a/deps/v8/src/ast/ast.cc +++ b/deps/v8/src/ast/ast.cc @@ -268,6 +268,14 @@ bool FunctionLiteral::private_name_lookup_skips_outer_class() const { return scope()->private_name_lookup_skips_outer_class(); } +bool FunctionLiteral::class_scope_has_private_brand() const { + return scope()->class_scope_has_private_brand(); +} + +void FunctionLiteral::set_class_scope_has_private_brand(bool value) { + return scope()->set_class_scope_has_private_brand(value); +} + ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value, Kind kind, bool is_computed_name) : LiteralProperty(key, value, is_computed_name), @@ -365,7 +373,14 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) { } } -void ObjectLiteral::InitFlagsForPendingNullPrototype(int i) { +int ObjectLiteralBoilerplateBuilder::ComputeFlags(bool disable_mementos) const { + int flags = LiteralBoilerplateBuilder::ComputeFlags(disable_mementos); + if (fast_elements()) flags |= ObjectLiteral::kFastElements; + if (has_null_prototype()) flags |= ObjectLiteral::kHasNullPrototype; + return flags; +} + +void 
ObjectLiteralBoilerplateBuilder::InitFlagsForPendingNullPrototype(int i) { // We still check for __proto__:null after computed property names. for (; i < properties()->length(); i++) { if (properties()->at(i)->IsNullPrototype()) { @@ -375,12 +390,19 @@ void ObjectLiteral::InitFlagsForPendingNullPrototype(int i) { } } -int ObjectLiteral::InitDepthAndFlags() { - if (is_initialized()) return depth(); +int ObjectLiteralBoilerplateBuilder::EncodeLiteralType() { + int flags = AggregateLiteral::kNoFlags; + if (fast_elements()) flags |= ObjectLiteral::kFastElements; + if (has_null_prototype()) flags |= ObjectLiteral::kHasNullPrototype; + return flags; +} + +void ObjectLiteralBoilerplateBuilder::InitDepthAndFlags() { + if (is_initialized()) return; bool is_simple = true; bool has_seen_prototype = false; bool needs_initial_allocation_site = false; - int depth_acc = 1; + DepthKind depth_acc = kShallow; uint32_t nof_properties = 0; uint32_t elements = 0; uint32_t max_element_index = 0; @@ -408,8 +430,8 @@ int ObjectLiteral::InitDepthAndFlags() { MaterializedLiteral* literal = property->value()->AsMaterializedLiteral(); if (literal != nullptr) { - int subliteral_depth = literal->InitDepthAndFlags() + 1; - if (subliteral_depth > depth_acc) depth_acc = subliteral_depth; + LiteralBoilerplateBuilder::InitDepthAndFlags(literal); + depth_acc = kNotShallow; needs_initial_allocation_site |= literal->NeedsInitialAllocationSite(); } @@ -440,11 +462,11 @@ int ObjectLiteral::InitDepthAndFlags() { set_has_elements(elements > 0); set_fast_elements((max_element_index <= 32) || ((2 * elements) >= max_element_index)); - return depth_acc; } template -void ObjectLiteral::BuildBoilerplateDescription(IsolateT* isolate) { +void ObjectLiteralBoilerplateBuilder::BuildBoilerplateDescription( + IsolateT* isolate) { if (!boilerplate_description_.is_null()) return; int index_keys = 0; @@ -479,7 +501,7 @@ void ObjectLiteral::BuildBoilerplateDescription(IsolateT* isolate) { MaterializedLiteral* m_literal = 
property->value()->AsMaterializedLiteral(); if (m_literal != nullptr) { - m_literal->BuildConstants(isolate); + BuildConstants(isolate, m_literal); } // Add CONSTANT and COMPUTED properties to boilerplate. Use the @@ -501,12 +523,14 @@ void ObjectLiteral::BuildBoilerplateDescription(IsolateT* isolate) { boilerplate_description_ = boilerplate_description; } -template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) void ObjectLiteral:: +template EXPORT_TEMPLATE_DEFINE( + V8_BASE_EXPORT) void ObjectLiteralBoilerplateBuilder:: BuildBoilerplateDescription(Isolate* isolate); -template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) void ObjectLiteral:: +template EXPORT_TEMPLATE_DEFINE( + V8_BASE_EXPORT) void ObjectLiteralBoilerplateBuilder:: BuildBoilerplateDescription(LocalIsolate* isolate); -bool ObjectLiteral::IsFastCloningSupported() const { +bool ObjectLiteralBoilerplateBuilder::IsFastCloningSupported() const { // The CreateShallowObjectLiteratal builtin doesn't copy elements, and object // literals don't support copy-on-write (COW) elements for now. // TODO(mvstanton): make object literals support COW elements. 
@@ -515,25 +539,53 @@ bool ObjectLiteral::IsFastCloningSupported() const { ConstructorBuiltins::kMaximumClonedShallowObjectProperties; } -int ArrayLiteral::InitDepthAndFlags() { - if (is_initialized()) return depth(); +// static +template +Handle LiteralBoilerplateBuilder::GetBoilerplateValue( + Expression* expression, IsolateT* isolate) { + if (expression->IsLiteral()) { + return expression->AsLiteral()->BuildValue(isolate); + } + if (expression->IsCompileTimeValue()) { + if (expression->IsObjectLiteral()) { + ObjectLiteral* object_literal = expression->AsObjectLiteral(); + DCHECK(object_literal->builder()->is_simple()); + return object_literal->builder()->boilerplate_description(); + } else { + DCHECK(expression->IsArrayLiteral()); + ArrayLiteral* array_literal = expression->AsArrayLiteral(); + DCHECK(array_literal->builder()->is_simple()); + return array_literal->builder()->boilerplate_description(); + } + } + return isolate->factory()->uninitialized_value(); +} +template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) + Handle LiteralBoilerplateBuilder::GetBoilerplateValue( + Expression* expression, Isolate* isolate); +template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) + Handle LiteralBoilerplateBuilder::GetBoilerplateValue( + Expression* expression, LocalIsolate* isolate); + +void ArrayLiteralBoilerplateBuilder::InitDepthAndFlags() { + if (is_initialized()) return; int constants_length = - first_spread_index_ >= 0 ? first_spread_index_ : values()->length(); + first_spread_index_ >= 0 ? first_spread_index_ : values_->length(); // Fill in the literals. 
bool is_simple = first_spread_index_ < 0; bool is_holey = false; ElementsKind kind = FIRST_FAST_ELEMENTS_KIND; - int depth_acc = 1; + DepthKind depth_acc = kShallow; int array_index = 0; for (; array_index < constants_length; array_index++) { - Expression* element = values()->at(array_index); + Expression* element = values_->at(array_index); MaterializedLiteral* materialized_literal = element->AsMaterializedLiteral(); if (materialized_literal != nullptr) { - int subliteral_depth = materialized_literal->InitDepthAndFlags() + 1; - if (subliteral_depth > depth_acc) depth_acc = subliteral_depth; + LiteralBoilerplateBuilder::InitDepthAndFlags(materialized_literal); + depth_acc = kNotShallow; } if (!element->IsCompileTimeValue()) { @@ -592,15 +644,15 @@ int ArrayLiteral::InitDepthAndFlags() { // Array literals always need an initial allocation site to properly track // elements transitions. set_needs_initial_allocation_site(true); - return depth_acc; } template -void ArrayLiteral::BuildBoilerplateDescription(IsolateT* isolate) { +void ArrayLiteralBoilerplateBuilder::BuildBoilerplateDescription( + IsolateT* isolate) { if (!boilerplate_description_.is_null()) return; int constants_length = - first_spread_index_ >= 0 ? first_spread_index_ : values()->length(); + first_spread_index_ >= 0 ? first_spread_index_ : values_->length(); ElementsKind kind = boilerplate_descriptor_kind(); bool use_doubles = IsDoubleElementsKind(kind); @@ -616,7 +668,7 @@ void ArrayLiteral::BuildBoilerplateDescription(IsolateT* isolate) { // Fill in the literals. 
int array_index = 0; for (; array_index < constants_length; array_index++) { - Expression* element = values()->at(array_index); + Expression* element = values_->at(array_index); DCHECK(!element->IsSpread()); if (use_doubles) { Literal* literal = element->AsLiteral(); @@ -636,7 +688,7 @@ void ArrayLiteral::BuildBoilerplateDescription(IsolateT* isolate) { } else { MaterializedLiteral* m_literal = element->AsMaterializedLiteral(); if (m_literal != nullptr) { - m_literal->BuildConstants(isolate); + BuildConstants(isolate, m_literal); } // New handle scope here, needs to be after BuildContants(). @@ -655,11 +707,9 @@ void ArrayLiteral::BuildBoilerplateDescription(IsolateT* isolate) { boilerplate_value = Smi::zero(); } - DCHECK_EQ( - boilerplate_descriptor_kind(), - GetMoreGeneralElementsKind(boilerplate_descriptor_kind(), - boilerplate_value.OptimalElementsKind( - GetPtrComprCageBase(*elements)))); + DCHECK_EQ(kind, GetMoreGeneralElementsKind( + kind, boilerplate_value.OptimalElementsKind( + GetPtrComprCageBase(*elements)))); FixedArray::cast(*elements).set(array_index, boilerplate_value); } @@ -667,130 +717,120 @@ void ArrayLiteral::BuildBoilerplateDescription(IsolateT* isolate) { // Simple and shallow arrays can be lazily copied, we transform the // elements array to a copy-on-write array. 
- if (is_simple() && depth() == 1 && array_index > 0 && + if (is_simple() && depth() == kShallow && array_index > 0 && IsSmiOrObjectElementsKind(kind)) { - elements->set_map(ReadOnlyRoots(isolate).fixed_cow_array_map()); + elements->set_map_safe_transition( + ReadOnlyRoots(isolate).fixed_cow_array_map()); } boilerplate_description_ = isolate->factory()->NewArrayBoilerplateDescription(kind, elements); } template EXPORT_TEMPLATE_DEFINE( - V8_BASE_EXPORT) void ArrayLiteral::BuildBoilerplateDescription(Isolate* - isolate); + V8_BASE_EXPORT) void ArrayLiteralBoilerplateBuilder:: + BuildBoilerplateDescription(Isolate* isolate); template EXPORT_TEMPLATE_DEFINE( - V8_BASE_EXPORT) void ArrayLiteral::BuildBoilerplateDescription(LocalIsolate* - isolate); + V8_BASE_EXPORT) void ArrayLiteralBoilerplateBuilder:: + BuildBoilerplateDescription(LocalIsolate* -bool ArrayLiteral::IsFastCloningSupported() const { - return depth() <= 1 && - values_.length() <= + isolate); + +bool ArrayLiteralBoilerplateBuilder::IsFastCloningSupported() const { + return depth() <= kShallow && + values_->length() <= ConstructorBuiltins::kMaximumClonedShallowArrayElements; } bool MaterializedLiteral::IsSimple() const { - if (IsArrayLiteral()) return AsArrayLiteral()->is_simple(); - if (IsObjectLiteral()) return AsObjectLiteral()->is_simple(); + if (IsArrayLiteral()) return AsArrayLiteral()->builder()->is_simple(); + if (IsObjectLiteral()) return AsObjectLiteral()->builder()->is_simple(); DCHECK(IsRegExpLiteral()); return false; } -template -Handle MaterializedLiteral::GetBoilerplateValue(Expression* expression, - IsolateT* isolate) { - if (expression->IsLiteral()) { - return expression->AsLiteral()->BuildValue(isolate); +// static +void LiteralBoilerplateBuilder::InitDepthAndFlags(MaterializedLiteral* expr) { + if (expr->IsArrayLiteral()) { + return expr->AsArrayLiteral()->builder()->InitDepthAndFlags(); } - if (expression->IsCompileTimeValue()) { - if (expression->IsObjectLiteral()) { - ObjectLiteral* 
object_literal = expression->AsObjectLiteral(); - DCHECK(object_literal->is_simple()); - return object_literal->boilerplate_description(); - } else { - DCHECK(expression->IsArrayLiteral()); - ArrayLiteral* array_literal = expression->AsArrayLiteral(); - DCHECK(array_literal->is_simple()); - return array_literal->boilerplate_description(); - } + if (expr->IsObjectLiteral()) { + return expr->AsObjectLiteral()->builder()->InitDepthAndFlags(); } - return isolate->factory()->uninitialized_value(); + DCHECK(expr->IsRegExpLiteral()); } -template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) - Handle MaterializedLiteral::GetBoilerplateValue( - Expression* expression, Isolate* isolate); -template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) - Handle MaterializedLiteral::GetBoilerplateValue( - Expression* expression, LocalIsolate* isolate); -int MaterializedLiteral::InitDepthAndFlags() { - if (IsArrayLiteral()) return AsArrayLiteral()->InitDepthAndFlags(); - if (IsObjectLiteral()) return AsObjectLiteral()->InitDepthAndFlags(); - DCHECK(IsRegExpLiteral()); - return 1; -} +bool MaterializedLiteral::NeedsInitialAllocationSite( -bool MaterializedLiteral::NeedsInitialAllocationSite() { +) { if (IsArrayLiteral()) { - return AsArrayLiteral()->needs_initial_allocation_site(); + return AsArrayLiteral()->builder()->needs_initial_allocation_site(); } if (IsObjectLiteral()) { - return AsObjectLiteral()->needs_initial_allocation_site(); + return AsObjectLiteral()->builder()->needs_initial_allocation_site(); } DCHECK(IsRegExpLiteral()); return false; } template -void MaterializedLiteral::BuildConstants(IsolateT* isolate) { - if (IsArrayLiteral()) { - AsArrayLiteral()->BuildBoilerplateDescription(isolate); +void LiteralBoilerplateBuilder::BuildConstants(IsolateT* isolate, + MaterializedLiteral* expr) { + if (expr->IsArrayLiteral()) { + expr->AsArrayLiteral()->builder()->BuildBoilerplateDescription(isolate); return; } - if (IsObjectLiteral()) { - 
AsObjectLiteral()->BuildBoilerplateDescription(isolate); + if (expr->IsObjectLiteral()) { + expr->AsObjectLiteral()->builder()->BuildBoilerplateDescription(isolate); return; } - DCHECK(IsRegExpLiteral()); + DCHECK(expr->IsRegExpLiteral()); } -template EXPORT_TEMPLATE_DEFINE( - V8_BASE_EXPORT) void MaterializedLiteral::BuildConstants(Isolate* isolate); -template EXPORT_TEMPLATE_DEFINE( - V8_BASE_EXPORT) void MaterializedLiteral::BuildConstants(LocalIsolate* - isolate); +template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) void LiteralBoilerplateBuilder:: + BuildConstants(Isolate* isolate, MaterializedLiteral* expr); +template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) void LiteralBoilerplateBuilder:: + BuildConstants(LocalIsolate* isolate, MaterializedLiteral* expr); template Handle GetTemplateObject::GetOrBuildDescription( IsolateT* isolate) { - Handle raw_strings = isolate->factory()->NewFixedArray( + Handle raw_strings_handle = isolate->factory()->NewFixedArray( this->raw_strings()->length(), AllocationType::kOld); bool raw_and_cooked_match = true; - for (int i = 0; i < raw_strings->length(); ++i) { - if (this->raw_strings()->at(i) != this->cooked_strings()->at(i)) { - // If the AstRawStrings don't match, then neither should the allocated - // Strings, since the AstValueFactory should have deduplicated them - // already. - DCHECK_IMPLIES(this->cooked_strings()->at(i) != nullptr, - *this->cooked_strings()->at(i)->string() != - *this->raw_strings()->at(i)->string()); - - raw_and_cooked_match = false; + { + DisallowGarbageCollection no_gc; + FixedArray raw_strings = *raw_strings_handle; + + for (int i = 0; i < raw_strings.length(); ++i) { + if (this->raw_strings()->at(i) != this->cooked_strings()->at(i)) { + // If the AstRawStrings don't match, then neither should the allocated + // Strings, since the AstValueFactory should have deduplicated them + // already. 
+ DCHECK_IMPLIES(this->cooked_strings()->at(i) != nullptr, + *this->cooked_strings()->at(i)->string() != + *this->raw_strings()->at(i)->string()); + + raw_and_cooked_match = false; + } + raw_strings.set(i, *this->raw_strings()->at(i)->string()); } - raw_strings->set(i, *this->raw_strings()->at(i)->string()); } - Handle cooked_strings = raw_strings; + Handle cooked_strings_handle = raw_strings_handle; if (!raw_and_cooked_match) { - cooked_strings = isolate->factory()->NewFixedArray( + cooked_strings_handle = isolate->factory()->NewFixedArray( this->cooked_strings()->length(), AllocationType::kOld); - for (int i = 0; i < cooked_strings->length(); ++i) { + DisallowGarbageCollection no_gc; + FixedArray cooked_strings = *cooked_strings_handle; + ReadOnlyRoots roots(isolate); + for (int i = 0; i < cooked_strings.length(); ++i) { if (this->cooked_strings()->at(i) != nullptr) { - cooked_strings->set(i, *this->cooked_strings()->at(i)->string()); + cooked_strings.set(i, *this->cooked_strings()->at(i)->string()); } else { - cooked_strings->set(i, ReadOnlyRoots(isolate).undefined_value()); + cooked_strings.set_undefined(roots, i); } } } - return isolate->factory()->NewTemplateObjectDescription(raw_strings, - cooked_strings); + return isolate->factory()->NewTemplateObjectDescription( + raw_strings_handle, cooked_strings_handle); } template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) Handle GetTemplateObject::GetOrBuildDescription( diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h index f7b3f247f72896..1fb5abdf8f8fc0 100644 --- a/deps/v8/src/ast/ast.h +++ b/deps/v8/src/ast/ast.h @@ -10,6 +10,7 @@ #include "src/ast/ast-value-factory.h" #include "src/ast/modules.h" #include "src/ast/variables.h" +#include "src/base/pointer-with-payload.h" #include "src/base/threaded-list.h" #include "src/codegen/bailout-reason.h" #include "src/codegen/label.h" @@ -1048,26 +1049,13 @@ class MaterializedLiteral : public Expression { protected: MaterializedLiteral(int pos, NodeType type) : 
Expression(pos, type) {} - friend class CompileTimeValue; - friend class ArrayLiteral; - friend class ObjectLiteral; - - // Populate the depth field and any flags the literal has, returns the depth. - int InitDepthAndFlags(); - bool NeedsInitialAllocationSite(); - // Populate the constant properties/elements fixed array. - template - void BuildConstants(IsolateT* isolate); + friend class CompileTimeValue; - // If the expression is a literal, return the literal value; - // if the expression is a materialized literal and is_simple - // then return an Array or Object Boilerplate Description - // Otherwise, return undefined literal as the placeholder - // in the object literal boilerplate. - template - Handle GetBoilerplateValue(Expression* expression, IsolateT* isolate); + friend class LiteralBoilerplateBuilder; + friend class ArrayLiteralBoilerplateBuilder; + friend class ObjectLiteralBoilerplateBuilder; }; // Node for capturing a regexp literal. @@ -1090,8 +1078,7 @@ class RegExpLiteral final : public MaterializedLiteral { const AstRawString* const pattern_; }; -// Base class for Array and Object literals, providing common code for handling -// nested subliterals. +// Base class for Array and Object literals class AggregateLiteral : public MaterializedLiteral { public: enum Flags { @@ -1102,22 +1089,47 @@ class AggregateLiteral : public MaterializedLiteral { kIsShallowAndDisableMementos = kIsShallow | kDisableMementos, }; - bool is_initialized() const { return 0 < depth_; } - int depth() const { + protected: + AggregateLiteral(int pos, NodeType type) : MaterializedLiteral(pos, type) {} +}; + +// Base class for build literal boilerplate, providing common code for handling +// nested subliterals. 
+class LiteralBoilerplateBuilder { + public: + enum DepthKind { kUninitialized, kShallow, kNotShallow }; + + static constexpr int kDepthKindBits = 2; + STATIC_ASSERT((1 << kDepthKindBits) > kNotShallow); + + bool is_initialized() const { + return kUninitialized != DepthField::decode(bit_field_); + } + DepthKind depth() const { DCHECK(is_initialized()); - return depth_; + return DepthField::decode(bit_field_); } - bool is_shallow() const { return depth() == 1; } + // If the expression is a literal, return the literal value; + // if the expression is a materialized literal and is_simple + // then return an Array or Object Boilerplate Description + // Otherwise, return undefined literal as the placeholder + // in the object literal boilerplate. + template + static Handle GetBoilerplateValue(Expression* expression, + IsolateT* isolate); + + bool is_shallow() const { return depth() == kShallow; } bool needs_initial_allocation_site() const { return NeedsInitialAllocationSiteField::decode(bit_field_); } int ComputeFlags(bool disable_mementos = false) const { - int flags = kNoFlags; - if (is_shallow()) flags |= kIsShallow; - if (disable_mementos) flags |= kDisableMementos; - if (needs_initial_allocation_site()) flags |= kNeedsInitialAllocationSite; + int flags = AggregateLiteral::kNoFlags; + if (is_shallow()) flags |= AggregateLiteral::kIsShallow; + if (disable_mementos) flags |= AggregateLiteral::kDisableMementos; + if (needs_initial_allocation_site()) + flags |= AggregateLiteral::kNeedsInitialAllocationSite; return flags; } @@ -1130,19 +1142,22 @@ class AggregateLiteral : public MaterializedLiteral { } private: - int depth_ : 31; - using NeedsInitialAllocationSiteField = - MaterializedLiteral::NextBitField; + // we actually only care three conditions for depth + // - depth == kUninitialized, DCHECK(!is_initialized()) + // - depth == kShallow, which means depth = 1 + // - depth == kNotShallow, which means depth > 1 + using DepthField = base::BitField; + using 
NeedsInitialAllocationSiteField = DepthField::Next; using IsSimpleField = NeedsInitialAllocationSiteField::Next; using BoilerplateDescriptorKindField = IsSimpleField::Next; protected: - friend class AstNodeFactory; - friend Zone; - AggregateLiteral(int pos, NodeType type) - : MaterializedLiteral(pos, type), depth_(0) { - bit_field_ |= + uint32_t bit_field_; + + LiteralBoilerplateBuilder() { + bit_field_ = + DepthField::encode(kUninitialized) | NeedsInitialAllocationSiteField::encode(false) | IsSimpleField::encode(false) | BoilerplateDescriptorKindField::encode(FIRST_FAST_ELEMENTS_KIND); @@ -1157,15 +1172,22 @@ class AggregateLiteral : public MaterializedLiteral { bit_field_ = BoilerplateDescriptorKindField::update(bit_field_, kind); } - void set_depth(int depth) { + void set_depth(DepthKind depth) { DCHECK(!is_initialized()); - depth_ = depth; + bit_field_ = DepthField::update(bit_field_, depth); } void set_needs_initial_allocation_site(bool required) { bit_field_ = NeedsInitialAllocationSiteField::update(bit_field_, required); } + // Populate the depth field and any flags the literal builder has + static void InitDepthAndFlags(MaterializedLiteral* expr); + + // Populate the constant properties/elements fixed array. + template + void BuildConstants(IsolateT* isolate, MaterializedLiteral* expr); + template using NextBitField = BoilerplateDescriptorKindField::Next; }; @@ -1185,7 +1207,7 @@ class LiteralProperty : public ZoneObject { LiteralProperty(Expression* key, Expression* value, bool is_computed_name) : key_and_is_computed_name_(key, is_computed_name), value_(value) {} - PointerWithPayload key_and_is_computed_name_; + base::PointerWithPayload key_and_is_computed_name_; Expression* value_; }; @@ -1229,18 +1251,30 @@ class ObjectLiteralProperty final : public LiteralProperty { bool emit_store_; }; -// An object literal has a boilerplate object that is used -// for minimizing the work when constructing it at runtime. 
-class ObjectLiteral final : public AggregateLiteral { +// class for build object boilerplate +class ObjectLiteralBoilerplateBuilder final : public LiteralBoilerplateBuilder { public: using Property = ObjectLiteralProperty; + ObjectLiteralBoilerplateBuilder(ZoneList* properties, + uint32_t boilerplate_properties, + bool has_rest_property) + : properties_(properties), + boilerplate_properties_(boilerplate_properties) { + bit_field_ |= HasElementsField::encode(false) | + HasRestPropertyField::encode(has_rest_property) | + FastElementsField::encode(false) | + HasNullPrototypeField::encode(false); + } Handle boilerplate_description() const { DCHECK(!boilerplate_description_.is_null()); return boilerplate_description_; } + // Determines whether the {CreateShallowArrayLiteral} builtin can be used. + bool IsFastCloningSupported() const; + int properties_count() const { return boilerplate_properties_; } - const ZonePtrList* properties() const { return &properties_; } + const ZonePtrList* properties() const { return properties_; } bool has_elements() const { return HasElementsField::decode(bit_field_); } bool has_rest_property() const { return HasRestPropertyField::decode(bit_field_); @@ -1250,18 +1284,9 @@ class ObjectLiteral final : public AggregateLiteral { return HasNullPrototypeField::decode(bit_field_); } - bool is_empty() const { - DCHECK(is_initialized()); - return !has_elements() && properties_count() == 0 && - properties()->length() == 0; - } - - bool IsEmptyObjectLiteral() const { - return is_empty() && !has_null_prototype(); - } - - // Populate the depth field and flags, returns the depth. - int InitDepthAndFlags(); + // Populate the boilerplate description. + template + void BuildBoilerplateDescription(IsolateT* isolate); // Get the boilerplate description, populating it if necessary. 
template @@ -1270,37 +1295,53 @@ class ObjectLiteral final : public AggregateLiteral { if (boilerplate_description_.is_null()) { BuildBoilerplateDescription(isolate); } - return boilerplate_description(); + return boilerplate_description_; } - // Populate the boilerplate description. - template - void BuildBoilerplateDescription(IsolateT* isolate); + bool is_empty() const { + DCHECK(is_initialized()); + return !has_elements() && properties_count() == 0 && + properties()->length() == 0; + } + // Assemble bitfield of flags for the CreateObjectLiteral helper. + int ComputeFlags(bool disable_mementos = false) const; - // Mark all computed expressions that are bound to a key that - // is shadowed by a later occurrence of the same key. For the - // marked expressions, no store code is emitted. - void CalculateEmitStore(Zone* zone); + bool IsEmptyObjectLiteral() const { + return is_empty() && !has_null_prototype(); + } - // Determines whether the {CreateShallowObjectLiteratal} builtin can be used. - bool IsFastCloningSupported() const; + int EncodeLiteralType(); - // Assemble bitfield of flags for the CreateObjectLiteral helper. - int ComputeFlags(bool disable_mementos = false) const { - int flags = AggregateLiteral::ComputeFlags(disable_mementos); - if (fast_elements()) flags |= kFastElements; - if (has_null_prototype()) flags |= kHasNullPrototype; - return flags; - } + // Populate the depth field and flags, returns the depth. 
+ void InitDepthAndFlags(); - int EncodeLiteralType() { - int flags = kNoFlags; - if (fast_elements()) flags |= kFastElements; - if (has_null_prototype()) flags |= kHasNullPrototype; - return flags; + private: + void InitFlagsForPendingNullPrototype(int i); + + void set_has_elements(bool has_elements) { + bit_field_ = HasElementsField::update(bit_field_, has_elements); } + void set_fast_elements(bool fast_elements) { + bit_field_ = FastElementsField::update(bit_field_, fast_elements); + } + void set_has_null_protoype(bool has_null_prototype) { + bit_field_ = HasNullPrototypeField::update(bit_field_, has_null_prototype); + } + ZoneList* properties_; + uint32_t boilerplate_properties_; + Handle boilerplate_description_; - Variable* home_object() const { return home_object_; } + using HasElementsField = LiteralBoilerplateBuilder::NextBitField; + using HasRestPropertyField = HasElementsField::Next; + using FastElementsField = HasRestPropertyField::Next; + using HasNullPrototypeField = FastElementsField::Next; +}; + +// An object literal has a boilerplate object that is used +// for minimizing the work when constructing it at runtime. +class ObjectLiteral final : public AggregateLiteral { + public: + using Property = ObjectLiteralProperty; enum Flags { kFastElements = 1 << 3, @@ -1310,6 +1351,19 @@ class ObjectLiteral final : public AggregateLiteral { static_cast(AggregateLiteral::kNeedsInitialAllocationSite) < static_cast(kFastElements)); + // Mark all computed expressions that are bound to a key that + // is shadowed by a later occurrence of the same key. For the + // marked expressions, no store code is emitted. 
+ void CalculateEmitStore(Zone* zone); + + ZoneList* properties() { return &properties_; } + + const ObjectLiteralBoilerplateBuilder* builder() const { return &builder_; } + + ObjectLiteralBoilerplateBuilder* builder() { return &builder_; } + + Variable* home_object() const { return home_object_; } + private: friend class AstNodeFactory; friend Zone; @@ -1318,51 +1372,38 @@ class ObjectLiteral final : public AggregateLiteral { uint32_t boilerplate_properties, int pos, bool has_rest_property, Variable* home_object) : AggregateLiteral(pos, kObjectLiteral), - boilerplate_properties_(boilerplate_properties), properties_(properties.ToConstVector(), zone), - home_object_(home_object) { - bit_field_ |= HasElementsField::encode(false) | - HasRestPropertyField::encode(has_rest_property) | - FastElementsField::encode(false) | - HasNullPrototypeField::encode(false); - } - - void InitFlagsForPendingNullPrototype(int i); + home_object_(home_object), + builder_(&properties_, boilerplate_properties, has_rest_property) {} - void set_has_elements(bool has_elements) { - bit_field_ = HasElementsField::update(bit_field_, has_elements); - } - void set_fast_elements(bool fast_elements) { - bit_field_ = FastElementsField::update(bit_field_, fast_elements); - } - void set_has_null_protoype(bool has_null_prototype) { - bit_field_ = HasNullPrototypeField::update(bit_field_, has_null_prototype); - } - uint32_t boilerplate_properties_; - Handle boilerplate_description_; ZoneList properties_; Variable* home_object_; - - using HasElementsField = AggregateLiteral::NextBitField; - using HasRestPropertyField = HasElementsField::Next; - using FastElementsField = HasRestPropertyField::Next; - using HasNullPrototypeField = FastElementsField::Next; + ObjectLiteralBoilerplateBuilder builder_; }; -// An array literal has a literals object that is used -// for minimizing the work when constructing it at runtime. 
-class ArrayLiteral final : public AggregateLiteral { +// class for build boilerplate for array literal, including +// array_literal, spread call elements +class ArrayLiteralBoilerplateBuilder final : public LiteralBoilerplateBuilder { public: + ArrayLiteralBoilerplateBuilder(const ZonePtrList* values, + int first_spread_index) + : values_(values), first_spread_index_(first_spread_index) {} Handle boilerplate_description() const { return boilerplate_description_; } - const ZonePtrList* values() const { return &values_; } + // Determines whether the {CreateShallowArrayLiteral} builtin can be used. + bool IsFastCloningSupported() const; + + // Assemble bitfield of flags for the CreateArrayLiteral helper. + int ComputeFlags(bool disable_mementos = false) const { + return LiteralBoilerplateBuilder::ComputeFlags(disable_mementos); + } int first_spread_index() const { return first_spread_index_; } - // Populate the depth field and flags, returns the depth. - int InitDepthAndFlags(); + // Populate the depth field and flags + void InitDepthAndFlags(); // Get the boilerplate description, populating it if necessary. template @@ -1378,13 +1419,19 @@ class ArrayLiteral final : public AggregateLiteral { template void BuildBoilerplateDescription(IsolateT* isolate); - // Determines whether the {CreateShallowArrayLiteral} builtin can be used. - bool IsFastCloningSupported() const; + const ZonePtrList* values_; + int first_spread_index_; + Handle boilerplate_description_; +}; - // Assemble bitfield of flags for the CreateArrayLiteral helper. - int ComputeFlags(bool disable_mementos = false) const { - return AggregateLiteral::ComputeFlags(disable_mementos); - } +// An array literal has a literals object that is used +// for minimizing the work when constructing it at runtime. 
+class ArrayLiteral final : public AggregateLiteral { + public: + const ZonePtrList* values() const { return &values_; } + + const ArrayLiteralBoilerplateBuilder* builder() const { return &builder_; } + ArrayLiteralBoilerplateBuilder* builder() { return &builder_; } private: friend class AstNodeFactory; @@ -1393,12 +1440,11 @@ class ArrayLiteral final : public AggregateLiteral { ArrayLiteral(Zone* zone, const ScopedPtrList& values, int first_spread_index, int pos) : AggregateLiteral(pos, kArrayLiteral), - first_spread_index_(first_spread_index), - values_(values.ToConstVector(), zone) {} + values_(values.ToConstVector(), zone), + builder_(&values_, first_spread_index) {} - int first_spread_index_; - Handle boilerplate_description_; ZonePtrList values_; + ArrayLiteralBoilerplateBuilder builder_; }; enum class HoleCheckMode { kRequired, kElided }; @@ -2245,12 +2291,8 @@ class FunctionLiteral final : public Expression { return HasStaticPrivateMethodsOrAccessorsField::decode(bit_field_); } - void set_class_scope_has_private_brand(bool value) { - bit_field_ = ClassScopeHasPrivateBrandField::update(bit_field_, value); - } - bool class_scope_has_private_brand() const { - return ClassScopeHasPrivateBrandField::decode(bit_field_); - } + void set_class_scope_has_private_brand(bool value); + bool class_scope_has_private_brand() const; bool private_name_lookup_skips_outer_class() const; @@ -2299,10 +2341,8 @@ class FunctionLiteral final : public Expression { using HasDuplicateParameters = Pretenure::Next; using RequiresInstanceMembersInitializer = HasDuplicateParameters::Next; - using ClassScopeHasPrivateBrandField = - RequiresInstanceMembersInitializer::Next; using HasStaticPrivateMethodsOrAccessorsField = - ClassScopeHasPrivateBrandField::Next; + RequiresInstanceMembersInitializer::Next; using HasBracesField = HasStaticPrivateMethodsOrAccessorsField::Next; using ShouldParallelCompileField = HasBracesField::Next; diff --git a/deps/v8/src/ast/scopes.cc 
b/deps/v8/src/ast/scopes.cc index 6758079823be64..679472c7c62916 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -166,17 +166,19 @@ DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope, ModuleScope::ModuleScope(DeclarationScope* script_scope, AstValueFactory* avfactory) - : DeclarationScope(avfactory->zone(), script_scope, MODULE_SCOPE, - FunctionKind::kModule), - module_descriptor_(avfactory->zone()->New( - avfactory->zone())) { + : DeclarationScope(avfactory->single_parse_zone(), script_scope, + MODULE_SCOPE, FunctionKind::kModule), + module_descriptor_( + avfactory->single_parse_zone()->New( + avfactory->single_parse_zone())) { set_language_mode(LanguageMode::kStrict); DeclareThis(avfactory); } ModuleScope::ModuleScope(Handle scope_info, AstValueFactory* avfactory) - : DeclarationScope(avfactory->zone(), MODULE_SCOPE, avfactory, scope_info), + : DeclarationScope(avfactory->single_parse_zone(), MODULE_SCOPE, avfactory, + scope_info), module_descriptor_(nullptr) { set_language_mode(LanguageMode::kStrict); } @@ -195,7 +197,7 @@ ClassScope::ClassScope(IsolateT* isolate, Zone* zone, : Scope(zone, CLASS_SCOPE, ast_value_factory, scope_info), rare_data_and_is_parsing_heritage_(nullptr) { set_language_mode(LanguageMode::kStrict); - if (scope_info->HasClassBrand()) { + if (scope_info->ClassScopeHasPrivateBrand()) { Variable* brand = LookupInScopeInfo(ast_value_factory->dot_brand_string(), this); DCHECK_NOT_NULL(brand); @@ -204,11 +206,10 @@ ClassScope::ClassScope(IsolateT* isolate, Zone* zone, // If the class variable is context-allocated and its index is // saved for deserialization, deserialize it. 
- if (scope_info->HasSavedClassVariableIndex()) { - int index = scope_info->SavedClassVariableContextLocalIndex(); - DCHECK_GE(index, 0); - DCHECK_LT(index, scope_info->ContextLocalCount()); - String name = scope_info->ContextLocalName(index); + if (scope_info->HasSavedClassVariable()) { + String name; + int index; + std::tie(name, index) = scope_info->SavedClassVariable(); DCHECK_EQ(scope_info->ContextLocalMode(index), VariableMode::kConst); DCHECK_EQ(scope_info->ContextLocalInitFlag(index), InitializationFlag::kNeedsInitialization); @@ -222,6 +223,10 @@ ClassScope::ClassScope(IsolateT* isolate, Zone* zone, var->AllocateTo(VariableLocation::CONTEXT, Context::MIN_CONTEXT_SLOTS + index); } + + DCHECK(scope_info->HasPositionInfo()); + set_start_position(scope_info->StartPosition()); + set_end_position(scope_info->EndPosition()); } template ClassScope::ClassScope(Isolate* isolate, Zone* zone, AstValueFactory* ast_value_factory, @@ -252,11 +257,9 @@ Scope::Scope(Zone* zone, ScopeType scope_type, if (scope_type == BLOCK_SCOPE) { // Set is_block_scope_for_object_literal_ based on the existince of the home // object variable (we don't store it explicitly). 
- VariableLookupResult lookup_result; DCHECK_NOT_NULL(ast_value_factory); - int home_object_index = ScopeInfo::ContextSlotIndex( - *scope_info, *(ast_value_factory->dot_home_object_string()->string()), - &lookup_result); + int home_object_index = scope_info->ContextSlotIndex( + ast_value_factory->dot_home_object_string()->string()); DCHECK_IMPLIES(home_object_index >= 0, scope_type == CLASS_SCOPE || scope_type == BLOCK_SCOPE); if (home_object_index >= 0) { @@ -277,6 +280,10 @@ DeclarationScope::DeclarationScope(Zone* zone, ScopeType scope_type, DCHECK(!is_eval_scope()); sloppy_eval_can_extend_vars_ = true; } + if (scope_info->ClassScopeHasPrivateBrand()) { + DCHECK(IsClassConstructor(function_kind())); + class_scope_has_private_brand_ = true; + } } Scope::Scope(Zone* zone, const AstRawString* catch_variable_name, @@ -324,6 +331,7 @@ void DeclarationScope::SetDefaults() { was_lazily_parsed_ = false; is_skipped_function_ = false; preparse_data_builder_ = nullptr; + class_scope_has_private_brand_ = false; #ifdef DEBUG DeclarationScope* outer_declaration_scope = outer_scope_ ? 
outer_scope_->GetDeclarationScope() : nullptr; @@ -469,7 +477,8 @@ Scope* Scope::DeserializeScopeChain(IsolateT* isolate, Zone* zone, DCHECK_EQ(scope_info.ContextLocalCount(), 1); DCHECK_EQ(scope_info.ContextLocalMode(0), VariableMode::kVar); DCHECK_EQ(scope_info.ContextLocalInitFlag(0), kCreatedInitialized); - String name = scope_info.ContextLocalName(0); + DCHECK(scope_info.HasInlinedLocalNames()); + String name = scope_info.ContextInlinedLocalName(0); MaybeAssignedFlag maybe_assigned = scope_info.ContextLocalMaybeAssignedFlag(0); outer_scope = @@ -499,10 +508,8 @@ Scope* Scope::DeserializeScopeChain(IsolateT* isolate, Zone* zone, : ScopeInfo(); } - if (deserialization_mode == DeserializationMode::kIncludingVariables && - script_scope->scope_info_.is_null()) { - script_scope->SetScriptScopeInfo( - ReadOnlyRoots(isolate).global_this_binding_scope_info_handle()); + if (deserialization_mode == DeserializationMode::kIncludingVariables) { + SetScriptScopeInfo(isolate, script_scope); } if (innermost_scope == nullptr) return script_scope; @@ -510,6 +517,24 @@ Scope* Scope::DeserializeScopeChain(IsolateT* isolate, Zone* zone, return innermost_scope; } +template +void Scope::SetScriptScopeInfo(IsolateT* isolate, + DeclarationScope* script_scope) { + if (script_scope->scope_info_.is_null()) { + script_scope->SetScriptScopeInfo( + ReadOnlyRoots(isolate).global_this_binding_scope_info_handle()); + } +} + +template EXPORT_TEMPLATE_DEFINE( + V8_EXPORT_PRIVATE) void Scope::SetScriptScopeInfo(Isolate* isolate, + DeclarationScope* + script_scope); +template EXPORT_TEMPLATE_DEFINE( + V8_EXPORT_PRIVATE) void Scope::SetScriptScopeInfo(LocalIsolate* isolate, + DeclarationScope* + script_scope); + template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) Scope* Scope::DeserializeScopeChain( Isolate* isolate, Zone* zone, ScopeInfo scope_info, @@ -722,14 +747,28 @@ void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) { DCHECK(is_function_scope()); 
DCHECK(!is_arrow_scope()); + // Because when arguments_ is not nullptr, we already declared + // "arguments exotic object" to add it into parameters before + // impl()->InsertShadowingVarBindingInitializers, so here + // only declare "arguments exotic object" when arguments_ + // is nullptr + if (arguments_ != nullptr) { + return; + } + // Declare 'arguments' variable which exists in all non arrow functions. Note // that it might never be accessed, in which case it won't be allocated during // variable allocation. - bool was_added; + bool was_added = false; + arguments_ = Declare(zone(), ast_value_factory->arguments_string(), VariableMode::kVar, NORMAL_VARIABLE, kCreatedInitialized, kNotAssigned, &was_added); - if (!was_added && IsLexicalVariableMode(arguments_->mode())) { + // According to ES#sec-functiondeclarationinstantiation step 18 + // we should set argumentsObjectNeeded to false if has lexical + // declared arguments only when hasParameterExpressions is false + if (!was_added && IsLexicalVariableMode(arguments_->mode()) && + has_simple_parameters_) { // Check if there's lexically declared variable named arguments to avoid // redeclaration. See ES#sec-functiondeclarationinstantiation, step 20. arguments_ = nullptr; @@ -939,8 +978,7 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) { { location = VariableLocation::CONTEXT; - index = - ScopeInfo::ContextSlotIndex(scope_info, name_handle, &lookup_result); + index = scope_info.ContextSlotIndex(name->string(), &lookup_result); found = index >= 0; } @@ -1446,7 +1484,7 @@ bool Scope::NeedsScopeInfo() const { DCHECK(!already_resolved_); DCHECK(GetClosureScope()->ShouldEagerCompile()); // The debugger expects all functions to have scope infos. - // TODO(jochen|yangguo): Remove this requirement. + // TODO(yangguo): Remove this requirement. 
if (is_function_scope()) return true; return NeedsContext(); } @@ -1465,6 +1503,18 @@ DeclarationScope* Scope::GetReceiverScope() { return scope->AsDeclarationScope(); } +DeclarationScope* Scope::GetConstructorScope() { + Scope* scope = this; + while (scope != nullptr && !scope->IsConstructorScope()) { + scope = scope->outer_scope(); + } + if (scope == nullptr) { + return nullptr; + } + DCHECK(scope->IsConstructorScope()); + return scope->AsDeclarationScope(); +} + Scope* Scope::GetHomeObjectScope() { Scope* scope = this; while (scope != nullptr && !scope->is_home_object_scope()) { @@ -1532,6 +1582,11 @@ void Scope::ForEach(FunctionType callback) { } } +bool Scope::IsConstructorScope() const { + return is_declaration_scope() && + IsClassConstructor(AsDeclarationScope()->function_kind()); +} + bool Scope::IsOuterScopeOf(Scope* other) const { Scope* scope = other; while (scope) { @@ -1634,18 +1689,18 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory, has_rest_ = false; function_ = nullptr; - DCHECK_NE(zone(), ast_value_factory->zone()); + DCHECK_NE(zone(), ast_value_factory->single_parse_zone()); // Make sure this scope and zone aren't used for allocation anymore. { // Get the zone, while variables_ is still valid Zone* zone = this->zone(); variables_.Invalidate(); - zone->ReleaseMemory(); + zone->Reset(); } if (aborted) { // Prepare scope for use in the outer zone. 
- variables_ = VariableMap(ast_value_factory->zone()); + variables_ = VariableMap(ast_value_factory->single_parse_zone()); if (!IsArrowFunction(function_kind_)) { has_simple_parameters_ = true; DeclareDefaultFunctionVariables(ast_value_factory); @@ -1906,6 +1961,9 @@ void Scope::Print(int n) { } Indent(n1, "// "); PrintF("%s\n", FunctionKind2String(scope->function_kind())); + if (scope->class_scope_has_private_brand()) { + Indent(n1, "// class scope has private brand\n"); + } } if (num_stack_slots_ > 0) { Indent(n1, "// "); @@ -2656,7 +2714,7 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* info, IsolateT* isolate) { // The debugger expects all shared function infos to contain a scope info. // Since the top-most scope will end up in a shared function info, make sure // it has one, even if it doesn't need a scope info. - // TODO(jochen|yangguo): Remove this requirement. + // TODO(yangguo): Remove this requirement. if (scope->scope_info_.is_null()) { scope->scope_info_ = ScopeInfo::Create(isolate, scope->zone(), scope, outer_scope); @@ -2684,6 +2742,48 @@ int Scope::ContextLocalCount() const { (is_function_var_in_context ? 1 : 0); } +VariableProxy* Scope::NewHomeObjectVariableProxy(AstNodeFactory* factory, + const AstRawString* name, + int start_pos) { + // VariableProxies of the home object cannot be resolved like a normal + // variable. Consider the case of a super.property usage in heritage position: + // + // class C extends super.foo { m() { super.bar(); } } + // + // The super.foo property access is logically nested under C's class scope, + // which also has a home object due to its own method m's usage of + // super.bar(). However, super.foo must resolve super in C's outer scope. + // + // Because of the above, home object VariableProxies are always made directly + // on the Scope that needs the home object instead of the innermost scope. 
+ DCHECK(needs_home_object()); + if (!scope_info_.is_null()) { + // This is a lazy compile, so the home object's context slot is already + // known. + Variable* home_object = variables_.Lookup(name); + if (home_object == nullptr) { + VariableLookupResult lookup_result; + int index = scope_info_->ContextSlotIndex(name->string(), &lookup_result); + DCHECK_GE(index, 0); + bool was_added; + home_object = variables_.Declare(zone(), this, name, lookup_result.mode, + NORMAL_VARIABLE, lookup_result.init_flag, + lookup_result.maybe_assigned_flag, + IsStaticFlag::kNotStatic, &was_added); + DCHECK(was_added); + home_object->AllocateTo(VariableLocation::CONTEXT, index); + } + return factory->NewVariableProxy(home_object, start_pos); + } + // This is not a lazy compile. Add the unresolved home object VariableProxy to + // the unresolved list of the home object scope, which is not necessarily the + // innermost scope. + VariableProxy* proxy = + factory->NewVariableProxy(name, NORMAL_VARIABLE, start_pos); + AddUnresolved(proxy); + return proxy; +} + bool IsComplementaryAccessorPair(VariableMode a, VariableMode b) { switch (a) { case VariableMode::kPrivateGetterOnly: @@ -2695,53 +2795,42 @@ bool IsComplementaryAccessorPair(VariableMode a, VariableMode b) { } } -void ClassScope::ReplaceReparsedClassScope(Isolate* isolate, - AstValueFactory* ast_value_factory, - ClassScope* old_scope) { - DCHECK_EQ(outer_scope_, old_scope->outer_scope()); - Scope* outer = outer_scope_; - - outer->RemoveInnerScope(old_scope); - // The outer scope should only have this deserialized inner scope, - // otherwise we have to update the sibling scopes. 
- DCHECK_EQ(outer->inner_scope_, this); - DCHECK_NULL(sibling_); - - DCHECK_NULL(old_scope->inner_scope_); +void ClassScope::FinalizeReparsedClassScope( + Isolate* isolate, MaybeHandle maybe_scope_info, + AstValueFactory* ast_value_factory, bool needs_allocation_fixup) { + // Set this bit so that DelcarationScope::Analyze recognizes + // the reparsed instance member initializer scope. +#ifdef DEBUG + is_reparsed_class_scope_ = true; +#endif - Handle scope_info = old_scope->scope_info_; - DCHECK(!scope_info.is_null()); - DCHECK(!scope_info->IsEmpty()); + if (!needs_allocation_fixup) { + return; + } // Restore variable allocation results for context-allocated variables in // the class scope from ScopeInfo, so that we don't need to run // resolution and allocation on these variables again when generating // code for the initializer function. - int context_local_count = scope_info->ContextLocalCount(); + DCHECK(!maybe_scope_info.is_null()); + Handle scope_info = maybe_scope_info.ToHandleChecked(); + DCHECK_EQ(scope_info->scope_type(), CLASS_SCOPE); + DCHECK_EQ(scope_info->StartPosition(), start_position_); + int context_header_length = scope_info->ContextHeaderLength(); DisallowGarbageCollection no_gc; - for (int i = 0; i < context_local_count; ++i) { - int slot_index = context_header_length + i; + for (auto it : ScopeInfo::IterateLocalNames(scope_info)) { + int slot_index = context_header_length + it->index(); DCHECK_LT(slot_index, scope_info->ContextLength()); - String name = scope_info->ContextLocalName(i); const AstRawString* string = ast_value_factory->GetString( - name, SharedStringAccessGuardIfNeeded(isolate)); - Variable* var = nullptr; - - var = string->IsPrivateName() ? LookupLocalPrivateName(string) - : LookupLocal(string); + it->name(), SharedStringAccessGuardIfNeeded(isolate)); + Variable* var = string->IsPrivateName() ? 
LookupLocalPrivateName(string) + : LookupLocal(string); DCHECK_NOT_NULL(var); var->AllocateTo(VariableLocation::CONTEXT, slot_index); } - scope_info_ = scope_info; - - // Set this bit so that DelcarationScope::Analyze recognizes - // the reparsed instance member initializer scope. -#ifdef DEBUG - is_reparsed_class_scope_ = true; -#endif } Variable* ClassScope::DeclarePrivateName(const AstRawString* name, @@ -2833,10 +2922,8 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) { DCHECK_NULL(LookupLocalPrivateName(name)); DisallowGarbageCollection no_gc; - String name_handle = *name->string(); VariableLookupResult lookup_result; - int index = - ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &lookup_result); + int index = scope_info_->ContextSlotIndex(name->string(), &lookup_result); if (index < 0) { return nullptr; } diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h index c04d99b4b0eab3..6f701ead0b1f19 100644 --- a/deps/v8/src/ast/scopes.h +++ b/deps/v8/src/ast/scopes.h @@ -10,16 +10,28 @@ #include "src/ast/ast.h" #include "src/base/compiler-specific.h" #include "src/base/hashmap.h" +#include "src/base/pointer-with-payload.h" #include "src/base/threaded-list.h" #include "src/common/globals.h" #include "src/objects/function-kind.h" #include "src/objects/objects.h" -#include "src/utils/pointer-with-payload.h" #include "src/utils/utils.h" #include "src/zone/zone-hashmap.h" #include "src/zone/zone.h" namespace v8 { + +namespace internal { +class Scope; +} // namespace internal + +namespace base { +template <> +struct PointerWithPayloadTraits { + static constexpr int kAvailableBits = 1; +}; +} // namespace base + namespace internal { class AstNodeFactory; @@ -64,13 +76,6 @@ class VariableMap : public ZoneHashMap { Zone* zone() const { return allocator().zone(); } }; -class Scope; - -template <> -struct PointerWithPayloadTraits { - static constexpr int value = 1; -}; - // Global invariants after AST construction: Each 
reference (i.e. identifier) // to a JavaScript variable (including global properties) is represented by a // VariableProxy node. Immediately after AST construction and before variable @@ -155,7 +160,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // Upon move assignment we store whether the new inner scope calls eval into // the move target calls_eval bit, and restore calls eval on the outer // scope. - PointerWithPayload outer_scope_and_calls_eval_; + base::PointerWithPayload outer_scope_and_calls_eval_; Scope* top_inner_scope_; UnresolvedList::Iterator top_unresolved_; base::ThreadedList::Iterator top_local_; @@ -171,6 +176,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { AstValueFactory* ast_value_factory, DeserializationMode deserialization_mode); + template + EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) + static void SetScriptScopeInfo(IsolateT* isolate, + DeclarationScope* script_scope); + // Checks if the block scope is redundant, i.e. it does not contain any // block scoped declarations. In that case it is removed from the scope // tree and its children are reparented. @@ -454,6 +464,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { kDescend }; + bool IsConstructorScope() const; + // Check is this scope is an outer scope of the given scope. bool IsOuterScopeOf(Scope* other) const; @@ -549,6 +561,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // 'this' is bound, and what determines the function kind. DeclarationScope* GetReceiverScope(); + // Find the first constructor scope. Its outer scope is where the instance + // members that should be initialized right after super() is called + // are declared. + DeclarationScope* GetConstructorScope(); + // Find the first class scope or object literal block scope. This is where // 'super' is bound. 
Scope* GetHomeObjectScope(); @@ -608,6 +625,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { needs_home_object_ = true; } + VariableProxy* NewHomeObjectVariableProxy(AstNodeFactory* factory, + const AstRawString* name, + int start_pos); + bool RemoveInnerScope(Scope* inner_scope) { DCHECK_NOT_NULL(inner_scope); if (inner_scope == inner_scope_) { @@ -866,7 +887,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { FunctionKind function_kind() const { return function_kind_; } // Inform the scope that the corresponding code uses "super". - void RecordSuperPropertyUsage() { + Scope* RecordSuperPropertyUsage() { DCHECK(IsConciseMethod(function_kind()) || IsAccessorFunction(function_kind()) || IsClassConstructor(function_kind())); @@ -874,6 +895,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { Scope* home_object_scope = GetHomeObjectScope(); DCHECK_NOT_NULL(home_object_scope); home_object_scope->set_needs_home_object(); + return home_object_scope; } bool uses_super_property() const { return uses_super_property_; } @@ -1229,6 +1251,13 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { // to REPL_GLOBAL. Should only be called on REPL scripts. void RewriteReplGlobalVariables(); + void set_class_scope_has_private_brand(bool value) { + class_scope_has_private_brand_ = value; + } + bool class_scope_has_private_brand() const { + return class_scope_has_private_brand_; + } + private: V8_INLINE void AllocateParameter(Variable* var, int index); @@ -1276,7 +1305,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { bool has_this_reference_ : 1; bool has_this_declaration_ : 1; bool needs_private_name_context_chain_recalc_ : 1; - + bool class_scope_has_private_brand_ : 1; // If the scope is a function scope, this is the function kind. 
FunctionKind function_kind_; @@ -1477,9 +1506,14 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope { should_save_class_variable_index_ = true; } - void ReplaceReparsedClassScope(Isolate* isolate, - AstValueFactory* ast_value_factory, - ClassScope* old_scope); + // Finalize the reparsed class scope, called when reparsing the + // class scope for the initializer member function. + // If the reparsed scope declares any variable that needs allocation + // fixup using the scope info, needs_allocation_fixup is true. + void FinalizeReparsedClassScope(Isolate* isolate, + MaybeHandle outer_scope_info, + AstValueFactory* ast_value_factory, + bool needs_allocation_fixup); #ifdef DEBUG bool is_reparsed_class_scope() const { return is_reparsed_class_scope_; } #endif @@ -1519,7 +1553,8 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope { rare_data_and_is_parsing_heritage_.SetPayload(v); } - PointerWithPayload rare_data_and_is_parsing_heritage_; + base::PointerWithPayload + rare_data_and_is_parsing_heritage_; Variable* class_variable_ = nullptr; // These are only maintained when the scope is parsed, not when the // scope is deserialized. 
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h index 84015af36228f6..be045b0e68c153 100644 --- a/deps/v8/src/base/atomic-utils.h +++ b/deps/v8/src/base/atomic-utils.h @@ -66,6 +66,13 @@ class AsAtomicImpl { public: using AtomicStorageType = TAtomicStorageType; + template + static T SeqCst_Load(T* addr) { + STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType)); + return cast_helper::to_return_type( + base::SeqCst_Load(to_storage_addr(addr))); + } + template static T Acquire_Load(T* addr) { STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType)); @@ -80,6 +87,14 @@ class AsAtomicImpl { base::Relaxed_Load(to_storage_addr(addr))); } + template + static void SeqCst_Store(T* addr, + typename std::remove_reference::type new_value) { + STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType)); + base::SeqCst_Store(to_storage_addr(addr), + cast_helper::to_storage_type(new_value)); + } + template static void Release_Store(T* addr, typename std::remove_reference::type new_value) { diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h index 56fd5f3094a886..f6b516ad9e986d 100644 --- a/deps/v8/src/base/atomicops.h +++ b/deps/v8/src/base/atomicops.h @@ -241,6 +241,16 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { std::memory_order_acquire); } +inline Atomic8 SeqCst_Load(volatile const Atomic8* ptr) { + return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), + std::memory_order_seq_cst); +} + +inline Atomic32 SeqCst_Load(volatile const Atomic32* ptr) { + return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), + std::memory_order_seq_cst); +} + #if defined(V8_HOST_ARCH_64_BIT) inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr, @@ -314,6 +324,11 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { std::memory_order_acquire); } +inline Atomic64 SeqCst_Load(volatile const Atomic64* ptr) { + return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), + 
std::memory_order_seq_cst); +} + #endif // defined(V8_HOST_ARCH_64_BIT) inline void Relaxed_Memcpy(volatile Atomic8* dst, volatile const Atomic8* src, @@ -441,7 +456,7 @@ inline int Relaxed_Memcmp(volatile const Atomic8* s1, // On some platforms we need additional declarations to make // AtomicWord compatible with our other Atomic* types. -#if defined(V8_OS_MACOSX) || defined(V8_OS_OPENBSD) || defined(V8_OS_AIX) +#if defined(V8_OS_DARWIN) || defined(V8_OS_OPENBSD) || defined(V8_OS_AIX) #include "src/base/atomicops_internals_atomicword_compat.h" #endif diff --git a/deps/v8/src/base/bit-field.h b/deps/v8/src/base/bit-field.h index 7b2796e3df23b6..63142a20fa2c29 100644 --- a/deps/v8/src/base/bit-field.h +++ b/deps/v8/src/base/bit-field.h @@ -16,7 +16,7 @@ namespace base { // BitField is a help template for encoding and decode bitfield with // unsigned content. // Instantiate them via 'using', which is cheaper than deriving a new class: -// using MyBitField = base::BitField; +// using MyBitField = base::BitField; // The BitField class is final to enforce this style over derivation. template diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc index a51206aec698e8..37924a6d674ce9 100644 --- a/deps/v8/src/base/bounded-page-allocator.cc +++ b/deps/v8/src/base/bounded-page-allocator.cc @@ -118,8 +118,7 @@ bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) { MutexGuard guard(&mutex_); Address address = reinterpret_cast
(raw_address); - size_t freed_size = region_allocator_.FreeRegion(address); - if (freed_size != size) return false; + CHECK_EQ(size, region_allocator_.FreeRegion(address)); if (page_initialization_mode_ == PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) { // When we are required to return zero-initialized pages, we decommit the @@ -167,15 +166,15 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size, if (page_initialization_mode_ == PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) { // See comment in FreePages(). - return page_allocator_->DecommitPages(reinterpret_cast(free_address), - free_size); + CHECK(page_allocator_->DecommitPages(reinterpret_cast(free_address), + free_size)); } else { DCHECK_EQ(page_initialization_mode_, PageInitializationMode::kAllocatedPagesCanBeUninitialized); - return page_allocator_->SetPermissions( - reinterpret_cast(free_address), free_size, - PageAllocator::kNoAccess); + CHECK(page_allocator_->SetPermissions(reinterpret_cast(free_address), + free_size, PageAllocator::kNoAccess)); } + return true; } bool BoundedPageAllocator::SetPermissions(void* address, size_t size, diff --git a/deps/v8/src/base/bounded-page-allocator.h b/deps/v8/src/base/bounded-page-allocator.h index 07c5cda3070ed4..ade9aa2d347f50 100644 --- a/deps/v8/src/base/bounded-page-allocator.h +++ b/deps/v8/src/base/bounded-page-allocator.h @@ -27,7 +27,6 @@ enum class PageInitializationMode { // pre-reserved region of virtual space. This class requires the virtual space // to be kept reserved during the lifetime of this object. 
// The main application of bounded page allocator are -// - the V8 virtual memory cage // - V8 heap pointer compression which requires the whole V8 heap to be // allocated within a contiguous range of virtual address space, // - executable page allocation, which allows to use PC-relative 32-bit code diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h index 3303916776fa36..3befde51e7f42c 100644 --- a/deps/v8/src/base/build_config.h +++ b/deps/v8/src/base/build_config.h @@ -154,8 +154,8 @@ #error Target architecture ia32 is only supported on ia32 host #endif #if (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT && \ - !(V8_HOST_ARCH_X64 && V8_HOST_ARCH_64_BIT)) -#error Target architecture x64 is only supported on x64 host + !((V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64) && V8_HOST_ARCH_64_BIT)) +#error Target architecture x64 is only supported on x64 and arm64 host #endif #if (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT && \ !(V8_HOST_ARCH_X64 && V8_HOST_ARCH_32_BIT)) @@ -222,7 +222,7 @@ #endif // pthread_jit_write_protect is only available on arm64 Mac. -#if defined(V8_OS_MACOSX) && !defined(V8_OS_IOS) && defined(V8_HOST_ARCH_ARM64) +#if defined(V8_OS_MACOS) && defined(V8_HOST_ARCH_ARM64) #define V8_HAS_PTHREAD_JIT_WRITE_PROTECT 1 #else #define V8_HAS_PTHREAD_JIT_WRITE_PROTECT 0 @@ -237,8 +237,9 @@ constexpr int kReturnAddressStackSlotCount = V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0; // Number of bits to represent the page size for paged spaces. -#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) -// PPC has large (64KB) physical pages. +#if (defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_PPC64)) && !defined(_AIX) +// Native PPC linux has large (64KB) physical pages. +// Simulator (and Aix) need to use the same value as x64. 
const int kPageSizeBits = 19; #elif defined(ENABLE_HUGEPAGE) // When enabling huge pages, adjust V8 page size to take up exactly one huge diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc index ab263c7e7781d9..dc61f4bf11ed85 100644 --- a/deps/v8/src/base/cpu.cc +++ b/deps/v8/src/base/cpu.cc @@ -50,6 +50,8 @@ #include "src/base/logging.h" #include "src/base/platform/wrappers.h" #if V8_OS_WIN +#include + #include "src/base/win32-headers.h" #endif @@ -85,7 +87,7 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) { #endif // !V8_LIBC_MSVCRT #elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS || \ - V8_HOST_ARCH_MIPS64 + V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_RISCV64 #if V8_OS_LINUX @@ -354,7 +356,7 @@ static bool HasListItem(const char* list, const char* item) { #endif // V8_OS_LINUX #endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || - // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 + // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_RISCV64 #if defined(V8_OS_STARBOARD) @@ -444,7 +446,8 @@ CPU::CPU() is_fp64_mode_(false), has_non_stop_time_stamp_counter_(false), is_running_in_vm_(false), - has_msa_(false) { + has_msa_(false), + has_rvv_(false) { memcpy(vendor_, "Unknown", 8); #if defined(V8_OS_STARBOARD) @@ -498,6 +501,9 @@ CPU::CPU() has_avx_ = (cpu_info[2] & 0x10000000) != 0; has_avx2_ = (cpu_info7[1] & 0x00000020) != 0; has_fma3_ = (cpu_info[2] & 0x00001000) != 0; + // CET shadow stack feature flag. See + // https://en.wikipedia.org/wiki/CPUID#EAX=7,_ECX=0:_Extended_Features + has_cetss_ = (cpu_info7[2] & 0x00000080) != 0; // "Hypervisor Present Bit: Bit 31 of ECX of CPUID leaf 0x1." // See https://lwn.net/Articles/301888/ // This is checking for any hypervisor. Hypervisors may choose not to @@ -758,6 +764,13 @@ CPU::CPU() // user-space. has_non_stop_time_stamp_counter_ = true; + // Defined in winnt.h, but in a newer version of the Windows SDK than the one + // that V8 requires, so we must copy the value here. 
+ constexpr int PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE = 44; + + has_jscvt_ = + IsProcessorFeaturePresent(PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE); + #elif V8_OS_LINUX // Try to extract the list of CPU features from ELF hwcaps. uint32_t hwcaps = ReadELFHWCaps(); @@ -770,7 +783,7 @@ CPU::CPU() has_jscvt_ = HasListItem(features, "jscvt"); delete[] features; } -#elif V8_OS_MACOSX +#elif V8_OS_DARWIN // ARM64 Macs always have JSCVT. has_jscvt_ = true; #endif // V8_OS_WIN @@ -854,7 +867,19 @@ CPU::CPU() } #endif // V8_OS_AIX #endif // !USE_SIMULATOR -#endif // V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 + +#elif V8_HOST_ARCH_RISCV64 + CPUInfo cpu_info; + char* features = cpu_info.ExtractField("isa"); + + if (HasListItem(features, "rv64imafdc")) { + has_fpu_ = true; + } + if (HasListItem(features, "rv64imafdcv")) { + has_fpu_ = true; + has_rvv_ = true; + } +#endif // V8_HOST_ARCH_RISCV64 } } // namespace base diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h index 9fcf90b3bcd845..3050f2c4665797 100644 --- a/deps/v8/src/base/cpu.h +++ b/deps/v8/src/base/cpu.h @@ -101,6 +101,7 @@ class V8_BASE_EXPORT CPU final { bool has_lzcnt() const { return has_lzcnt_; } bool has_popcnt() const { return has_popcnt_; } bool is_atom() const { return is_atom_; } + bool has_cetss() const { return has_cetss_; } bool has_non_stop_time_stamp_counter() const { return has_non_stop_time_stamp_counter_; } @@ -127,6 +128,9 @@ class V8_BASE_EXPORT CPU final { bool is_fp64_mode() const { return is_fp64_mode_; } bool has_msa() const { return has_msa_; } + // riscv features + bool has_rvv() const { return has_rvv_; } + private: #if defined(V8_OS_STARBOARD) bool StarboardDetectCPU(); @@ -156,6 +160,7 @@ class V8_BASE_EXPORT CPU final { bool has_sse41_; bool has_sse42_; bool is_atom_; + bool has_cetss_; bool has_osxsave_; bool has_avx_; bool has_avx2_; @@ -175,6 +180,7 @@ class V8_BASE_EXPORT CPU final { bool has_non_stop_time_stamp_counter_; bool is_running_in_vm_; bool has_msa_; + bool has_rvv_; 
}; } // namespace base diff --git a/deps/v8/src/base/debug/stack_trace_posix.cc b/deps/v8/src/base/debug/stack_trace_posix.cc index 270f1ca4e02792..b76c098d8808ed 100644 --- a/deps/v8/src/base/debug/stack_trace_posix.cc +++ b/deps/v8/src/base/debug/stack_trace_posix.cc @@ -33,7 +33,7 @@ #include #include #endif -#if V8_OS_MACOSX +#if V8_OS_DARWIN #include #endif diff --git a/deps/v8/src/base/emulated-virtual-address-subspace.cc b/deps/v8/src/base/emulated-virtual-address-subspace.cc index fbfb1255693ac8..ae07a3cd96d38e 100644 --- a/deps/v8/src/base/emulated-virtual-address-subspace.cc +++ b/deps/v8/src/base/emulated-virtual-address-subspace.cc @@ -16,7 +16,7 @@ EmulatedVirtualAddressSubspace::EmulatedVirtualAddressSubspace( size_t total_size) : VirtualAddressSpace(parent_space->page_size(), parent_space->allocation_granularity(), base, - total_size), + total_size, parent_space->max_page_permissions()), mapped_size_(mapped_size), parent_space_(parent_space), region_allocator_(base, mapped_size, parent_space_->page_size()) { @@ -30,7 +30,7 @@ EmulatedVirtualAddressSubspace::EmulatedVirtualAddressSubspace( } EmulatedVirtualAddressSubspace::~EmulatedVirtualAddressSubspace() { - CHECK(parent_space_->FreePages(base(), mapped_size_)); + parent_space_->FreePages(base(), mapped_size_); } void EmulatedVirtualAddressSubspace::SetRandomSeed(int64_t seed) { @@ -40,7 +40,7 @@ void EmulatedVirtualAddressSubspace::SetRandomSeed(int64_t seed) { Address EmulatedVirtualAddressSubspace::RandomPageAddress() { MutexGuard guard(&mutex_); - Address addr = base() + (rng_.NextInt64() % size()); + Address addr = base() + (static_cast(rng_.NextInt64()) % size()); return RoundDown(addr, allocation_granularity()); } @@ -64,26 +64,27 @@ Address EmulatedVirtualAddressSubspace::AllocatePages( // No luck or hint is outside of the mapped region. Try to allocate pages in // the unmapped space using page allocation hints instead. 
- - // Somewhat arbitrary size limitation to ensure that the loop below for - // finding a fitting base address hint terminates quickly. - if (size >= (unmapped_size() / 2)) return kNullAddress; + if (!IsUsableSizeForUnmappedRegion(size)) return kNullAddress; static constexpr int kMaxAttempts = 10; for (int i = 0; i < kMaxAttempts; i++) { - // If the hint wouldn't result in the entire allocation being inside the - // managed region, simply retry. There is at least a 50% chance of - // getting a usable address due to the size restriction above. + // If an unmapped region exists, it must cover at least 50% of the whole + // space (unmapped + mapped region). Since we limit the size of allocation + // to 50% of the unmapped region (see IsUsableSizeForUnmappedRegion), a + // random page address has at least a 25% chance of being a usable base. As + // such, this loop should usually terminate quickly. + DCHECK_GE(unmapped_size(), mapped_size()); while (!UnmappedRegionContains(hint, size)) { hint = RandomPageAddress(); } + hint = RoundDown(hint, alignment); - Address region = + const Address result = parent_space_->AllocatePages(hint, size, alignment, permissions); - if (region && UnmappedRegionContains(region, size)) { - return region; - } else if (region) { - CHECK(parent_space_->FreePages(region, size)); + if (UnmappedRegionContains(result, size)) { + return result; + } else if (result) { + parent_space_->FreePages(result, size); } // Retry at a different address. 
@@ -93,15 +94,49 @@ Address EmulatedVirtualAddressSubspace::AllocatePages( return kNullAddress; } -bool EmulatedVirtualAddressSubspace::FreePages(Address address, size_t size) { +void EmulatedVirtualAddressSubspace::FreePages(Address address, size_t size) { if (MappedRegionContains(address, size)) { MutexGuard guard(&mutex_); - if (region_allocator_.FreeRegion(address) != size) return false; + CHECK_EQ(size, region_allocator_.FreeRegion(address)); CHECK(parent_space_->DecommitPages(address, size)); - return true; + } else { + DCHECK(UnmappedRegionContains(address, size)); + parent_space_->FreePages(address, size); } - if (!UnmappedRegionContains(address, size)) return false; - return parent_space_->FreePages(address, size); +} + +Address EmulatedVirtualAddressSubspace::AllocateSharedPages( + Address hint, size_t size, PagePermissions permissions, + PlatformSharedMemoryHandle handle, uint64_t offset) { + // Can only allocate shared pages in the unmapped region. + if (!IsUsableSizeForUnmappedRegion(size)) return kNullAddress; + + static constexpr int kMaxAttempts = 10; + for (int i = 0; i < kMaxAttempts; i++) { + // See AllocatePages() for why this loop usually terminates quickly. 
+ DCHECK_GE(unmapped_size(), mapped_size()); + while (!UnmappedRegionContains(hint, size)) { + hint = RandomPageAddress(); + } + + Address region = parent_space_->AllocateSharedPages(hint, size, permissions, + handle, offset); + if (UnmappedRegionContains(region, size)) { + return region; + } else if (region) { + parent_space_->FreeSharedPages(region, size); + } + + hint = RandomPageAddress(); + } + + return kNullAddress; +} + +void EmulatedVirtualAddressSubspace::FreeSharedPages(Address address, + size_t size) { + DCHECK(UnmappedRegionContains(address, size)); + parent_space_->FreeSharedPages(address, size); } bool EmulatedVirtualAddressSubspace::SetPagePermissions( @@ -110,6 +145,27 @@ bool EmulatedVirtualAddressSubspace::SetPagePermissions( return parent_space_->SetPagePermissions(address, size, permissions); } +bool EmulatedVirtualAddressSubspace::AllocateGuardRegion(Address address, + size_t size) { + if (MappedRegionContains(address, size)) { + MutexGuard guard(&mutex_); + return region_allocator_.AllocateRegionAt(address, size); + } + if (!UnmappedRegionContains(address, size)) return false; + return parent_space_->AllocateGuardRegion(address, size); +} + +void EmulatedVirtualAddressSubspace::FreeGuardRegion(Address address, + size_t size) { + if (MappedRegionContains(address, size)) { + MutexGuard guard(&mutex_); + CHECK_EQ(size, region_allocator_.FreeRegion(address)); + } else { + DCHECK(UnmappedRegionContains(address, size)); + parent_space_->FreeGuardRegion(address, size); + } +} + bool EmulatedVirtualAddressSubspace::CanAllocateSubspaces() { // This is not supported, mostly because it's not (yet) needed in practice. 
return false; @@ -118,7 +174,7 @@ bool EmulatedVirtualAddressSubspace::CanAllocateSubspaces() { std::unique_ptr EmulatedVirtualAddressSubspace::AllocateSubspace( Address hint, size_t size, size_t alignment, - PagePermissions max_permissions) { + PagePermissions max_page_permissions) { UNREACHABLE(); } diff --git a/deps/v8/src/base/emulated-virtual-address-subspace.h b/deps/v8/src/base/emulated-virtual-address-subspace.h index 480c3e1ae0f00b..c5078355504434 100644 --- a/deps/v8/src/base/emulated-virtual-address-subspace.h +++ b/deps/v8/src/base/emulated-virtual-address-subspace.h @@ -48,16 +48,27 @@ class V8_BASE_EXPORT EmulatedVirtualAddressSubspace final Address AllocatePages(Address hint, size_t size, size_t alignment, PagePermissions permissions) override; - bool FreePages(Address address, size_t size) override; + void FreePages(Address address, size_t size) override; + + Address AllocateSharedPages(Address hint, size_t size, + PagePermissions permissions, + PlatformSharedMemoryHandle handle, + uint64_t offset) override; + + void FreeSharedPages(Address address, size_t size) override; bool SetPagePermissions(Address address, size_t size, PagePermissions permissions) override; + bool AllocateGuardRegion(Address address, size_t size) override; + + void FreeGuardRegion(Address address, size_t size) override; + bool CanAllocateSubspaces() override; std::unique_ptr AllocateSubspace( Address hint, size_t size, size_t alignment, - PagePermissions max_permissions) override; + PagePermissions max_page_permissions) override; bool DiscardSystemPages(Address address, size_t size) override; @@ -88,6 +99,13 @@ class V8_BASE_EXPORT EmulatedVirtualAddressSubspace final return Contains(unmapped_base(), unmapped_size(), addr, length); } + // Helper function to define a limit for the size of allocations in the + // unmapped region. This limit makes it possible to estimate the expected + // runtime of some loops in the Allocate methods. 
+ bool IsUsableSizeForUnmappedRegion(size_t size) const { + return size <= (unmapped_size() / 2); + } + // Size of the mapped region located at the beginning of this address space. const size_t mapped_size_; diff --git a/deps/v8/src/base/immediate-crash.h b/deps/v8/src/base/immediate-crash.h index ef1f922317774e..770cb273f92a25 100644 --- a/deps/v8/src/base/immediate-crash.h +++ b/deps/v8/src/base/immediate-crash.h @@ -42,7 +42,7 @@ #if V8_CC_GNU -#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 +#if V8_HOST_ARCH_X64 || V8_HOST_ARCH_IA32 // TODO(https://crbug.com/958675): In theory, it should be possible to use just // int3. However, there are a number of crashes with SIGILL as the exception @@ -50,13 +50,13 @@ // to continue after SIGTRAP. #define TRAP_SEQUENCE1_() asm volatile("int3") -#if V8_OS_MACOSX +#if V8_OS_DARWIN // Intentionally empty: __builtin_unreachable() is always part of the sequence // (see IMMEDIATE_CRASH below) and already emits a ud2 on Mac. #define TRAP_SEQUENCE2_() asm volatile("") #else #define TRAP_SEQUENCE2_() asm volatile("ud2") -#endif // V8_OS_MACOSX +#endif // V8_OS_DARWIN #elif V8_HOST_ARCH_ARM diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h index 3a73afc1ce7712..fcb9f8756fd3ce 100644 --- a/deps/v8/src/base/macros.h +++ b/deps/v8/src/base/macros.h @@ -313,7 +313,7 @@ V8_INLINE A implicit_cast(A x) { #endif // Fix for Mac OS X defining uintptr_t as "unsigned long": -#if V8_OS_MACOSX +#if V8_OS_DARWIN #undef V8PRIxPTR #define V8PRIxPTR "lx" #undef V8PRIdPTR diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc index 2956bf14755197..d2a8621b5fbc57 100644 --- a/deps/v8/src/base/page-allocator.cc +++ b/deps/v8/src/base/page-allocator.cc @@ -7,7 +7,7 @@ #include "src/base/platform/platform.h" #include "src/base/platform/wrappers.h" -#if V8_OS_MACOSX +#if V8_OS_DARWIN #include // For MAP_JIT. 
#endif @@ -132,13 +132,15 @@ void* PageAllocator::RemapShared(void* old_address, void* new_address, } bool PageAllocator::FreePages(void* address, size_t size) { - return base::OS::Free(address, size); + base::OS::Free(address, size); + return true; } bool PageAllocator::ReleasePages(void* address, size_t size, size_t new_size) { DCHECK_LT(new_size, size); - return base::OS::Release(reinterpret_cast(address) + new_size, - size - new_size); + base::OS::Release(reinterpret_cast(address) + new_size, + size - new_size); + return true; } bool PageAllocator::SetPermissions(void* address, size_t size, diff --git a/deps/v8/src/base/platform/condition-variable.cc b/deps/v8/src/base/platform/condition-variable.cc index 5ab66d39a4df2a..b7b21c99473b36 100644 --- a/deps/v8/src/base/platform/condition-variable.cc +++ b/deps/v8/src/base/platform/condition-variable.cc @@ -40,7 +40,7 @@ ConditionVariable::ConditionVariable() { ConditionVariable::~ConditionVariable() { -#if defined(V8_OS_MACOSX) +#if defined(V8_OS_DARWIN) // This hack is necessary to avoid a fatal pthreads subsystem bug in the // Darwin kernel. http://crbug.com/517681. { @@ -86,7 +86,7 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) { struct timespec ts; int result; mutex->AssertHeldAndUnmark(); -#if V8_OS_MACOSX +#if V8_OS_DARWIN // Mac OS X provides pthread_cond_timedwait_relative_np(), which does // not depend on the real time clock, which is what you really WANT here! 
ts = rel_time.ToTimespec(); @@ -111,7 +111,7 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) { ts = end_time.ToTimespec(); result = pthread_cond_timedwait( &native_handle_, &mutex->native_handle(), &ts); -#endif // V8_OS_MACOSX +#endif // V8_OS_DARWIN mutex->AssertUnheldAndMark(); if (result == ETIMEDOUT) { return false; diff --git a/deps/v8/src/base/platform/elapsed-timer.h b/deps/v8/src/base/platform/elapsed-timer.h index 2947c31237b2cf..c5ac56043dea25 100644 --- a/deps/v8/src/base/platform/elapsed-timer.h +++ b/deps/v8/src/base/platform/elapsed-timer.h @@ -116,7 +116,7 @@ class ElapsedTimer final { private: static V8_INLINE TimeTicks Now() { - TimeTicks now = TimeTicks::HighResolutionNow(); + TimeTicks now = TimeTicks::Now(); DCHECK(!now.IsNull()); return now; } diff --git a/deps/v8/src/base/platform/mutex.cc b/deps/v8/src/base/platform/mutex.cc index 7bf60996ee4626..423ab0d98a34ab 100644 --- a/deps/v8/src/base/platform/mutex.cc +++ b/deps/v8/src/base/platform/mutex.cc @@ -222,7 +222,7 @@ bool RecursiveMutex::TryLock() { return true; } -#if V8_OS_MACOSX +#if V8_OS_DARWIN SharedMutex::SharedMutex() { InitializeNativeHandle(&native_handle_); } @@ -251,7 +251,7 @@ bool SharedMutex::TryLockExclusive() { return true; } -#else // !V8_OS_MACOSX +#else // !V8_OS_DARWIN SharedMutex::SharedMutex() { pthread_rwlock_init(&native_handle_, nullptr); } @@ -301,7 +301,7 @@ bool SharedMutex::TryLockExclusive() { return result; } -#endif // !V8_OS_MACOSX +#endif // !V8_OS_DARWIN #elif V8_OS_WIN diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h index 5fefa25ab6f953..ce13d8d7634fae 100644 --- a/deps/v8/src/base/platform/mutex.h +++ b/deps/v8/src/base/platform/mutex.h @@ -265,7 +265,7 @@ class V8_BASE_EXPORT SharedMutex final { private: // The implementation-defined native handle type. 
-#if V8_OS_MACOSX +#if V8_OS_DARWIN // pthread_rwlock_t is broken on MacOS when signals are being sent to the // process (see https://crbug.com/v8/11399). Until Apple fixes that in the OS, // we have to fall back to a non-shared mutex. diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc index b27bfbc8bcf8d2..9c9adda3897b80 100644 --- a/deps/v8/src/base/platform/platform-aix.cc +++ b/deps/v8/src/base/platform/platform-aix.cc @@ -164,5 +164,43 @@ Stack::StackSlot Stack::GetStackStart() { return reinterpret_cast(buf.__pi_stackend); } +// static +bool OS::DecommitPages(void* address, size_t size) { + // The difference between this implementation and the alternative under + // platform-posix.cc is that on AIX, calling mmap on a pre-designated address + // with MAP_FIXED will fail and return -1 unless the application has requested + // SPEC1170 compliant behaviour: + // https://www.ibm.com/docs/en/aix/7.3?topic=m-mmap-mmap64-subroutine + // Therefore in case if failure we need to unmap the address before trying to + // map it again. The downside is another thread could place another mapping at + // the same address after the munmap but before the mmap, therefore a CHECK is + // also added to assure the address is mapped successfully. Refer to the + // comments under https://crrev.com/c/3010195 for more details. +#define MMAP() \ + mmap(address, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0) + DCHECK_EQ(0, reinterpret_cast(address) % CommitPageSize()); + DCHECK_EQ(0, size % CommitPageSize()); + void* ptr; + // Try without mapping first. + ptr = MMAP(); + if (ptr != address) { + DCHECK_EQ(ptr, MAP_FAILED); + // Returns 0 when successful. + if (munmap(address, size)) { + return false; + } + // Try again after unmap. + ptr = MMAP(); + // If this check fails it's most likely due to a racing condition where + // another thread has mapped the same address right before we do. 
+ // Since this could cause hard-to-debug issues, potentially with security + // impact, and we can't recover from this, the best we can do is abort the + // process. + CHECK_EQ(ptr, address); + } +#undef MMAP + return true; +} + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc index 5aae01c9c41d81..0875bf263c4e2b 100644 --- a/deps/v8/src/base/platform/platform-cygwin.cc +++ b/deps/v8/src/base/platform/platform-cygwin.cc @@ -118,7 +118,7 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment, if (base == aligned_base) return reinterpret_cast(base); // Otherwise, free it and try a larger allocation. - CHECK(Free(base, size)); + Free(base, size); // Clear the hint. It's unlikely we can allocate at this address. hint = nullptr; @@ -134,7 +134,7 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment, // Try to trim the allocation by freeing the padded allocation and then // calling VirtualAlloc at the aligned base. 
- CHECK(Free(base, padded_size)); + Free(base, padded_size); aligned_base = RoundUp(base, alignment); base = reinterpret_cast( VirtualAlloc(aligned_base, size, flags, protect)); @@ -147,18 +147,18 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment, } // static -bool OS::Free(void* address, const size_t size) { +void OS::Free(void* address, const size_t size) { DCHECK_EQ(0, static_cast(address) % AllocatePageSize()); DCHECK_EQ(0, size % AllocatePageSize()); USE(size); - return VirtualFree(address, 0, MEM_RELEASE) != 0; + CHECK_NE(0, VirtualFree(address, 0, MEM_RELEASE)); } // static -bool OS::Release(void* address, size_t size) { +void OS::Release(void* address, size_t size) { DCHECK_EQ(0, reinterpret_cast(address) % CommitPageSize()); DCHECK_EQ(0, size % CommitPageSize()); - return VirtualFree(address, size, MEM_DECOMMIT) != 0; + CHECK_NE(0, VirtualFree(address, size, MEM_DECOMMIT)); } // static diff --git a/deps/v8/src/base/platform/platform-darwin.cc b/deps/v8/src/base/platform/platform-darwin.cc new file mode 100644 index 00000000000000..bf360e3136350d --- /dev/null +++ b/deps/v8/src/base/platform/platform-darwin.cc @@ -0,0 +1,107 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Platform-specific code shared between macOS and iOS goes here. The macOS +// specific part is in platform-macos.cc, the POSIX-compatible parts in +// platform-posix.cc. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#undef MAP_TYPE + +#include "src/base/macros.h" +#include "src/base/platform/platform-posix-time.h" +#include "src/base/platform/platform-posix.h" +#include "src/base/platform/platform.h" + +namespace v8 { +namespace base { + +std::vector OS::GetSharedLibraryAddresses() { + std::vector result; + unsigned int images_count = _dyld_image_count(); + for (unsigned int i = 0; i < images_count; ++i) { + const mach_header* header = _dyld_get_image_header(i); + if (header == nullptr) continue; +#if V8_HOST_ARCH_I32 + unsigned int size; + char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size); +#else + uint64_t size; + char* code_ptr = getsectdatafromheader_64( + reinterpret_cast(header), SEG_TEXT, SECT_TEXT, + &size); +#endif + if (code_ptr == nullptr) continue; + const intptr_t slide = _dyld_get_image_vmaddr_slide(i); + const uintptr_t start = reinterpret_cast(code_ptr) + slide; + result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start, + start + size, slide)); + } + return result; +} + +void OS::SignalCodeMovingGC() {} + +TimezoneCache* OS::CreateTimezoneCache() { + return new PosixDefaultTimezoneCache(); +} + +void OS::AdjustSchedulingParams() { +#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 + { + // Check availability of scheduling params. + uint32_t val = 0; + size_t valSize = sizeof(val); + int rc = sysctlbyname("kern.tcsm_available", &val, &valSize, NULL, 0); + if (rc < 0 || !val) return; + } + + { + // Adjust scheduling params. 
+ uint32_t val = 1; + int rc = sysctlbyname("kern.tcsm_enable", NULL, NULL, &val, sizeof(val)); + DCHECK_GE(rc, 0); + USE(rc); + } +#endif +} + +std::vector OS::GetFreeMemoryRangesWithin( + OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size, + size_t alignment) { + return {}; +} + +// static +Stack::StackSlot Stack::GetStackStart() { + return pthread_get_stackaddr_np(pthread_self()); +} + +} // namespace base +} // namespace v8 diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc index f090ea5b6ac094..1f4c35cca4539d 100644 --- a/deps/v8/src/base/platform/platform-fuchsia.cc +++ b/deps/v8/src/base/platform/platform-fuchsia.cc @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include +#include #include #include #include @@ -18,6 +20,27 @@ namespace base { namespace { +static zx_handle_t g_vmex_resource = ZX_HANDLE_INVALID; + +static void* g_root_vmar_base = nullptr; + +#ifdef V8_USE_VMEX_RESOURCE +void SetVmexResource() { + DCHECK_EQ(g_vmex_resource, ZX_HANDLE_INVALID); + zx::resource vmex_resource; + fuchsia::kernel::VmexResourceSyncPtr vmex_resource_svc; + zx_status_t status = fdio_service_connect( + "/svc/fuchsia.kernel.VmexResource", + vmex_resource_svc.NewRequest().TakeChannel().release()); + DCHECK_EQ(status, ZX_OK); + status = vmex_resource_svc->Get(&vmex_resource); + USE(status); + DCHECK_EQ(status, ZX_OK); + DCHECK(vmex_resource.is_valid()); + g_vmex_resource = vmex_resource.release(); +} +#endif + zx_vm_option_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) { switch (access) { case OS::MemoryPermission::kNoAccess: @@ -56,29 +79,22 @@ zx_vm_option_t GetAlignmentOptionFromAlignment(size_t alignment) { return alignment_log2 << ZX_VM_ALIGN_BASE; } -void* AllocateInternal(const zx::vmar& vmar, size_t page_size, - size_t vmar_offset, bool vmar_offset_is_hint, - size_t size, size_t alignment, - 
OS::MemoryPermission access) { +enum class PlacementMode { + // Attempt to place the object at the provided address, otherwise elsewhere. + kUseHint, + // Place the object anywhere it fits. + kAnywhere, + // Place the object at the provided address, otherwise fail. + kFixed +}; + +void* MapVmo(const zx::vmar& vmar, void* vmar_base, size_t page_size, + void* address, const zx::vmo& vmo, uint64_t offset, + PlacementMode placement, size_t size, size_t alignment, + OS::MemoryPermission access) { DCHECK_EQ(0, size % page_size); - DCHECK_EQ(0, alignment % page_size); - DCHECK_EQ(0, vmar_offset % page_size); - - zx::vmo vmo; - if (zx::vmo::create(size, 0, &vmo) != ZX_OK) { - return nullptr; - } - static const char kVirtualMemoryName[] = "v8-virtualmem"; - vmo.set_property(ZX_PROP_NAME, kVirtualMemoryName, - strlen(kVirtualMemoryName)); - - // Always call zx_vmo_replace_as_executable() in case the memory will need - // to be marked as executable in the future. - // TOOD(https://crbug.com/v8/8899): Only call this when we know that the - // region will need to be marked as executable in the future. - if (vmo.replace_as_executable(zx::resource(), &vmo) != ZX_OK) { - return nullptr; - } + DCHECK_EQ(0, reinterpret_cast(address) % page_size); + DCHECK_IMPLIES(placement != PlacementMode::kAnywhere, address != nullptr); zx_vm_option_t options = GetProtectionFromMemoryPermission(access); @@ -86,30 +102,60 @@ void* AllocateInternal(const zx::vmar& vmar, size_t page_size, CHECK_NE(0, alignment_option); // Invalid alignment specified options |= alignment_option; - if (vmar_offset != 0) { + size_t vmar_offset = 0; + if (placement != PlacementMode::kAnywhere) { + // Try placing the mapping at the specified address. 
+ uintptr_t target_addr = reinterpret_cast(address); + uintptr_t base = reinterpret_cast(vmar_base); + DCHECK_GE(target_addr, base); + vmar_offset = target_addr - base; options |= ZX_VM_SPECIFIC; } - zx_vaddr_t address; - zx_status_t status = vmar.map(options, vmar_offset, vmo, 0, size, &address); + zx_vaddr_t result; + zx_status_t status = vmar.map(options, vmar_offset, vmo, 0, size, &result); - if (status != ZX_OK && vmar_offset != 0 && vmar_offset_is_hint) { - // If a vmar_offset was specified and the allocation failed (for example, - // because the offset overlapped another mapping), then we should retry - // again without a vmar_offset if that offset was just meant to be a hint. + if (status != ZX_OK && placement == PlacementMode::kUseHint) { + // If a placement hint was specified but couldn't be used (for example, + // because the offset overlapped another mapping), then retry again without + // a vmar_offset to let the kernel pick another location. options &= ~(ZX_VM_SPECIFIC); - status = vmar.map(options, 0, vmo, 0, size, &address); + status = vmar.map(options, 0, vmo, 0, size, &result); } if (status != ZX_OK) { return nullptr; } - return reinterpret_cast(address); + return reinterpret_cast(result); +} + +void* CreateAndMapVmo(const zx::vmar& vmar, void* vmar_base, size_t page_size, + void* address, PlacementMode placement, size_t size, + size_t alignment, OS::MemoryPermission access) { + zx::vmo vmo; + if (zx::vmo::create(size, 0, &vmo) != ZX_OK) { + return nullptr; + } + static const char kVirtualMemoryName[] = "v8-virtualmem"; + vmo.set_property(ZX_PROP_NAME, kVirtualMemoryName, + strlen(kVirtualMemoryName)); + + // Always call zx_vmo_replace_as_executable() in case the memory will need + // to be marked as executable in the future. + // TOOD(https://crbug.com/v8/8899): Only call this when we know that the + // region will need to be marked as executable in the future. 
+ zx::unowned_resource vmex(g_vmex_resource); + if (vmo.replace_as_executable(*vmex, &vmo) != ZX_OK) { + return nullptr; + } + + return MapVmo(vmar, vmar_base, page_size, address, vmo, 0, placement, size, + alignment, access); } -bool FreeInternal(const zx::vmar& vmar, size_t page_size, void* address, - const size_t size) { +bool UnmapVmo(const zx::vmar& vmar, size_t page_size, void* address, + size_t size) { DCHECK_EQ(0, reinterpret_cast(address) % page_size); DCHECK_EQ(0, size % page_size); return vmar.unmap(reinterpret_cast(address), size) == ZX_OK; @@ -135,13 +181,14 @@ bool DiscardSystemPagesInternal(const zx::vmar& vmar, size_t page_size, } zx_status_t CreateAddressSpaceReservationInternal( - const zx::vmar& vmar, size_t page_size, size_t vmar_offset, - bool vmar_offset_is_hint, size_t size, size_t alignment, + const zx::vmar& vmar, void* vmar_base, size_t page_size, void* address, + PlacementMode placement, size_t size, size_t alignment, OS::MemoryPermission max_permission, zx::vmar* child, zx_vaddr_t* child_addr) { DCHECK_EQ(0, size % page_size); DCHECK_EQ(0, alignment % page_size); - DCHECK_EQ(0, vmar_offset % page_size); + DCHECK_EQ(0, reinterpret_cast(address) % alignment); + DCHECK_IMPLIES(placement != PlacementMode::kAnywhere, address != nullptr); // TODO(v8) determine these based on max_permission. zx_vm_option_t options = ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | @@ -151,16 +198,22 @@ zx_status_t CreateAddressSpaceReservationInternal( CHECK_NE(0, alignment_option); // Invalid alignment specified options |= alignment_option; - if (vmar_offset != 0) { + size_t vmar_offset = 0; + if (placement != PlacementMode::kAnywhere) { + // Try placing the mapping at the specified address. 
+ uintptr_t target_addr = reinterpret_cast(address); + uintptr_t base = reinterpret_cast(vmar_base); + DCHECK_GE(target_addr, base); + vmar_offset = target_addr - base; options |= ZX_VM_SPECIFIC; } zx_status_t status = vmar.allocate(options, vmar_offset, size, child, child_addr); - if (status != ZX_OK && vmar_offset != 0 && vmar_offset_is_hint) { - // If a vmar_offset was specified and the allocation failed (for example, - // because the offset overlapped another mapping), then we should retry - // again without a vmar_offset if that offset was just meant to be a hint. + if (status != ZX_OK && placement == PlacementMode::kUseHint) { + // If a placement hint was specified but couldn't be used (for example, + // because the offset overlapped another mapping), then retry again without + // a vmar_offset to let the kernel pick another location. options &= ~(ZX_VM_SPECIFIC); status = vmar.allocate(options, 0, size, child, child_addr); } @@ -174,24 +227,56 @@ TimezoneCache* OS::CreateTimezoneCache() { return new PosixDefaultTimezoneCache(); } +// static +void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) { + PosixInitializeCommon(hard_abort, gc_fake_mmap); + + // Determine base address of root VMAR. + zx_info_vmar_t info; + zx_status_t status = zx::vmar::root_self()->get_info( + ZX_INFO_VMAR, &info, sizeof(info), nullptr, nullptr); + CHECK_EQ(ZX_OK, status); + g_root_vmar_base = reinterpret_cast(info.base); + +#ifdef V8_USE_VMEX_RESOURCE + SetVmexResource(); +#endif +} + // static void* OS::Allocate(void* address, size_t size, size_t alignment, MemoryPermission access) { - constexpr bool vmar_offset_is_hint = true; - DCHECK_EQ(0, reinterpret_cast
(address) % alignment); - return AllocateInternal(*zx::vmar::root_self(), AllocatePageSize(), - reinterpret_cast(address), - vmar_offset_is_hint, size, alignment, access); + PlacementMode placement = + address != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere; + return CreateAndMapVmo(*zx::vmar::root_self(), g_root_vmar_base, + AllocatePageSize(), address, placement, size, + alignment, access); } // static -bool OS::Free(void* address, const size_t size) { - return FreeInternal(*zx::vmar::root_self(), AllocatePageSize(), address, - size); +void OS::Free(void* address, size_t size) { + CHECK(UnmapVmo(*zx::vmar::root_self(), AllocatePageSize(), address, size)); } // static -bool OS::Release(void* address, size_t size) { return Free(address, size); } +void* OS::AllocateShared(void* address, size_t size, + OS::MemoryPermission access, + PlatformSharedMemoryHandle handle, uint64_t offset) { + PlacementMode placement = + address != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere; + zx::unowned_vmo vmo(VMOFromSharedMemoryHandle(handle)); + return MapVmo(*zx::vmar::root_self(), g_root_vmar_base, AllocatePageSize(), + address, *vmo, offset, placement, size, AllocatePageSize(), + access); +} + +// static +void OS::FreeShared(void* address, size_t size) { + CHECK(UnmapVmo(*zx::vmar::root_self(), AllocatePageSize(), address, size)); +} + +// static +void OS::Release(void* address, size_t size) { Free(address, size); } // static bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { @@ -224,22 +309,37 @@ Optional OS::CreateAddressSpaceReservation( DCHECK_EQ(0, reinterpret_cast
(hint) % alignment); zx::vmar child; zx_vaddr_t child_addr; - uint64_t vmar_offset = reinterpret_cast(hint); - constexpr bool vmar_offset_is_hint = true; + PlacementMode placement = + hint != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere; zx_status_t status = CreateAddressSpaceReservationInternal( - *zx::vmar::root_self(), AllocatePageSize(), vmar_offset, - vmar_offset_is_hint, size, alignment, max_permission, &child, - &child_addr); + *zx::vmar::root_self(), g_root_vmar_base, AllocatePageSize(), hint, + placement, size, alignment, max_permission, &child, &child_addr); if (status != ZX_OK) return {}; return AddressSpaceReservation(reinterpret_cast(child_addr), size, child.release()); } // static -bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) { +void OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) { // Destroy the vmar and release the handle. zx::vmar vmar(reservation.vmar_); - return vmar.destroy() == ZX_OK; + CHECK_EQ(ZX_OK, vmar.destroy()); +} + +// static +PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) { + zx::vmo vmo; + if (zx::vmo::create(size, 0, &vmo) != ZX_OK) { + return kInvalidSharedMemoryHandle; + } + return SharedMemoryHandleFromVMO(vmo.release()); +} + +// static +void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) { + DCHECK_NE(kInvalidSharedMemoryHandle, handle); + zx_handle_t vmo = VMOFromSharedMemoryHandle(handle); + zx_handle_close(vmo); } // static @@ -287,16 +387,10 @@ Optional AddressSpaceReservation::CreateSubReservation( zx::vmar child; zx_vaddr_t child_addr; - size_t vmar_offset = 0; - if (address != 0) { - vmar_offset = - reinterpret_cast(address) - reinterpret_cast(base()); - } - constexpr bool vmar_offset_is_hint = false; zx_status_t status = CreateAddressSpaceReservationInternal( - *zx::unowned_vmar(vmar_), OS::AllocatePageSize(), vmar_offset, - vmar_offset_is_hint, size, OS::AllocatePageSize(), max_permission, &child, - 
&child_addr); + *zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(), address, + PlacementMode::kFixed, size, OS::AllocatePageSize(), max_permission, + &child, &child_addr); if (status != ZX_OK) return {}; DCHECK_EQ(reinterpret_cast(child_addr), address); return AddressSpaceReservation(reinterpret_cast(child_addr), size, @@ -305,29 +399,41 @@ Optional AddressSpaceReservation::CreateSubReservation( bool AddressSpaceReservation::FreeSubReservation( AddressSpaceReservation reservation) { - return OS::FreeAddressSpaceReservation(reservation); + OS::FreeAddressSpaceReservation(reservation); + return true; } bool AddressSpaceReservation::Allocate(void* address, size_t size, OS::MemoryPermission access) { DCHECK(Contains(address, size)); - size_t vmar_offset = 0; - if (address != 0) { - vmar_offset = - reinterpret_cast(address) - reinterpret_cast(base()); - } - constexpr bool vmar_offset_is_hint = false; - void* allocation = AllocateInternal( - *zx::unowned_vmar(vmar_), OS::AllocatePageSize(), vmar_offset, - vmar_offset_is_hint, size, OS::AllocatePageSize(), access); + void* allocation = CreateAndMapVmo( + *zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(), address, + PlacementMode::kFixed, size, OS::AllocatePageSize(), access); DCHECK(!allocation || allocation == address); return allocation != nullptr; } bool AddressSpaceReservation::Free(void* address, size_t size) { DCHECK(Contains(address, size)); - return FreeInternal(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address, - size); + return UnmapVmo(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address, + size); +} + +bool AddressSpaceReservation::AllocateShared(void* address, size_t size, + OS::MemoryPermission access, + PlatformSharedMemoryHandle handle, + uint64_t offset) { + DCHECK(Contains(address, size)); + zx::unowned_vmo vmo(VMOFromSharedMemoryHandle(handle)); + return MapVmo(*zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(), + address, *vmo, offset, PlacementMode::kFixed, size, + 
OS::AllocatePageSize(), access); +} + +bool AddressSpaceReservation::FreeShared(void* address, size_t size) { + DCHECK(Contains(address, size)); + return UnmapVmo(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address, + size); } bool AddressSpaceReservation::SetPermissions(void* address, size_t size, diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc index 3ab88060f55e6e..370facf141bad7 100644 --- a/deps/v8/src/base/platform/platform-linux.cc +++ b/deps/v8/src/base/platform/platform-linux.cc @@ -138,7 +138,7 @@ void OS::SignalCodeMovingGC() { void* addr = mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0); DCHECK_NE(MAP_FAILED, addr); - CHECK(Free(addr, size)); + Free(addr, size); fclose(f); } diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc index d1675bdc44d9c8..bba8d3c699a993 100644 --- a/deps/v8/src/base/platform/platform-macos.cc +++ b/deps/v8/src/base/platform/platform-macos.cc @@ -2,106 +2,104 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -// Platform-specific code for MacOS goes here. For the POSIX-compatible -// parts, the implementation is in platform-posix.cc. +// Platform-specific code for MacOS goes here. Code shared between iOS and +// macOS is in platform-darwin.cc, while the POSIX-compatible are in in +// platform-posix.cc. 
-#include -#include -#include -#include -#include -#include - -#include - -#include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#undef MAP_TYPE - -#include "src/base/macros.h" -#include "src/base/platform/platform-posix-time.h" -#include "src/base/platform/platform-posix.h" +#include +#include + #include "src/base/platform/platform.h" namespace v8 { namespace base { -std::vector OS::GetSharedLibraryAddresses() { - std::vector result; - unsigned int images_count = _dyld_image_count(); - for (unsigned int i = 0; i < images_count; ++i) { - const mach_header* header = _dyld_get_image_header(i); - if (header == nullptr) continue; -#if V8_HOST_ARCH_I32 - unsigned int size; - char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size); -#else - uint64_t size; - char* code_ptr = getsectdatafromheader_64( - reinterpret_cast(header), SEG_TEXT, SECT_TEXT, - &size); -#endif - if (code_ptr == nullptr) continue; - const intptr_t slide = _dyld_get_image_vmaddr_slide(i); - const uintptr_t start = reinterpret_cast(code_ptr) + slide; - result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start, - start + size, slide)); +namespace { + +vm_prot_t GetVMProtFromMemoryPermission(OS::MemoryPermission access) { + switch (access) { + case OS::MemoryPermission::kNoAccess: + case OS::MemoryPermission::kNoAccessWillJitLater: + return VM_PROT_NONE; + case OS::MemoryPermission::kRead: + return VM_PROT_READ; + case OS::MemoryPermission::kReadWrite: + return VM_PROT_READ | VM_PROT_WRITE; + case OS::MemoryPermission::kReadWriteExecute: + return VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; + case OS::MemoryPermission::kReadExecute: + return VM_PROT_READ | VM_PROT_EXECUTE; } - return result; + UNREACHABLE(); } -void OS::SignalCodeMovingGC() {} - -TimezoneCache* OS::CreateTimezoneCache() { - return new PosixDefaultTimezoneCache(); +kern_return_t 
mach_vm_map_wrapper(mach_vm_address_t* address, + mach_vm_size_t size, int flags, + mach_port_t port, + memory_object_offset_t offset, + vm_prot_t prot) { + vm_prot_t current_prot = prot; + vm_prot_t maximum_prot = current_prot; + return mach_vm_map(mach_task_self(), address, size, 0, flags, port, offset, + FALSE, current_prot, maximum_prot, VM_INHERIT_NONE); } -void OS::AdjustSchedulingParams() { -#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 - { - // Check availability of scheduling params. - uint32_t val = 0; - size_t valSize = sizeof(val); - int rc = sysctlbyname("kern.tcsm_available", &val, &valSize, NULL, 0); - if (rc < 0 || !val) return; - } +} // namespace - { - // Adjust scheduling params. - uint32_t val = 1; - int rc = sysctlbyname("kern.tcsm_enable", NULL, NULL, &val, sizeof(val)); - DCHECK_GE(rc, 0); - USE(rc); - } -#endif +// static +PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) { + mach_vm_size_t vm_size = size; + mach_port_t port; + kern_return_t kr = mach_make_memory_entry_64( + mach_task_self(), &vm_size, 0, + MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port, + MACH_PORT_NULL); + if (kr != KERN_SUCCESS) return kInvalidSharedMemoryHandle; + return SharedMemoryHandleFromMachMemoryEntry(port); } -std::vector OS::GetFreeMemoryRangesWithin( - OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size, - size_t alignment) { - return {}; +// static +void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) { + DCHECK_NE(kInvalidSharedMemoryHandle, handle); + mach_port_t port = MachMemoryEntryFromSharedMemoryHandle(handle); + CHECK_EQ(KERN_SUCCESS, mach_port_deallocate(mach_task_self(), port)); } // static -Stack::StackSlot Stack::GetStackStart() { - return pthread_get_stackaddr_np(pthread_self()); +void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access, + PlatformSharedMemoryHandle handle, uint64_t offset) { + DCHECK_EQ(0, size % AllocatePageSize()); + + 
mach_vm_address_t addr = reinterpret_cast(hint); + vm_prot_t prot = GetVMProtFromMemoryPermission(access); + mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle); + kern_return_t kr = mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED, + shared_mem_port, offset, prot); + + if (kr != KERN_SUCCESS) { + // Retry without hint. + kr = mach_vm_map_wrapper(&addr, size, VM_FLAGS_ANYWHERE, shared_mem_port, + offset, prot); + } + + if (kr != KERN_SUCCESS) return nullptr; + return reinterpret_cast(addr); +} + +bool AddressSpaceReservation::AllocateShared(void* address, size_t size, + OS::MemoryPermission access, + PlatformSharedMemoryHandle handle, + uint64_t offset) { + DCHECK(Contains(address, size)); + + vm_prot_t prot = GetVMProtFromMemoryPermission(access); + mach_vm_address_t addr = reinterpret_cast(address); + mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle); + kern_return_t kr = + mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, + shared_mem_port, offset, prot); + return kr == KERN_SUCCESS; } } // namespace base diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc index f15800aa878419..325c40aec84e50 100644 --- a/deps/v8/src/base/platform/platform-openbsd.cc +++ b/deps/v8/src/base/platform/platform-openbsd.cc @@ -116,7 +116,7 @@ void OS::SignalCodeMovingGC() { void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0); DCHECK(addr != MAP_FAILED); - CHECK(OS::Free(addr, size)); + OS::Free(addr, size); fclose(f); } diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index 155af37155a4d7..280d7f88f83736 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -12,15 +12,15 @@ #if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__) #include // for pthread_set_name_np #endif +#include #include // for sched_yield 
#include -#include -#include - #include #include #include #include +#include +#include #if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \ defined(__NetBSD__) || defined(__OpenBSD__) #include // for sysctl @@ -46,8 +46,11 @@ #include #endif -#if V8_OS_MACOSX -#include +#if V8_OS_DARWIN || V8_OS_LINUX +#include // for dlsym +#endif + +#if V8_OS_DARWIN #include #endif @@ -65,7 +68,7 @@ #include #endif -#if V8_OS_FREEBSD || V8_OS_MACOSX || V8_OS_OPENBSD || V8_OS_SOLARIS +#if V8_OS_FREEBSD || V8_OS_DARWIN || V8_OS_OPENBSD || V8_OS_SOLARIS #define MAP_ANONYMOUS MAP_ANON #endif @@ -102,16 +105,16 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(RandomNumberGenerator, static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER; #if !V8_OS_FUCHSIA -#if V8_OS_MACOSX +#if V8_OS_DARWIN // kMmapFd is used to pass vm_alloc flags to tag the region with the user // defined tag 255 This helps identify V8-allocated regions in memory analysis // tools like vmmap(1). const int kMmapFd = VM_MAKE_TAG(255); -#else // !V8_OS_MACOSX +#else // !V8_OS_DARWIN const int kMmapFd = -1; -#endif // !V8_OS_MACOSX +#endif // !V8_OS_DARWIN -#if defined(V8_TARGET_OS_MACOSX) && V8_HOST_ARCH_ARM64 +#if defined(V8_TARGET_OS_MACOS) && V8_HOST_ARCH_ARM64 // During snapshot generation in cross builds, sysconf() runs on the Intel // host and returns host page size, while the snapshot needs to use the // target page size. @@ -153,7 +156,7 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access, flags |= MAP_LAZY; #endif // V8_OS_QNX } -#if V8_OS_MACOSX +#if V8_OS_DARWIN // MAP_JIT is required to obtain writable and executable pages when the // hardened runtime/memory protection is enabled, which is optional (via code // signing) on Intel-based Macs but mandatory on Apple silicon ones. 
See also @@ -161,7 +164,7 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access, if (access == OS::MemoryPermission::kNoAccessWillJitLater) { flags |= MAP_JIT; } -#endif // V8_OS_MACOSX +#endif // V8_OS_DARWIN return flags; } @@ -237,11 +240,17 @@ bool OS::ArmUsingHardFloat() { #endif // def __arm__ #endif -void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) { +void PosixInitializeCommon(bool hard_abort, const char* const gc_fake_mmap) { g_hard_abort = hard_abort; g_gc_fake_mmap = gc_fake_mmap; } +#if !V8_OS_FUCHSIA +void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) { + PosixInitializeCommon(hard_abort, gc_fake_mmap); +} +#endif // !V8_OS_FUCHSIA + int OS::ActivationFrameAlignment() { #if V8_TARGET_ARCH_ARM // On EABI ARM targets this is required for fp correctness in the @@ -263,7 +272,7 @@ int OS::ActivationFrameAlignment() { // static size_t OS::AllocatePageSize() { -#if defined(V8_TARGET_OS_MACOSX) && V8_HOST_ARCH_ARM64 +#if defined(V8_TARGET_OS_MACOS) && V8_HOST_ARCH_ARM64 return kAppleArmPageSize; #else static size_t page_size = static_cast(sysconf(_SC_PAGESIZE)); @@ -293,7 +302,7 @@ void* OS::GetRandomMmapAddr() { GetPlatformRandomNumberGenerator()->NextBytes(&raw_addr, sizeof(raw_addr)); } #if V8_HOST_ARCH_ARM64 -#if defined(V8_TARGET_OS_MACOSX) +#if defined(V8_TARGET_OS_MACOS) DCHECK_EQ(1 << 14, AllocatePageSize()); #endif // Keep the address page-aligned, AArch64 supports 4K, 16K and 64K @@ -400,14 +409,14 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment, if (aligned_base != base) { DCHECK_LT(base, aligned_base); size_t prefix_size = static_cast(aligned_base - base); - CHECK(Free(base, prefix_size)); + Free(base, prefix_size); request_size -= prefix_size; } // Unmap memory allocated after the potentially unaligned end. 
if (size != request_size) { DCHECK_LT(size, request_size); size_t suffix_size = request_size - size; - CHECK(Free(aligned_base + size, suffix_size)); + Free(aligned_base + size, suffix_size); request_size -= suffix_size; } @@ -422,17 +431,37 @@ void* OS::AllocateShared(size_t size, MemoryPermission access) { } // static -bool OS::Free(void* address, const size_t size) { +void OS::Free(void* address, size_t size) { DCHECK_EQ(0, reinterpret_cast(address) % AllocatePageSize()); DCHECK_EQ(0, size % AllocatePageSize()); - return munmap(address, size) == 0; + CHECK_EQ(0, munmap(address, size)); } +// macOS specific implementation in platform-macos.cc. +#if !defined(V8_OS_MACOS) // static -bool OS::Release(void* address, size_t size) { +void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access, + PlatformSharedMemoryHandle handle, uint64_t offset) { + DCHECK_EQ(0, size % AllocatePageSize()); + int prot = GetProtectionFromMemoryPermission(access); + int fd = FileDescriptorFromSharedMemoryHandle(handle); + void* result = mmap(hint, size, prot, MAP_SHARED, fd, offset); + if (result == MAP_FAILED) return nullptr; + return result; +} +#endif // !defined(V8_OS_MACOS) + +// static +void OS::FreeShared(void* address, size_t size) { + DCHECK_EQ(0, size % AllocatePageSize()); + CHECK_EQ(0, munmap(address, size)); +} + +// static +void OS::Release(void* address, size_t size) { DCHECK_EQ(0, reinterpret_cast(address) % CommitPageSize()); DCHECK_EQ(0, size % CommitPageSize()); - return munmap(address, size) == 0; + CHECK_EQ(0, munmap(address, size)); } // static @@ -445,7 +474,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { // MacOS 11.2 on Apple Silicon refuses to switch permissions from // rwx to none. Just use madvise instead. 
-#if defined(V8_OS_MACOSX) +#if defined(V8_OS_DARWIN) if (ret != 0 && access == OS::MemoryPermission::kNoAccess) { ret = madvise(address, size, MADV_FREE_REUSABLE); return ret == 0; @@ -463,7 +492,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { // The cost is a syscall that effectively no-ops. // TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary. // https://crbug.com/823915 -#if defined(V8_OS_MACOSX) +#if defined(V8_OS_DARWIN) if (access != OS::MemoryPermission::kNoAccess) madvise(address, size, MADV_FREE_REUSE); #endif @@ -473,33 +502,33 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { // static bool OS::DiscardSystemPages(void* address, size_t size) { + // Roughly based on PartitionAlloc's DiscardSystemPagesInternal + // (base/allocator/partition_allocator/page_allocator_internals_posix.h) DCHECK_EQ(0, reinterpret_cast(address) % CommitPageSize()); DCHECK_EQ(0, size % CommitPageSize()); -#if defined(V8_OS_MACOSX) +#if defined(V8_OS_DARWIN) // On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also // marks the pages with the reusable bit, which allows both Activity Monitor // and memory-infra to correctly track the pages. int ret = madvise(address, size, MADV_FREE_REUSABLE); + if (ret) { + // MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED. + ret = madvise(address, size, MADV_DONTNEED); + } #elif defined(_AIX) || defined(V8_OS_SOLARIS) int ret = madvise(reinterpret_cast(address), size, MADV_FREE); -#else - int ret = madvise(address, size, MADV_FREE); -#endif if (ret != 0 && errno == ENOSYS) return true; // madvise is not available on all systems. - if (ret != 0 && errno == EINVAL) { -// MADV_FREE only works on Linux 4.5+ . If request failed, retry with older -// MADV_DONTNEED . Note that MADV_FREE being defined at compile time doesn't -// imply runtime support. 
-#if defined(_AIX) || defined(V8_OS_SOLARIS) + if (ret != 0 && errno == EINVAL) ret = madvise(reinterpret_cast(address), size, MADV_DONTNEED); #else - ret = madvise(address, size, MADV_DONTNEED); + int ret = madvise(address, size, MADV_DONTNEED); #endif - } return ret == 0; } +#if !defined(_AIX) +// See AIX version for details. // static bool OS::DecommitPages(void* address, size_t size) { DCHECK_EQ(0, reinterpret_cast(address) % CommitPageSize()); @@ -514,6 +543,7 @@ bool OS::DecommitPages(void* address, size_t size) { MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); return ptr == address; } +#endif // !defined(_AIX) // static bool OS::CanReserveAddressSpace() { return true; } @@ -541,13 +571,48 @@ Optional OS::CreateAddressSpaceReservation( } // static -bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) { - return Free(reservation.base(), reservation.size()); +void OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) { + Free(reservation.base(), reservation.size()); +} + +// macOS specific implementation in platform-macos.cc. +#if !defined(V8_OS_MACOS) +// static +// Need to disable CFI_ICALL due to the indirect call to memfd_create. +DISABLE_CFI_ICALL +PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) { +#if V8_OS_LINUX && !V8_OS_ANDROID + // Use memfd_create if available, otherwise mkstemp. 
+ using memfd_create_t = int (*)(const char*, unsigned int); + memfd_create_t memfd_create = + reinterpret_cast(dlsym(RTLD_DEFAULT, "memfd_create")); + int fd = -1; + if (memfd_create) { + fd = memfd_create("V8MemFDForTesting", MFD_CLOEXEC); + } else { + char filename[] = "/tmp/v8_tmp_file_for_testing_XXXXXX"; + fd = mkstemp(filename); + if (fd != -1) CHECK_EQ(0, unlink(filename)); + } + if (fd == -1) return kInvalidSharedMemoryHandle; + CHECK_EQ(0, ftruncate(fd, size)); + return SharedMemoryHandleFromFileDescriptor(fd); +#else + return kInvalidSharedMemoryHandle; +#endif +} + +// static +void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) { + DCHECK_NE(kInvalidSharedMemoryHandle, handle); + int fd = FileDescriptorFromSharedMemoryHandle(handle); + CHECK_EQ(0, close(fd)); } +#endif // !defined(V8_OS_MACOS) // static bool OS::HasLazyCommits() { -#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX +#if V8_OS_AIX || V8_OS_LINUX || V8_OS_DARWIN return true; #else // TODO(bbudge) Return true for all POSIX platforms. @@ -669,7 +734,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, PosixMemoryMappedFile::~PosixMemoryMappedFile() { - if (memory_) CHECK(OS::Free(memory_, RoundUp(size_, OS::AllocatePageSize()))); + if (memory_) OS::Free(memory_, RoundUp(size_, OS::AllocatePageSize())); fclose(file_); } @@ -680,7 +745,7 @@ int OS::GetCurrentProcessId() { int OS::GetCurrentThreadId() { -#if V8_OS_MACOSX || (V8_OS_ANDROID && defined(__APPLE__)) +#if V8_OS_DARWIN || (V8_OS_ANDROID && defined(__APPLE__)) return static_cast(pthread_mach_thread_np(pthread_self())); #elif V8_OS_LINUX return static_cast(syscall(__NR_gettid)); @@ -885,6 +950,12 @@ bool AddressSpaceReservation::Allocate(void* address, size_t size, OS::MemoryPermission access) { // The region is already mmap'ed, so it just has to be made accessible now. DCHECK(Contains(address, size)); + if (access == OS::MemoryPermission::kNoAccess) { + // Nothing to do. 
We don't want to call SetPermissions with kNoAccess here + // as that will for example mark the pages as discardable, which is + // probably not desired here. + return true; + } return OS::SetPermissions(address, size, access); } @@ -893,6 +964,26 @@ bool AddressSpaceReservation::Free(void* address, size_t size) { return OS::DecommitPages(address, size); } +// macOS specific implementation in platform-macos.cc. +#if !defined(V8_OS_MACOS) +bool AddressSpaceReservation::AllocateShared(void* address, size_t size, + OS::MemoryPermission access, + PlatformSharedMemoryHandle handle, + uint64_t offset) { + DCHECK(Contains(address, size)); + int prot = GetProtectionFromMemoryPermission(access); + int fd = FileDescriptorFromSharedMemoryHandle(handle); + return mmap(address, size, prot, MAP_SHARED | MAP_FIXED, fd, offset) != + MAP_FAILED; +} +#endif // !defined(V8_OS_MACOS) + +bool AddressSpaceReservation::FreeShared(void* address, size_t size) { + DCHECK(Contains(address, size)); + return mmap(address, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, + -1, 0) == address; +} + bool AddressSpaceReservation::SetPermissions(void* address, size_t size, OS::MemoryPermission access) { DCHECK(Contains(address, size)); @@ -944,7 +1035,7 @@ static void SetThreadName(const char* name) { #elif V8_OS_NETBSD STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP); pthread_setname_np(pthread_self(), "%s", name); -#elif V8_OS_MACOSX +#elif V8_OS_DARWIN // pthread_setname_np is only available in 10.6 or later, so test // for it at runtime. 
int (*dynamic_pthread_setname_np)(const char*); @@ -990,7 +1081,7 @@ bool Thread::Start() { if (result != 0) return false; size_t stack_size = stack_size_; if (stack_size == 0) { -#if V8_OS_MACOSX +#if V8_OS_DARWIN // Default on Mac OS X is 512kB -- bump up to 1MB stack_size = 1 * 1024 * 1024; #elif V8_OS_AIX @@ -1139,7 +1230,7 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) { // pthread_getattr_np used below is non portable (hence the _np suffix). We // keep this version in POSIX as most Linux-compatible derivatives will // support it. MacOS and FreeBSD are different here. -#if !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && !defined(_AIX) && \ +#if !defined(V8_OS_FREEBSD) && !defined(V8_OS_DARWIN) && !defined(_AIX) && \ !defined(V8_OS_SOLARIS) // static @@ -1166,7 +1257,7 @@ Stack::StackSlot Stack::GetStackStart() { #endif // !defined(V8_LIBC_GLIBC) } -#endif // !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && +#endif // !defined(V8_OS_FREEBSD) && !defined(V8_OS_DARWIN) && // !defined(_AIX) && !defined(V8_OS_SOLARIS) // static diff --git a/deps/v8/src/base/platform/platform-posix.h b/deps/v8/src/base/platform/platform-posix.h index 7d732b4a8f1f56..38db2441444625 100644 --- a/deps/v8/src/base/platform/platform-posix.h +++ b/deps/v8/src/base/platform/platform-posix.h @@ -11,6 +11,8 @@ namespace v8 { namespace base { +void PosixInitializeCommon(bool hard_abort, const char* const gc_fake_mmap); + class PosixTimezoneCache : public TimezoneCache { public: double DaylightSavingsOffset(double time_ms) override; diff --git a/deps/v8/src/base/platform/platform-starboard.cc b/deps/v8/src/base/platform/platform-starboard.cc index a688c70692a706..c0cccbe122d3b8 100644 --- a/deps/v8/src/base/platform/platform-starboard.cc +++ b/deps/v8/src/base/platform/platform-starboard.cc @@ -172,14 +172,14 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, if (aligned_base != base) { DCHECK_LT(base, aligned_base); size_t prefix_size = 
static_cast(aligned_base - base); - CHECK(Free(base, prefix_size)); + Free(base, prefix_size); request_size -= prefix_size; } // Unmap memory allocated after the potentially unaligned end. if (size != request_size) { DCHECK_LT(size, request_size); size_t suffix_size = request_size - size; - CHECK(Free(aligned_base + size, suffix_size)); + Free(aligned_base + size, suffix_size); request_size -= suffix_size; } @@ -188,13 +188,13 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, } // static -bool OS::Free(void* address, const size_t size) { - return SbMemoryUnmap(address, size); +void OS::Free(void* address, const size_t size) { + CHECK(SbMemoryUnmap(address, size)); } // static -bool OS::Release(void* address, size_t size) { - return SbMemoryUnmap(address, size); +void OS::Release(void* address, size_t size) { + CHECK(SbMemoryUnmap(address, size)); } // static diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc index d00c4f5ebb977a..b696669142aaf3 100644 --- a/deps/v8/src/base/platform/platform-win32.cc +++ b/deps/v8/src/base/platform/platform-win32.cc @@ -722,9 +722,17 @@ void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) { g_hard_abort = hard_abort; } -typedef PVOID (*VirtualAlloc2_t)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, - MEM_EXTENDED_PARAMETER*, ULONG); -VirtualAlloc2_t VirtualAlloc2; +typedef PVOID(__stdcall* VirtualAlloc2_t)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, + MEM_EXTENDED_PARAMETER*, ULONG); +VirtualAlloc2_t VirtualAlloc2 = nullptr; + +typedef PVOID(__stdcall* MapViewOfFile3_t)(HANDLE, HANDLE, PVOID, ULONG64, + SIZE_T, ULONG, ULONG, + MEM_EXTENDED_PARAMETER*, ULONG); +MapViewOfFile3_t MapViewOfFile3 = nullptr; + +typedef PVOID(__stdcall* UnmapViewOfFile2_t)(HANDLE, PVOID, ULONG); +UnmapViewOfFile2_t UnmapViewOfFile2 = nullptr; void OS::EnsureWin32MemoryAPILoaded() { static bool loaded = false; @@ -732,6 +740,12 @@ void OS::EnsureWin32MemoryAPILoaded() { VirtualAlloc2 = 
(VirtualAlloc2_t)GetProcAddress( GetModuleHandle(L"kernelbase.dll"), "VirtualAlloc2"); + MapViewOfFile3 = (MapViewOfFile3_t)GetProcAddress( + GetModuleHandle(L"kernelbase.dll"), "MapViewOfFile3"); + + UnmapViewOfFile2 = (UnmapViewOfFile2_t)GetProcAddress( + GetModuleHandle(L"kernelbase.dll"), "UnmapViewOfFile2"); + loaded = true; } } @@ -815,43 +829,47 @@ DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) { UNREACHABLE(); } -void* VirtualAllocWrapper(void* hint, size_t size, DWORD flags, DWORD protect) { +// Desired access parameter for MapViewOfFile +DWORD GetFileViewAccessFromMemoryPermission(OS::MemoryPermission access) { + switch (access) { + case OS::MemoryPermission::kNoAccess: + case OS::MemoryPermission::kNoAccessWillJitLater: + case OS::MemoryPermission::kRead: + return FILE_MAP_READ; + case OS::MemoryPermission::kReadWrite: + return FILE_MAP_READ | FILE_MAP_WRITE; + default: + // Execute access is not supported + break; + } + UNREACHABLE(); +} + +void* VirtualAllocWrapper(void* address, size_t size, DWORD flags, + DWORD protect) { if (VirtualAlloc2) { - return VirtualAlloc2(nullptr, hint, size, flags, protect, NULL, 0); + return VirtualAlloc2(nullptr, address, size, flags, protect, NULL, 0); } else { - return VirtualAlloc(hint, size, flags, protect); + return VirtualAlloc(address, size, flags, protect); } } -uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect, - void* hint) { - LPVOID base = nullptr; - static BOOL use_aslr = -1; -#ifdef V8_HOST_ARCH_32_BIT - // Don't bother randomizing on 32-bit hosts, because they lack the room and - // don't have viable ASLR anyway. - if (use_aslr == -1 && !IsWow64Process(GetCurrentProcess(), &use_aslr)) - use_aslr = FALSE; -#else - use_aslr = TRUE; -#endif - - if (use_aslr && protect != PAGE_READWRITE) { - // For executable or reserved pages try to randomize the allocation address. 
- base = VirtualAllocWrapper(hint, size, flags, protect); - } +uint8_t* VirtualAllocWithHint(size_t size, DWORD flags, DWORD protect, + void* hint) { + LPVOID base = VirtualAllocWrapper(hint, size, flags, protect); // On failure, let the OS find an address to use. - if (base == nullptr) { + if (hint && base == nullptr) { base = VirtualAllocWrapper(nullptr, size, flags, protect); } + return reinterpret_cast(base); } void* AllocateInternal(void* hint, size_t size, size_t alignment, size_t page_size, DWORD flags, DWORD protect) { // First, try an exact size aligned allocation. - uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, hint); + uint8_t* base = VirtualAllocWithHint(size, flags, protect, hint); if (base == nullptr) return nullptr; // Can't allocate, we're OOM. // If address is suitably aligned, we're done. @@ -871,7 +889,7 @@ void* AllocateInternal(void* hint, size_t size, size_t alignment, const int kMaxAttempts = 3; aligned_base = nullptr; for (int i = 0; i < kMaxAttempts; ++i) { - base = RandomizedVirtualAlloc(padded_size, flags, protect, hint); + base = VirtualAllocWithHint(padded_size, flags, protect, hint); if (base == nullptr) return nullptr; // Can't allocate, we're OOM. 
// Try to trim the allocation by freeing the padded allocation and then @@ -909,18 +927,46 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment, } // static -bool OS::Free(void* address, const size_t size) { +void OS::Free(void* address, size_t size) { DCHECK_EQ(0, reinterpret_cast(address) % AllocatePageSize()); DCHECK_EQ(0, size % AllocatePageSize()); USE(size); - return VirtualFree(address, 0, MEM_RELEASE) != 0; + CHECK_NE(0, VirtualFree(address, 0, MEM_RELEASE)); +} + +// static +void* OS::AllocateShared(void* hint, size_t size, MemoryPermission permission, + PlatformSharedMemoryHandle handle, uint64_t offset) { + DCHECK_EQ(0, reinterpret_cast(hint) % AllocatePageSize()); + DCHECK_EQ(0, size % AllocatePageSize()); + DCHECK_EQ(0, offset % AllocatePageSize()); + + DWORD off_hi = static_cast(offset >> 32); + DWORD off_lo = static_cast(offset); + DWORD access = GetFileViewAccessFromMemoryPermission(permission); + + HANDLE file_mapping = FileMappingFromSharedMemoryHandle(handle); + void* result = + MapViewOfFileEx(file_mapping, access, off_hi, off_lo, size, hint); + + if (!result) { + // Retry without hint. 
+ result = MapViewOfFile(file_mapping, access, off_hi, off_lo, size); + } + + return result; +} + +// static +void OS::FreeShared(void* address, size_t size) { + CHECK(UnmapViewOfFile(address)); } // static -bool OS::Release(void* address, size_t size) { +void OS::Release(void* address, size_t size) { DCHECK_EQ(0, reinterpret_cast(address) % CommitPageSize()); DCHECK_EQ(0, size % CommitPageSize()); - return VirtualFree(address, size, MEM_DECOMMIT) != 0; + CHECK_NE(0, VirtualFree(address, size, MEM_DECOMMIT)); } // static @@ -977,7 +1023,10 @@ bool OS::DecommitPages(void* address, size_t size) { } // static -bool OS::CanReserveAddressSpace() { return VirtualAlloc2 != nullptr; } +bool OS::CanReserveAddressSpace() { + return VirtualAlloc2 != nullptr && MapViewOfFile3 != nullptr && + UnmapViewOfFile2 != nullptr; +} // static Optional OS::CreateAddressSpaceReservation( @@ -1001,8 +1050,23 @@ Optional OS::CreateAddressSpaceReservation( } // static -bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) { - return OS::Free(reservation.base(), reservation.size()); +void OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) { + OS::Free(reservation.base(), reservation.size()); +} + +// static +PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) { + HANDLE handle = CreateFileMapping(INVALID_HANDLE_VALUE, nullptr, + PAGE_READWRITE, 0, size, nullptr); + if (!handle) return kInvalidSharedMemoryHandle; + return SharedMemoryHandleFromFileMapping(handle); +} + +// static +void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) { + DCHECK_NE(kInvalidSharedMemoryHandle, handle); + HANDLE file_mapping = FileMappingFromSharedMemoryHandle(handle); + CHECK(CloseHandle(file_mapping)); } // static @@ -1159,7 +1223,7 @@ bool AddressSpaceReservation::Allocate(void* address, size_t size, ? 
MEM_RESERVE | MEM_REPLACE_PLACEHOLDER : MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER; DWORD protect = GetProtectionFromMemoryPermission(access); - return VirtualAlloc2(nullptr, address, size, flags, protect, NULL, 0); + return VirtualAlloc2(nullptr, address, size, flags, protect, nullptr, 0); } bool AddressSpaceReservation::Free(void* address, size_t size) { @@ -1167,6 +1231,26 @@ bool AddressSpaceReservation::Free(void* address, size_t size) { return VirtualFree(address, size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER); } +bool AddressSpaceReservation::AllocateShared(void* address, size_t size, + OS::MemoryPermission access, + PlatformSharedMemoryHandle handle, + uint64_t offset) { + DCHECK(Contains(address, size)); + CHECK(MapViewOfFile3); + + DWORD protect = GetProtectionFromMemoryPermission(access); + HANDLE file_mapping = FileMappingFromSharedMemoryHandle(handle); + return MapViewOfFile3(file_mapping, nullptr, address, offset, size, + MEM_REPLACE_PLACEHOLDER, protect, nullptr, 0); +} + +bool AddressSpaceReservation::FreeShared(void* address, size_t size) { + DCHECK(Contains(address, size)); + CHECK(UnmapViewOfFile2); + + return UnmapViewOfFile2(nullptr, address, MEM_PRESERVE_PLACEHOLDER); +} + bool AddressSpaceReservation::SetPermissions(void* address, size_t size, OS::MemoryPermission access) { DCHECK(Contains(address, size)); diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h index 53a7267889cd3d..0a359ad211f59b 100644 --- a/deps/v8/src/base/platform/platform.h +++ b/deps/v8/src/base/platform/platform.h @@ -26,6 +26,7 @@ #include #include +#include "include/v8-platform.h" #include "src/base/base-export.h" #include "src/base/build_config.h" #include "src/base/compiler-specific.h" @@ -84,7 +85,7 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { __readfsdword(kTibInlineTlsOffset + kSystemPointerSize * index)); } intptr_t extra = static_cast(__readfsdword(kTibExtraTlsOffset)); - DCHECK_NE(extra, 0); + if 
(!extra) return 0; return *reinterpret_cast(extra + kSystemPointerSize * (index - kMaxInlineSlots)); } @@ -144,10 +145,10 @@ class V8_BASE_EXPORT OS { // On Windows, ensure the newer memory API is loaded if available. This // includes function like VirtualAlloc2 and MapViewOfFile3. // TODO(chromium:1218005) this should probably happen as part of Initialize, - // but that is currently invoked too late, after the virtual memory cage - // is initialized. However, eventually the virtual memory cage initialization - // will happen as part of V8::Initialize, at which point this function can - // probably be merged into OS::Initialize. + // but that is currently invoked too late, after the sandbox is initialized. + // However, eventually the sandbox initialization will probably happen as + // part of V8::Initialize, at which point this function can probably be + // merged into OS::Initialize. static void EnsureWin32MemoryAPILoaded(); #endif @@ -196,12 +197,11 @@ class V8_BASE_EXPORT OS { static PRINTF_FORMAT(1, 0) void VPrintError(const char* format, va_list args); // Memory permissions. These should be kept in sync with the ones in - // v8::PageAllocator. + // v8::PageAllocator and v8::PagePermissions. enum class MemoryPermission { kNoAccess, kRead, kReadWrite, - // TODO(hpayer): Remove this flag. Memory should never be rwx. kReadWriteExecute, kReadExecute, // TODO(jkummerow): Remove this when Wasm has a platform-independent @@ -209,6 +209,11 @@ class V8_BASE_EXPORT OS { kNoAccessWillJitLater }; + // Helpers to create shared memory objects. Currently only used for testing. + static PlatformSharedMemoryHandle CreateSharedMemoryHandleForTesting( + size_t size); + static void DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle); + static bool HasLazyCommits(); // Sleep for a specified time interval. 
@@ -336,9 +341,15 @@ class V8_BASE_EXPORT OS { void* new_address, size_t size); - V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size); + static void Free(void* address, size_t size); + + V8_WARN_UNUSED_RESULT static void* AllocateShared( + void* address, size_t size, OS::MemoryPermission access, + PlatformSharedMemoryHandle handle, uint64_t offset); - V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size); + static void FreeShared(void* address, size_t size); + + static void Release(void* address, size_t size); V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size, MemoryPermission access); @@ -354,8 +365,7 @@ class V8_BASE_EXPORT OS { CreateAddressSpaceReservation(void* hint, size_t size, size_t alignment, MemoryPermission max_permission); - V8_WARN_UNUSED_RESULT static bool FreeAddressSpaceReservation( - AddressSpaceReservation reservation); + static void FreeAddressSpaceReservation(AddressSpaceReservation reservation); static const int msPerSecond = 1000; @@ -383,6 +393,10 @@ inline void EnsureConsoleOutput() { // // This class provides the same memory management functions as OS but operates // inside a previously reserved contiguous region of virtual address space. +// +// Reserved address space in which no pages have been allocated is guaranteed +// to be inaccessible and cause a fault on access. As such, creating guard +// regions requires no further action. 
class V8_BASE_EXPORT AddressSpaceReservation { public: using Address = uintptr_t; @@ -402,6 +416,13 @@ class V8_BASE_EXPORT AddressSpaceReservation { V8_WARN_UNUSED_RESULT bool Free(void* address, size_t size); + V8_WARN_UNUSED_RESULT bool AllocateShared(void* address, size_t size, + OS::MemoryPermission access, + PlatformSharedMemoryHandle handle, + uint64_t offset); + + V8_WARN_UNUSED_RESULT bool FreeShared(void* address, size_t size); + V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size, OS::MemoryPermission access); diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc index 2fc748da87b6bf..3e9f6334d9c4c3 100644 --- a/deps/v8/src/base/platform/semaphore.cc +++ b/deps/v8/src/base/platform/semaphore.cc @@ -4,7 +4,7 @@ #include "src/base/platform/semaphore.h" -#if V8_OS_MACOSX +#if V8_OS_DARWIN #include #elif V8_OS_WIN #include @@ -19,7 +19,7 @@ namespace v8 { namespace base { -#if V8_OS_MACOSX +#if V8_OS_DARWIN Semaphore::Semaphore(int count) { native_handle_ = dispatch_semaphore_create(count); @@ -174,7 +174,7 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) { return native_handle_.TakeWait(microseconds); } -#endif // V8_OS_MACOSX +#endif // V8_OS_DARWIN } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/platform/semaphore.h b/deps/v8/src/base/platform/semaphore.h index ec107bd290ebde..2d5b50bca92d50 100644 --- a/deps/v8/src/base/platform/semaphore.h +++ b/deps/v8/src/base/platform/semaphore.h @@ -11,7 +11,7 @@ #include "src/base/win32-headers.h" #endif -#if V8_OS_MACOSX +#if V8_OS_DARWIN #include #elif V8_OS_POSIX #include @@ -55,7 +55,7 @@ class V8_BASE_EXPORT Semaphore final { // the semaphore counter is decremented and true is returned. 
bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT; -#if V8_OS_MACOSX +#if V8_OS_DARWIN using NativeHandle = dispatch_semaphore_t; #elif V8_OS_POSIX using NativeHandle = sem_t; diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc index af214f0a6da308..5efa99879534db 100644 --- a/deps/v8/src/base/platform/time.cc +++ b/deps/v8/src/base/platform/time.cc @@ -9,7 +9,7 @@ #include #include #endif -#if V8_OS_MACOSX +#if V8_OS_DARWIN #include #include #include @@ -39,7 +39,7 @@ namespace { -#if V8_OS_MACOSX +#if V8_OS_DARWIN int64_t ComputeThreadTicks() { mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT; thread_basic_info_data_t thread_info_data; @@ -111,23 +111,37 @@ V8_INLINE int64_t ClockNow(clockid_t clk_id) { #endif } -V8_INLINE bool IsHighResolutionTimer(clockid_t clk_id) { - // Limit duration of timer resolution measurement to 100 ms. If we cannot - // measure timer resoltuion within this time, we assume a low resolution - // timer. - int64_t end = - ClockNow(clk_id) + 100 * v8::base::Time::kMicrosecondsPerMillisecond; - int64_t start, delta; - do { - start = ClockNow(clk_id); - // Loop until we can detect that the clock has changed. Non-HighRes timers - // will increment in chunks, i.e. 15ms. By spinning until we see a clock - // change, we detect the minimum time between measurements. - do { - delta = ClockNow(clk_id) - start; - } while (delta == 0); - } while (delta > 1 && start < end); - return delta <= 1; +V8_INLINE int64_t NanosecondsNow() { + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return int64_t{ts.tv_sec} * v8::base::Time::kNanosecondsPerSecond + + ts.tv_nsec; +} + +inline bool IsHighResolutionTimer(clockid_t clk_id) { + // Currently this is only needed for CLOCK_MONOTONIC. If other clocks need + // to be checked, care must be taken to support all platforms correctly; + // see ClockNow() above for precedent. 
+ DCHECK_EQ(clk_id, CLOCK_MONOTONIC); + int64_t previous = NanosecondsNow(); + // There should be enough attempts to make the loop run for more than one + // microsecond if the early return is not taken -- the elapsed time can't + // be measured in that situation, so we have to estimate it offline. + constexpr int kAttempts = 100; + for (int i = 0; i < kAttempts; i++) { + int64_t next = NanosecondsNow(); + int64_t delta = next - previous; + if (delta == 0) continue; + // We expect most systems to take this branch on the first iteration. + if (delta <= v8::base::Time::kNanosecondsPerMicrosecond) { + return true; + } + previous = next; + } + // As of 2022, we expect that the loop above has taken at least 2 μs (on + // a fast desktop). If we still haven't seen a non-zero clock increment + // in sub-microsecond range, assume a low resolution timer. + return false; } #elif V8_OS_WIN @@ -142,8 +156,7 @@ V8_INLINE uint64_t QPCNowRaw() { USE(result); return perf_counter_now.QuadPart; } -#endif // V8_OS_MACOSX - +#endif // V8_OS_DARWIN } // namespace @@ -231,8 +244,7 @@ int64_t TimeDelta::InNanoseconds() const { return delta_ * Time::kNanosecondsPerMicrosecond; } - -#if V8_OS_MACOSX +#if V8_OS_DARWIN TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) { DCHECK_GE(ts.tv_nsec, 0); @@ -252,8 +264,7 @@ struct mach_timespec TimeDelta::ToMachTimespec() const { return ts; } -#endif // V8_OS_MACOSX - +#endif // V8_OS_DARWIN #if V8_OS_POSIX @@ -463,16 +474,6 @@ Time Time::NowFromSystemTime() { return Now(); } #endif // V8_OS_STARBOARD -// static -TimeTicks TimeTicks::HighResolutionNow() { - // a DCHECK of TimeTicks::IsHighResolution() was removed from here - // as it turns out this path is used in the wild for logs and counters. 
- // - // TODO(hpayer) We may eventually want to split TimedHistograms based - // on low resolution clocks to avoid polluting metrics - return TimeTicks::Now(); -} - Time Time::FromJsTime(double ms_since_epoch) { // The epoch is a valid time, so this constructor doesn't interpret // 0 as the null time. @@ -709,7 +710,7 @@ bool TimeTicks::IsHighResolution() { TimeTicks TimeTicks::Now() { int64_t ticks; -#if V8_OS_MACOSX +#if V8_OS_DARWIN static struct mach_timebase_info info; if (info.denom == 0) { kern_return_t result = mach_timebase_info(&info); @@ -725,18 +726,18 @@ TimeTicks TimeTicks::Now() { #elif V8_OS_STARBOARD ticks = SbTimeGetMonotonicNow(); #else -#error platform does not implement TimeTicks::HighResolutionNow. -#endif // V8_OS_MACOSX +#error platform does not implement TimeTicks::Now. +#endif // V8_OS_DARWIN // Make sure we never return 0 here. return TimeTicks(ticks + 1); } // static bool TimeTicks::IsHighResolution() { -#if V8_OS_MACOSX +#if V8_OS_DARWIN return true; #elif V8_OS_POSIX - static bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC); + static const bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC); return is_high_resolution; #else return true; @@ -759,7 +760,7 @@ bool ThreadTicks::IsSupported() { // Thread CPU time accounting is unavailable in PASE return false; #elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \ - defined(V8_OS_MACOSX) || defined(V8_OS_ANDROID) || defined(V8_OS_SOLARIS) + defined(V8_OS_DARWIN) || defined(V8_OS_ANDROID) || defined(V8_OS_SOLARIS) return true; #elif defined(V8_OS_WIN) return IsSupportedWin(); @@ -780,7 +781,7 @@ ThreadTicks ThreadTicks::Now() { #else UNREACHABLE(); #endif -#elif V8_OS_MACOSX +#elif V8_OS_DARWIN return ThreadTicks(ComputeThreadTicks()); #elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \ defined(V8_OS_ANDROID) diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h index 
2fc7859dd74117..d4be4109f97b5b 100644 --- a/deps/v8/src/base/platform/time.h +++ b/deps/v8/src/base/platform/time.h @@ -433,11 +433,6 @@ class V8_BASE_EXPORT TimeTicks final // This method never returns a null TimeTicks. static TimeTicks Now(); - // This is equivalent to Now() but DCHECKs that IsHighResolution(). Useful for - // test frameworks that rely on high resolution clocks (in practice all - // platforms but low-end Windows devices have high resolution clocks). - static TimeTicks HighResolutionNow(); - // Returns true if the high-resolution clock is working on this system. static bool IsHighResolution(); diff --git a/deps/v8/src/utils/pointer-with-payload.h b/deps/v8/src/base/pointer-with-payload.h similarity index 68% rename from deps/v8/src/utils/pointer-with-payload.h rename to deps/v8/src/base/pointer-with-payload.h index 6200f410775d6e..94801a9af75bca 100644 --- a/deps/v8/src/utils/pointer-with-payload.h +++ b/deps/v8/src/base/pointer-with-payload.h @@ -2,21 +2,20 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#ifndef V8_UTILS_POINTER_WITH_PAYLOAD_H_ -#define V8_UTILS_POINTER_WITH_PAYLOAD_H_ +#ifndef V8_BASE_POINTER_WITH_PAYLOAD_H_ +#define V8_BASE_POINTER_WITH_PAYLOAD_H_ #include #include -#include "include/v8config.h" #include "src/base/logging.h" namespace v8 { -namespace internal { +namespace base { template struct PointerWithPayloadTraits { - static constexpr int value = + static constexpr int kAvailableBits = alignof(PointerType) >= 8 ? 3 : alignof(PointerType) >= 4 ? 
2 : 1; }; @@ -37,82 +36,83 @@ struct PointerWithPayloadTraits : public PointerWithPayloadTraits { // // Here we store a bool that needs 1 bit of storage state into the lower bits // of int *, which points to some int data; - template class PointerWithPayload { - // We have log2(ptr alignment) kAvailBits free to use - static constexpr int kAvailBits = PointerWithPayloadTraits< - typename std::remove_const::type>::value; - static_assert( - kAvailBits >= NumPayloadBits, - "Ptr does not have sufficient alignment for the selected amount of " - "storage bits."); - - static constexpr uintptr_t kPayloadMask = - (uintptr_t{1} << NumPayloadBits) - 1; - static constexpr uintptr_t kPointerMask = ~kPayloadMask; - public: PointerWithPayload() = default; explicit PointerWithPayload(PointerType* pointer) - : pointer_(reinterpret_cast(pointer)) { + : pointer_with_payload_(reinterpret_cast(pointer)) { DCHECK_EQ(GetPointer(), pointer); DCHECK_EQ(GetPayload(), static_cast(0)); } explicit PointerWithPayload(PayloadType payload) - : pointer_(static_cast(payload)) { + : pointer_with_payload_(static_cast(payload)) { DCHECK_EQ(GetPointer(), nullptr); DCHECK_EQ(GetPayload(), payload); } PointerWithPayload(PointerType* pointer, PayloadType payload) { - update(pointer, payload); + Update(pointer, payload); } V8_INLINE PointerType* GetPointer() const { - return reinterpret_cast(pointer_ & kPointerMask); + return reinterpret_cast(pointer_with_payload_ & kPointerMask); } // An optimized version of GetPointer for when we know the payload value. 
V8_INLINE PointerType* GetPointerWithKnownPayload(PayloadType payload) const { DCHECK_EQ(GetPayload(), payload); - return reinterpret_cast(pointer_ - + return reinterpret_cast(pointer_with_payload_ - static_cast(payload)); } V8_INLINE PointerType* operator->() const { return GetPointer(); } - V8_INLINE void update(PointerType* new_pointer, PayloadType new_payload) { - pointer_ = reinterpret_cast(new_pointer) | - static_cast(new_payload); + V8_INLINE void Update(PointerType* new_pointer, PayloadType new_payload) { + pointer_with_payload_ = reinterpret_cast(new_pointer) | + static_cast(new_payload); DCHECK_EQ(GetPayload(), new_payload); DCHECK_EQ(GetPointer(), new_pointer); } V8_INLINE void SetPointer(PointerType* newptr) { DCHECK_EQ(reinterpret_cast(newptr) & kPayloadMask, 0); - pointer_ = reinterpret_cast(newptr) | (pointer_ & kPayloadMask); + pointer_with_payload_ = reinterpret_cast(newptr) | + (pointer_with_payload_ & kPayloadMask); DCHECK_EQ(GetPointer(), newptr); } V8_INLINE PayloadType GetPayload() const { - return static_cast(pointer_ & kPayloadMask); + return static_cast(pointer_with_payload_ & kPayloadMask); } V8_INLINE void SetPayload(PayloadType new_payload) { uintptr_t new_payload_ptr = static_cast(new_payload); DCHECK_EQ(new_payload_ptr & kPayloadMask, new_payload_ptr); - pointer_ = (pointer_ & kPointerMask) | new_payload_ptr; + pointer_with_payload_ = + (pointer_with_payload_ & kPointerMask) | new_payload_ptr; DCHECK_EQ(GetPayload(), new_payload); } private: - uintptr_t pointer_ = 0; + static constexpr int kAvailableBits = PointerWithPayloadTraits< + typename std::remove_const::type>::kAvailableBits; + static_assert( + kAvailableBits >= NumPayloadBits, + "Ptr does not have sufficient alignment for the selected amount of " + "storage bits. 
Override PointerWithPayloadTraits to guarantee available " + "bits manually."); + + static constexpr uintptr_t kPayloadMask = + (uintptr_t{1} << NumPayloadBits) - 1; + static constexpr uintptr_t kPointerMask = ~kPayloadMask; + + uintptr_t pointer_with_payload_ = 0; }; -} // namespace internal +} // namespace base } // namespace v8 -#endif // V8_UTILS_POINTER_WITH_PAYLOAD_H_ +#endif // V8_BASE_POINTER_WITH_PAYLOAD_H_ diff --git a/deps/v8/src/base/safe_conversions_impl.h b/deps/v8/src/base/safe_conversions_impl.h index 5d9277df24972c..89a41740b0815f 100644 --- a/deps/v8/src/base/safe_conversions_impl.h +++ b/deps/v8/src/base/safe_conversions_impl.h @@ -12,6 +12,7 @@ #ifndef V8_BASE_SAFE_CONVERSIONS_IMPL_H_ #define V8_BASE_SAFE_CONVERSIONS_IMPL_H_ +#include #include #include @@ -195,7 +196,7 @@ class RangeCheck { public: constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound) : is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound) {} - constexpr RangeCheck() : is_underflow_(0), is_overflow_(0) {} + constexpr RangeCheck() : is_underflow_(false), is_overflow_(false) {} constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; } constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; } constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; } diff --git a/deps/v8/src/base/sanitizer/lsan-page-allocator.cc b/deps/v8/src/base/sanitizer/lsan-page-allocator.cc index bb52eb368fd61e..c50bb4611b9b3f 100644 --- a/deps/v8/src/base/sanitizer/lsan-page-allocator.cc +++ b/deps/v8/src/base/sanitizer/lsan-page-allocator.cc @@ -50,25 +50,21 @@ bool LsanPageAllocator::CanAllocateSharedPages() { } bool LsanPageAllocator::FreePages(void* address, size_t size) { - bool result = page_allocator_->FreePages(address, size); + CHECK(page_allocator_->FreePages(address, size)); #if defined(LEAK_SANITIZER) - if (result) { - __lsan_unregister_root_region(address, size); - } + __lsan_unregister_root_region(address, size); 
#endif - return result; + return true; } bool LsanPageAllocator::ReleasePages(void* address, size_t size, size_t new_size) { - bool result = page_allocator_->ReleasePages(address, size, new_size); + CHECK(page_allocator_->ReleasePages(address, size, new_size)); #if defined(LEAK_SANITIZER) - if (result) { - __lsan_unregister_root_region(address, size); - __lsan_register_root_region(address, new_size); - } + __lsan_unregister_root_region(address, size); + __lsan_register_root_region(address, new_size); #endif - return result; + return true; } } // namespace base diff --git a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc index 1877c44b7be866..cd8d0decae3354 100644 --- a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc +++ b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc @@ -17,7 +17,8 @@ namespace base { LsanVirtualAddressSpace::LsanVirtualAddressSpace( std::unique_ptr vas) : VirtualAddressSpace(vas->page_size(), vas->allocation_granularity(), - vas->base(), vas->size()), + vas->base(), vas->size(), + vas->max_page_permissions()), vas_(std::move(vas)) { DCHECK_NOT_NULL(vas_); } @@ -27,28 +28,45 @@ Address LsanVirtualAddressSpace::AllocatePages(Address hint, size_t size, PagePermissions permissions) { Address result = vas_->AllocatePages(hint, size, alignment, permissions); #if defined(LEAK_SANITIZER) - if (result != 0) { + if (result) { __lsan_register_root_region(reinterpret_cast(result), size); } #endif // defined(LEAK_SANITIZER) return result; } -bool LsanVirtualAddressSpace::FreePages(Address address, size_t size) { - bool result = vas_->FreePages(address, size); +void LsanVirtualAddressSpace::FreePages(Address address, size_t size) { + vas_->FreePages(address, size); +#if defined(LEAK_SANITIZER) + __lsan_unregister_root_region(reinterpret_cast(address), size); +#endif // defined(LEAK_SANITIZER) +} + +Address LsanVirtualAddressSpace::AllocateSharedPages( + Address hint, size_t 
size, PagePermissions permissions, + PlatformSharedMemoryHandle handle, uint64_t offset) { + Address result = + vas_->AllocateSharedPages(hint, size, permissions, handle, offset); #if defined(LEAK_SANITIZER) if (result) { - __lsan_unregister_root_region(reinterpret_cast(address), size); + __lsan_register_root_region(reinterpret_cast(result), size); } #endif // defined(LEAK_SANITIZER) return result; } +void LsanVirtualAddressSpace::FreeSharedPages(Address address, size_t size) { + vas_->FreeSharedPages(address, size); +#if defined(LEAK_SANITIZER) + __lsan_unregister_root_region(reinterpret_cast(address), size); +#endif // defined(LEAK_SANITIZER) +} + std::unique_ptr LsanVirtualAddressSpace::AllocateSubspace( Address hint, size_t size, size_t alignment, - PagePermissions max_permissions) { + PagePermissions max_page_permissions) { auto subspace = - vas_->AllocateSubspace(hint, size, alignment, max_permissions); + vas_->AllocateSubspace(hint, size, alignment, max_page_permissions); #if defined(LEAK_SANITIZER) if (subspace) { subspace = std::make_unique(std::move(subspace)); diff --git a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h index cc165617101292..00cd32a39f5f11 100644 --- a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h +++ b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h @@ -33,18 +33,33 @@ class V8_BASE_EXPORT LsanVirtualAddressSpace final Address AllocatePages(Address hint, size_t size, size_t alignment, PagePermissions permissions) override; - bool FreePages(Address address, size_t size) override; + void FreePages(Address address, size_t size) override; + + Address AllocateSharedPages(Address hint, size_t size, + PagePermissions permissions, + PlatformSharedMemoryHandle handle, + uint64_t offset) override; + + void FreeSharedPages(Address address, size_t size) override; bool SetPagePermissions(Address address, size_t size, PagePermissions permissions) override { return 
vas_->SetPagePermissions(address, size, permissions); } + bool AllocateGuardRegion(Address address, size_t size) override { + return vas_->AllocateGuardRegion(address, size); + } + + void FreeGuardRegion(Address address, size_t size) override { + vas_->FreeGuardRegion(address, size); + } + bool CanAllocateSubspaces() override { return vas_->CanAllocateSubspaces(); } std::unique_ptr AllocateSubspace( Address hint, size_t size, size_t alignment, - PagePermissions max_permissions) override; + PagePermissions max_page_permissions) override; bool DiscardSystemPages(Address address, size_t size) override { return vas_->DiscardSystemPages(address, size); diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc index 6f69e2aa9c5e4f..143aa4ae892743 100644 --- a/deps/v8/src/base/sys-info.cc +++ b/deps/v8/src/base/sys-info.cc @@ -63,7 +63,7 @@ int SysInfo::NumberOfProcessors() { // static int64_t SysInfo::AmountOfPhysicalMemory() { -#if V8_OS_MACOSX +#if V8_OS_DARWIN int mib[2] = {CTL_HW, HW_MEMSIZE}; int64_t memsize = 0; size_t len = sizeof(memsize); diff --git a/deps/v8/src/base/threaded-list.h b/deps/v8/src/base/threaded-list.h index 91c726474e1920..807ff4f2a8e81d 100644 --- a/deps/v8/src/base/threaded-list.h +++ b/deps/v8/src/base/threaded-list.h @@ -160,6 +160,15 @@ class ThreadedListBase final : public BaseClass { return *this; } + bool is_null() { return entry_ == nullptr; } + + void InsertBefore(T* value) { + T* old_entry_value = *entry_; + *entry_ = value; + entry_ = TLTraits::next(value); + *entry_ = old_entry_value; + } + Iterator() : entry_(nullptr) {} private: @@ -178,6 +187,10 @@ class ThreadedListBase final : public BaseClass { using reference = const value_type; using pointer = const value_type*; + // Allow implicit conversion to const iterator. 
+ // NOLINTNEXTLINE + ConstIterator(Iterator& iterator) : entry_(iterator.entry_) {} + public: ConstIterator& operator++() { entry_ = TLTraits::next(*entry_); diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc index f6f9dcfef2a43a..f6dc62893cf2f2 100644 --- a/deps/v8/src/base/utils/random-number-generator.cc +++ b/deps/v8/src/base/utils/random-number-generator.cc @@ -56,7 +56,7 @@ RandomNumberGenerator::RandomNumberGenerator() { DCHECK_EQ(0, result); USE(result); SetSeed((static_cast(first_half) << 32) + second_half); -#elif V8_OS_MACOSX || V8_OS_FREEBSD || V8_OS_OPENBSD +#elif V8_OS_DARWIN || V8_OS_FREEBSD || V8_OS_OPENBSD // Despite its prefix suggests it is not RC4 algorithm anymore. // It always succeeds while having decent performance and // no file descriptor involved. @@ -87,8 +87,7 @@ RandomNumberGenerator::RandomNumberGenerator() { // which provides reasonable entropy, see: // https://code.google.com/p/v8/issues/detail?id=2905 int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24; - seed ^= TimeTicks::HighResolutionNow().ToInternalValue() << 16; - seed ^= TimeTicks::Now().ToInternalValue() << 8; + seed ^= TimeTicks::Now().ToInternalValue(); SetSeed(seed); #endif // V8_OS_CYGWIN || V8_OS_WIN } diff --git a/deps/v8/src/base/virtual-address-space-page-allocator.cc b/deps/v8/src/base/virtual-address-space-page-allocator.cc index 297b9adbf95105..f88afdcc192b34 100644 --- a/deps/v8/src/base/virtual-address-space-page-allocator.cc +++ b/deps/v8/src/base/virtual-address-space-page-allocator.cc @@ -28,7 +28,8 @@ bool VirtualAddressSpacePageAllocator::FreePages(void* ptr, size_t size) { size = result->second; resized_allocations_.erase(result); } - return vas_->FreePages(address, size); + vas_->FreePages(address, size); + return true; } bool VirtualAddressSpacePageAllocator::ReleasePages(void* ptr, size_t size, @@ -46,7 +47,8 @@ bool VirtualAddressSpacePageAllocator::ReleasePages(void* ptr, 
size_t size, // Will fail if the allocation was resized previously, which is desired. Address address = reinterpret_cast
(ptr); resized_allocations_.insert({address, size}); - return vas_->DecommitPages(address + new_size, size - new_size); + CHECK(vas_->DecommitPages(address + new_size, size - new_size)); + return true; } bool VirtualAddressSpacePageAllocator::SetPermissions( diff --git a/deps/v8/src/base/virtual-address-space.cc b/deps/v8/src/base/virtual-address-space.cc index 9907facb57e4c4..6ef95f5ca88006 100644 --- a/deps/v8/src/base/virtual-address-space.cc +++ b/deps/v8/src/base/virtual-address-space.cc @@ -26,10 +26,34 @@ STATIC_ASSERT_ENUM(PagePermissions::kReadExecute, #undef STATIC_ASSERT_ENUM +namespace { +uint8_t PagePermissionsToBitset(PagePermissions permissions) { + switch (permissions) { + case PagePermissions::kNoAccess: + return 0b000; + case PagePermissions::kRead: + return 0b100; + case PagePermissions::kReadWrite: + return 0b110; + case PagePermissions::kReadWriteExecute: + return 0b111; + case PagePermissions::kReadExecute: + return 0b101; + } +} +} // namespace + +bool IsSubset(PagePermissions lhs, PagePermissions rhs) { + uint8_t lhs_bits = PagePermissionsToBitset(lhs); + uint8_t rhs_bits = PagePermissionsToBitset(rhs); + return (lhs_bits & rhs_bits) == lhs_bits; +} + VirtualAddressSpace::VirtualAddressSpace() : VirtualAddressSpaceBase(OS::CommitPageSize(), OS::AllocatePageSize(), kNullAddress, - std::numeric_limits::max()) { + std::numeric_limits::max(), + PagePermissions::kReadWriteExecute) { #if V8_OS_WIN // On Windows, this additional step is required to lookup the VirtualAlloc2 // and friends functions. 
@@ -61,11 +85,11 @@ Address VirtualAddressSpace::AllocatePages(Address hint, size_t size, static_cast(permissions))); } -bool VirtualAddressSpace::FreePages(Address address, size_t size) { +void VirtualAddressSpace::FreePages(Address address, size_t size) { DCHECK(IsAligned(address, allocation_granularity())); DCHECK(IsAligned(size, allocation_granularity())); - return OS::Free(reinterpret_cast(address), size); + OS::Free(reinterpret_cast(address), size); } bool VirtualAddressSpace::SetPagePermissions(Address address, size_t size, @@ -77,13 +101,52 @@ bool VirtualAddressSpace::SetPagePermissions(Address address, size_t size, static_cast(permissions)); } +bool VirtualAddressSpace::AllocateGuardRegion(Address address, size_t size) { + DCHECK(IsAligned(address, allocation_granularity())); + DCHECK(IsAligned(size, allocation_granularity())); + + void* hint = reinterpret_cast(address); + void* result = OS::Allocate(hint, size, allocation_granularity(), + OS::MemoryPermission::kNoAccess); + if (result && result != hint) { + OS::Free(result, size); + } + return result == hint; +} + +void VirtualAddressSpace::FreeGuardRegion(Address address, size_t size) { + DCHECK(IsAligned(address, allocation_granularity())); + DCHECK(IsAligned(size, allocation_granularity())); + + OS::Free(reinterpret_cast(address), size); +} + bool VirtualAddressSpace::CanAllocateSubspaces() { return OS::CanReserveAddressSpace(); } +Address VirtualAddressSpace::AllocateSharedPages( + Address hint, size_t size, PagePermissions permissions, + PlatformSharedMemoryHandle handle, uint64_t offset) { + DCHECK(IsAligned(hint, allocation_granularity())); + DCHECK(IsAligned(size, allocation_granularity())); + DCHECK(IsAligned(offset, allocation_granularity())); + + return reinterpret_cast
(OS::AllocateShared( + reinterpret_cast(hint), size, + static_cast(permissions), handle, offset)); +} + +void VirtualAddressSpace::FreeSharedPages(Address address, size_t size) { + DCHECK(IsAligned(address, allocation_granularity())); + DCHECK(IsAligned(size, allocation_granularity())); + + OS::FreeShared(reinterpret_cast(address), size); +} + std::unique_ptr VirtualAddressSpace::AllocateSubspace( Address hint, size_t size, size_t alignment, - PagePermissions max_permissions) { + PagePermissions max_page_permissions) { DCHECK(IsAligned(alignment, allocation_granularity())); DCHECK(IsAligned(hint, alignment)); DCHECK(IsAligned(size, allocation_granularity())); @@ -91,11 +154,11 @@ std::unique_ptr VirtualAddressSpace::AllocateSubspace( base::Optional reservation = OS::CreateAddressSpaceReservation( reinterpret_cast(hint), size, alignment, - static_cast(max_permissions)); + static_cast(max_page_permissions)); if (!reservation.has_value()) return std::unique_ptr(); return std::unique_ptr( - new VirtualAddressSubspace(*reservation, this)); + new VirtualAddressSubspace(*reservation, this, max_page_permissions)); } bool VirtualAddressSpace::DiscardSystemPages(Address address, size_t size) { @@ -112,15 +175,17 @@ bool VirtualAddressSpace::DecommitPages(Address address, size_t size) { return OS::DecommitPages(reinterpret_cast(address), size); } -bool VirtualAddressSpace::FreeSubspace(VirtualAddressSubspace* subspace) { - return OS::FreeAddressSpaceReservation(subspace->reservation_); +void VirtualAddressSpace::FreeSubspace(VirtualAddressSubspace* subspace) { + OS::FreeAddressSpaceReservation(subspace->reservation_); } VirtualAddressSubspace::VirtualAddressSubspace( - AddressSpaceReservation reservation, VirtualAddressSpaceBase* parent_space) - : VirtualAddressSpaceBase( - parent_space->page_size(), parent_space->allocation_granularity(), - reinterpret_cast
(reservation.base()), reservation.size()), + AddressSpaceReservation reservation, VirtualAddressSpaceBase* parent_space, + PagePermissions max_page_permissions) + : VirtualAddressSpaceBase(parent_space->page_size(), + parent_space->allocation_granularity(), + reinterpret_cast
(reservation.base()), + reservation.size(), max_page_permissions), reservation_(reservation), region_allocator_(reinterpret_cast
(reservation.base()), reservation.size(), @@ -141,7 +206,7 @@ VirtualAddressSubspace::VirtualAddressSubspace( } VirtualAddressSubspace::~VirtualAddressSubspace() { - CHECK(parent_space_->FreeSubspace(this)); + parent_space_->FreeSubspace(this); } void VirtualAddressSubspace::SetRandomSeed(int64_t seed) { @@ -153,7 +218,7 @@ Address VirtualAddressSubspace::RandomPageAddress() { MutexGuard guard(&mutex_); // Note: the random numbers generated here aren't uniformly distributed if the // size isn't a power of two. - Address addr = base() + (rng_.NextInt64() % size()); + Address addr = base() + (static_cast(rng_.NextInt64()) % size()); return RoundDown(addr, allocation_granularity()); } @@ -163,6 +228,7 @@ Address VirtualAddressSubspace::AllocatePages(Address hint, size_t size, DCHECK(IsAligned(alignment, allocation_granularity())); DCHECK(IsAligned(hint, alignment)); DCHECK(IsAligned(size, allocation_granularity())); + DCHECK(IsSubset(permissions, max_page_permissions())); MutexGuard guard(&mutex_); @@ -179,38 +245,91 @@ Address VirtualAddressSubspace::AllocatePages(Address hint, size_t size, return address; } -bool VirtualAddressSubspace::FreePages(Address address, size_t size) { +void VirtualAddressSubspace::FreePages(Address address, size_t size) { DCHECK(IsAligned(address, allocation_granularity())); DCHECK(IsAligned(size, allocation_granularity())); MutexGuard guard(&mutex_); - if (region_allocator_.CheckRegion(address) != size) return false; - // The order here is important: on Windows, the allocation first has to be // freed to a placeholder before the placeholder can be merged (during the // merge_callback) with any surrounding placeholder mappings. 
CHECK(reservation_.Free(reinterpret_cast(address), size)); CHECK_EQ(size, region_allocator_.FreeRegion(address)); - return true; } bool VirtualAddressSubspace::SetPagePermissions(Address address, size_t size, PagePermissions permissions) { DCHECK(IsAligned(address, page_size())); DCHECK(IsAligned(size, page_size())); + DCHECK(IsSubset(permissions, max_page_permissions())); return reservation_.SetPermissions( reinterpret_cast(address), size, static_cast(permissions)); } +bool VirtualAddressSubspace::AllocateGuardRegion(Address address, size_t size) { + DCHECK(IsAligned(address, allocation_granularity())); + DCHECK(IsAligned(size, allocation_granularity())); + + MutexGuard guard(&mutex_); + + // It is guaranteed that reserved address space is inaccessible, so we just + // need to mark the region as in-use in the region allocator. + return region_allocator_.AllocateRegionAt(address, size); +} + +void VirtualAddressSubspace::FreeGuardRegion(Address address, size_t size) { + DCHECK(IsAligned(address, allocation_granularity())); + DCHECK(IsAligned(size, allocation_granularity())); + + MutexGuard guard(&mutex_); + CHECK_EQ(size, region_allocator_.FreeRegion(address)); +} + +Address VirtualAddressSubspace::AllocateSharedPages( + Address hint, size_t size, PagePermissions permissions, + PlatformSharedMemoryHandle handle, uint64_t offset) { + DCHECK(IsAligned(hint, allocation_granularity())); + DCHECK(IsAligned(size, allocation_granularity())); + DCHECK(IsAligned(offset, allocation_granularity())); + + MutexGuard guard(&mutex_); + + Address address = + region_allocator_.AllocateRegion(hint, size, allocation_granularity()); + if (address == RegionAllocator::kAllocationFailure) return kNullAddress; + + if (!reservation_.AllocateShared( + reinterpret_cast(address), size, + static_cast(permissions), handle, offset)) { + CHECK_EQ(size, region_allocator_.FreeRegion(address)); + return kNullAddress; + } + + return address; +} + +void VirtualAddressSubspace::FreeSharedPages(Address 
address, size_t size) { + DCHECK(IsAligned(address, allocation_granularity())); + DCHECK(IsAligned(size, allocation_granularity())); + + MutexGuard guard(&mutex_); + // The order here is important: on Windows, the allocation first has to be + // freed to a placeholder before the placeholder can be merged (during the + // merge_callback) with any surrounding placeholder mappings. + CHECK(reservation_.FreeShared(reinterpret_cast(address), size)); + CHECK_EQ(size, region_allocator_.FreeRegion(address)); +} + std::unique_ptr VirtualAddressSubspace::AllocateSubspace(Address hint, size_t size, size_t alignment, - PagePermissions max_permissions) { + PagePermissions max_page_permissions) { DCHECK(IsAligned(alignment, allocation_granularity())); DCHECK(IsAligned(hint, alignment)); DCHECK(IsAligned(size, allocation_granularity())); + DCHECK(IsSubset(max_page_permissions, this->max_page_permissions())); MutexGuard guard(&mutex_); @@ -222,13 +341,13 @@ VirtualAddressSubspace::AllocateSubspace(Address hint, size_t size, base::Optional reservation = reservation_.CreateSubReservation( reinterpret_cast(address), size, - static_cast(max_permissions)); + static_cast(max_page_permissions)); if (!reservation.has_value()) { CHECK_EQ(size, region_allocator_.FreeRegion(address)); return nullptr; } return std::unique_ptr( - new VirtualAddressSubspace(*reservation, this)); + new VirtualAddressSubspace(*reservation, this, max_page_permissions)); } bool VirtualAddressSubspace::DiscardSystemPages(Address address, size_t size) { @@ -246,16 +365,13 @@ bool VirtualAddressSubspace::DecommitPages(Address address, size_t size) { return reservation_.DecommitPages(reinterpret_cast(address), size); } -bool VirtualAddressSubspace::FreeSubspace(VirtualAddressSubspace* subspace) { +void VirtualAddressSubspace::FreeSubspace(VirtualAddressSubspace* subspace) { MutexGuard guard(&mutex_); AddressSpaceReservation reservation = subspace->reservation_; Address base = reinterpret_cast
(reservation.base()); - if (region_allocator_.FreeRegion(base) != reservation.size()) { - return false; - } - - return reservation_.FreeSubReservation(reservation); + CHECK_EQ(reservation.size(), region_allocator_.FreeRegion(base)); + CHECK(reservation_.FreeSubReservation(reservation)); } } // namespace base diff --git a/deps/v8/src/base/virtual-address-space.h b/deps/v8/src/base/virtual-address-space.h index 5cfe462079987f..36813677778984 100644 --- a/deps/v8/src/base/virtual-address-space.h +++ b/deps/v8/src/base/virtual-address-space.h @@ -32,9 +32,15 @@ class VirtualAddressSpaceBase // Called by a subspace during destruction. Responsible for freeing the // address space reservation and any other data associated with the subspace // in the parent space. - virtual bool FreeSubspace(VirtualAddressSubspace* subspace) = 0; + virtual void FreeSubspace(VirtualAddressSubspace* subspace) = 0; }; +/* + * Helper routine to determine whether one set of page permissions (the lhs) is + * a subset of another one (the rhs). + */ +V8_BASE_EXPORT bool IsSubset(PagePermissions lhs, PagePermissions rhs); + /* * The virtual address space of the current process. Conceptionally, there * should only be one such "root" instance. 
However, in practice there is no @@ -53,23 +59,34 @@ class V8_BASE_EXPORT VirtualAddressSpace : public VirtualAddressSpaceBase { Address AllocatePages(Address hint, size_t size, size_t alignment, PagePermissions access) override; - bool FreePages(Address address, size_t size) override; + void FreePages(Address address, size_t size) override; bool SetPagePermissions(Address address, size_t size, PagePermissions access) override; + bool AllocateGuardRegion(Address address, size_t size) override; + + void FreeGuardRegion(Address address, size_t size) override; + + Address AllocateSharedPages(Address hint, size_t size, + PagePermissions permissions, + PlatformSharedMemoryHandle handle, + uint64_t offset) override; + + void FreeSharedPages(Address address, size_t size) override; + bool CanAllocateSubspaces() override; std::unique_ptr AllocateSubspace( Address hint, size_t size, size_t alignment, - PagePermissions max_permissions) override; + PagePermissions max_page_permissions) override; bool DiscardSystemPages(Address address, size_t size) override; bool DecommitPages(Address address, size_t size) override; private: - bool FreeSubspace(VirtualAddressSubspace* subspace) override; + void FreeSubspace(VirtualAddressSubspace* subspace) override; }; /* @@ -87,16 +104,27 @@ class V8_BASE_EXPORT VirtualAddressSubspace : public VirtualAddressSpaceBase { Address AllocatePages(Address hint, size_t size, size_t alignment, PagePermissions permissions) override; - bool FreePages(Address address, size_t size) override; + void FreePages(Address address, size_t size) override; bool SetPagePermissions(Address address, size_t size, PagePermissions permissions) override; + bool AllocateGuardRegion(Address address, size_t size) override; + + void FreeGuardRegion(Address address, size_t size) override; + + Address AllocateSharedPages(Address hint, size_t size, + PagePermissions permissions, + PlatformSharedMemoryHandle handle, + uint64_t offset) override; + + void FreeSharedPages(Address 
address, size_t size) override; + bool CanAllocateSubspaces() override { return true; } std::unique_ptr AllocateSubspace( Address hint, size_t size, size_t alignment, - PagePermissions max_permissions) override; + PagePermissions max_page_permissions) override; bool DiscardSystemPages(Address address, size_t size) override; @@ -107,10 +135,11 @@ class V8_BASE_EXPORT VirtualAddressSubspace : public VirtualAddressSpaceBase { // allocating sub spaces. friend class v8::base::VirtualAddressSpace; - bool FreeSubspace(VirtualAddressSubspace* subspace) override; + void FreeSubspace(VirtualAddressSubspace* subspace) override; VirtualAddressSubspace(AddressSpaceReservation reservation, - VirtualAddressSpaceBase* parent_space); + VirtualAddressSpaceBase* parent_space, + PagePermissions max_page_permissions); // The address space reservation backing this subspace. AddressSpaceReservation reservation_; diff --git a/deps/v8/src/base/vlq-base64.h b/deps/v8/src/base/vlq-base64.h index 5d8633798bcf30..1a06750d0867b4 100644 --- a/deps/v8/src/base/vlq-base64.h +++ b/deps/v8/src/base/vlq-base64.h @@ -5,7 +5,8 @@ #ifndef V8_BASE_VLQ_BASE64_H_ #define V8_BASE_VLQ_BASE64_H_ -#include +#include +#include #include "src/base/base-export.h" diff --git a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h index db3c05ce18ae55..f77b85e2ef20c6 100644 --- a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h +++ b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h @@ -83,6 +83,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand( interpreter::Register interpreter_register) { return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize); } +void BaselineAssembler::RegisterFrameAddress( + interpreter::Register interpreter_register, Register rscratch) { + return __ add(rscratch, fp, + Operand(interpreter_register.ToOperand() * kSystemPointerSize)); +} MemOperand BaselineAssembler::FeedbackVectorOperand() { return 
MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } @@ -474,7 +479,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ LoadContext(kContextRegister); __ LoadFunction(kJSFunctionRegister); __ Push(kJSFunctionRegister); - __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1); + __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1); __ Pop(kInterpreterAccumulatorRegister, params_size); __ masm()->SmiUntag(params_size); @@ -502,9 +507,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { // Drop receiver + arguments. __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - kJSArgcIncludesReceiver - ? TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + TurboAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h index 7824f92c2aa7ef..b08ac0d7ac839c 100644 --- a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h +++ b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h @@ -79,6 +79,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand( interpreter::Register interpreter_register) { return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize); } +void BaselineAssembler::RegisterFrameAddress( + interpreter::Register interpreter_register, Register rscratch) { + return __ Add(rscratch, fp, + interpreter_register.ToOperand() * kSystemPointerSize); +} MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } @@ -557,7 +562,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ LoadContext(kContextRegister); __ LoadFunction(kJSFunctionRegister); __ masm()->PushArgument(kJSFunctionRegister); - __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1); + __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1); __ 
masm()->Pop(kInterpreterAccumulatorRegister, params_size); __ masm()->SmiUntag(params_size); @@ -583,10 +588,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. - __ masm()->DropArguments(params_size, - kJSArgcIncludesReceiver - ? TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + __ masm()->DropArguments(params_size, TurboAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/deps/v8/src/baseline/baseline-assembler.h b/deps/v8/src/baseline/baseline-assembler.h index b8c876a8d373f6..e0fe720bc4d820 100644 --- a/deps/v8/src/baseline/baseline-assembler.h +++ b/deps/v8/src/baseline/baseline-assembler.h @@ -26,6 +26,8 @@ class BaselineAssembler { explicit BaselineAssembler(MacroAssembler* masm) : masm_(masm) {} inline static MemOperand RegisterFrameOperand( interpreter::Register interpreter_register); + inline void RegisterFrameAddress(interpreter::Register interpreter_register, + Register rscratch); inline MemOperand ContextOperand(); inline MemOperand FunctionOperand(); inline MemOperand FeedbackVectorOperand(); diff --git a/deps/v8/src/baseline/baseline-batch-compiler.cc b/deps/v8/src/baseline/baseline-batch-compiler.cc index fe0e9d84cc007f..e0f528bcbe2127 100644 --- a/deps/v8/src/baseline/baseline-batch-compiler.cc +++ b/deps/v8/src/baseline/baseline-batch-compiler.cc @@ -27,6 +27,12 @@ namespace v8 { namespace internal { namespace baseline { +static bool CanCompileWithConcurrentBaseline(SharedFunctionInfo shared, + Isolate* isolate) { + return !shared.is_compiled() || shared.HasBaselineCode() || + !CanCompileWithBaseline(isolate, shared); +} + class BaselineCompilerTask { public: BaselineCompilerTask(Isolate* isolate, PersistentHandles* handles, @@ -60,8 +66,7 @@ class BaselineCompilerTask { } // Don't install the code if the bytecode has been flushed or has // already some baseline code installed. 
- if (!shared_function_info_->is_compiled() || - shared_function_info_->HasBaselineCode()) { + if (!CanCompileWithConcurrentBaseline(*shared_function_info_, isolate)) { return; } shared_function_info_->set_baseline_code(ToCodeT(*code), kReleaseStore); @@ -90,8 +95,7 @@ class BaselineCompilerTask { class BaselineBatchCompilerJob { public: BaselineBatchCompilerJob(Isolate* isolate, Handle task_queue, - int batch_size) - : isolate_for_local_isolate_(isolate) { + int batch_size) { handles_ = isolate->NewPersistentHandles(); tasks_.reserve(batch_size); for (int i = 0; i < batch_size; i++) { @@ -103,7 +107,7 @@ class BaselineBatchCompilerJob { if (!maybe_sfi.GetHeapObjectIfWeak(&obj)) continue; // Skip functions where the bytecode has been flushed. SharedFunctionInfo shared = SharedFunctionInfo::cast(obj); - if (ShouldSkipFunction(shared)) continue; + if (CanCompileWithConcurrentBaseline(shared, isolate)) continue; tasks_.emplace_back(isolate, handles_.get(), shared); } if (FLAG_trace_baseline_concurrent_compilation) { @@ -113,34 +117,14 @@ class BaselineBatchCompilerJob { } } - bool ShouldSkipFunction(SharedFunctionInfo shared) { - return !shared.is_compiled() || shared.HasBaselineCode() || - !CanCompileWithBaseline(isolate_for_local_isolate_, shared); - } - // Executed in the background thread. 
- void Compile() { -#ifdef V8_RUNTIME_CALL_STATS - WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope( - isolate_for_local_isolate_->counters() - ->worker_thread_runtime_call_stats()); - LocalIsolate local_isolate(isolate_for_local_isolate_, - ThreadKind::kBackground, - runtime_call_stats_scope.Get()); -#else - LocalIsolate local_isolate(isolate_for_local_isolate_, - ThreadKind::kBackground); -#endif - local_isolate.heap()->AttachPersistentHandles(std::move(handles_)); - UnparkedScope unparked_scope(&local_isolate); - LocalHandleScope handle_scope(&local_isolate); - + void Compile(LocalIsolate* local_isolate) { + local_isolate->heap()->AttachPersistentHandles(std::move(handles_)); for (auto& task : tasks_) { - task.Compile(&local_isolate); + task.Compile(local_isolate); } - // Get the handle back since we'd need them to install the code later. - handles_ = local_isolate.heap()->DetachPersistentHandles(); + handles_ = local_isolate->heap()->DetachPersistentHandles(); } // Executed in the main thread. @@ -151,7 +135,6 @@ class BaselineBatchCompilerJob { } private: - Isolate* isolate_for_local_isolate_; std::vector tasks_; std::unique_ptr handles_; }; @@ -169,14 +152,19 @@ class ConcurrentBaselineCompiler { outgoing_queue_(outcoming_queue) {} void Run(JobDelegate* delegate) override { + LocalIsolate local_isolate(isolate_, ThreadKind::kBackground); + UnparkedScope unparked_scope(&local_isolate); + LocalHandleScope handle_scope(&local_isolate); + + // Since we're going to compile an entire batch, this guarantees that + // we only switch back the memory chunks to RX at the end. + CodePageCollectionMemoryModificationScope batch_alloc(isolate_->heap()); + while (!incoming_queue_->IsEmpty() && !delegate->ShouldYield()) { - // Since we're going to compile an entire batch, this guarantees that - // we only switch back the memory chunks to RX at the end. 
- CodePageCollectionMemoryModificationScope batch_alloc(isolate_->heap()); std::unique_ptr job; if (!incoming_queue_->Dequeue(&job)) break; DCHECK_NOT_NULL(job); - job->Compile(); + job->Compile(&local_isolate); outgoing_queue_->Enqueue(std::move(job)); } isolate_->stack_guard()->RequestInstallBaselineCode(); diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc index 3ef0c68727b258..e0575350200969 100644 --- a/deps/v8/src/baseline/baseline-compiler.cc +++ b/deps/v8/src/baseline/baseline-compiler.cc @@ -586,7 +586,7 @@ void BaselineCompiler::UpdateInterruptBudgetAndJumpToLabel( if (weight < 0) { SaveAccumulatorScope accumulator_scope(&basm_); - CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheckFromBytecode, + CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck, __ FunctionOperand()); } } @@ -841,13 +841,13 @@ void BaselineCompiler::VisitMov() { StoreRegister(1, scratch); } -void BaselineCompiler::VisitLdaNamedProperty() { +void BaselineCompiler::VisitGetNamedProperty() { CallBuiltin(RegisterOperand(0), // object Constant(1), // name IndexAsTagged(2)); // slot } -void BaselineCompiler::VisitLdaNamedPropertyFromSuper() { +void BaselineCompiler::VisitGetNamedPropertyFromSuper() { __ LoadPrototype( LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister(), kInterpreterAccumulatorRegister); @@ -860,7 +860,7 @@ void BaselineCompiler::VisitLdaNamedPropertyFromSuper() { IndexAsTagged(2)); // slot } -void BaselineCompiler::VisitLdaKeyedProperty() { +void BaselineCompiler::VisitGetKeyedProperty() { CallBuiltin( RegisterOperand(0), // object kInterpreterAccumulatorRegister, // key @@ -921,7 +921,12 @@ void BaselineCompiler::VisitStaModuleVariable() { __ StoreTaggedFieldWithWriteBarrier(scratch, Cell::kValueOffset, value); } -void BaselineCompiler::VisitStaNamedProperty() { +void BaselineCompiler::VisitSetNamedProperty() { + // StoreIC is currently a base class for multiple property store operations + // and 
contains mixed logic for named and keyed, set and define operations, + // the paths are controlled by feedback. + // TODO(v8:12548): refactor SetNamedIC as a subclass of StoreIC, which can be + // called here. CallBuiltin( RegisterOperand(0), // object Constant(1), // name @@ -929,15 +934,20 @@ void BaselineCompiler::VisitStaNamedProperty() { IndexAsTagged(2)); // slot } -void BaselineCompiler::VisitStaNamedOwnProperty() { - CallBuiltin( +void BaselineCompiler::VisitDefineNamedOwnProperty() { + CallBuiltin( RegisterOperand(0), // object Constant(1), // name kInterpreterAccumulatorRegister, // value IndexAsTagged(2)); // slot } -void BaselineCompiler::VisitStaKeyedProperty() { +void BaselineCompiler::VisitSetKeyedProperty() { + // KeyedStoreIC is currently a base class for multiple keyed property store + // operations and contains mixed logic for set and define operations, + // the paths are controlled by feedback. + // TODO(v8:12548): refactor SetKeyedIC as a subclass of KeyedStoreIC, which + // can be called here. CallBuiltin( RegisterOperand(0), // object RegisterOperand(1), // key @@ -945,8 +955,8 @@ void BaselineCompiler::VisitStaKeyedProperty() { IndexAsTagged(2)); // slot } -void BaselineCompiler::VisitStaKeyedPropertyAsDefine() { - CallBuiltin( +void BaselineCompiler::VisitDefineKeyedOwnProperty() { + CallBuiltin( RegisterOperand(0), // object RegisterOperand(1), // key kInterpreterAccumulatorRegister, // value @@ -961,11 +971,12 @@ void BaselineCompiler::VisitStaInArrayLiteral() { IndexAsTagged(2)); // slot } -void BaselineCompiler::VisitStaDataPropertyInLiteral() { - // Here we should save the accumulator, since StaDataPropertyInLiteral doesn't - // write the accumulator, but Runtime::kDefineDataPropertyInLiteral returns - // the value that we got from the accumulator so this still works. 
- CallRuntime(Runtime::kDefineDataPropertyInLiteral, +void BaselineCompiler::VisitDefineKeyedOwnPropertyInLiteral() { + // Here we should save the accumulator, since + // DefineKeyedOwnPropertyInLiteral doesn't write the accumulator, but + // Runtime::kDefineKeyedOwnPropertyInLiteral returns the value that we got + // from the accumulator so this still works. + CallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral, RegisterOperand(0), // object RegisterOperand(1), // name kInterpreterAccumulatorRegister, // value @@ -1211,14 +1222,12 @@ void BaselineCompiler::BuildCall(uint32_t slot, uint32_t arg_count, void BaselineCompiler::VisitCallAnyReceiver() { interpreter::RegisterList args = iterator().GetRegisterListOperand(1); uint32_t arg_count = args.register_count(); - if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver. BuildCall(Index(3), arg_count, args); } void BaselineCompiler::VisitCallProperty() { interpreter::RegisterList args = iterator().GetRegisterListOperand(1); uint32_t arg_count = args.register_count(); - if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver. BuildCall(Index(3), arg_count, args); } @@ -1271,7 +1280,6 @@ void BaselineCompiler::VisitCallWithSpread() { args = args.Truncate(args.register_count() - 1); uint32_t arg_count = args.register_count(); - if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver. CallBuiltin( RegisterOperand(0), // kFunction @@ -1329,6 +1337,19 @@ void BaselineCompiler::VisitIntrinsicCopyDataProperties( CallBuiltin(args); } +void BaselineCompiler:: + VisitIntrinsicCopyDataPropertiesWithExcludedPropertiesOnStack( + interpreter::RegisterList args) { + BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_); + Register rscratch = scratch_scope.AcquireScratch(); + // Use an offset from args[0] instead of args[1] to pass a valid "end of" + // pointer in the case where args.register_count() == 1. 
+ basm_.RegisterFrameAddress(interpreter::Register(args[0].index() + 1), + rscratch); + CallBuiltin( + args[0], args.register_count() - 1, rscratch); +} + void BaselineCompiler::VisitIntrinsicCreateIterResultObject( interpreter::RegisterList args) { CallBuiltin(args); @@ -2121,15 +2142,8 @@ void BaselineCompiler::VisitReturn() { iterator().current_bytecode_size_without_prefix(); int parameter_count = bytecode_->parameter_count(); - if (kJSArgcIncludesReceiver) { - TailCallBuiltin(parameter_count, - -profiling_weight); - - } else { - int parameter_count_without_receiver = parameter_count - 1; - TailCallBuiltin( - parameter_count_without_receiver, -profiling_weight); - } + TailCallBuiltin(parameter_count, + -profiling_weight); } void BaselineCompiler::VisitThrowReferenceErrorIfHole() { diff --git a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h index e280bee3da8b56..6c36c7e8ba31fa 100644 --- a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h +++ b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h @@ -84,6 +84,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand( interpreter::Register interpreter_register) { return MemOperand(ebp, interpreter_register.ToOperand() * kSystemPointerSize); } +void BaselineAssembler::RegisterFrameAddress( + interpreter::Register interpreter_register, Register rscratch) { + return __ lea(rscratch, MemOperand(ebp, interpreter_register.ToOperand() * + kSystemPointerSize)); +} MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(ebp, BaselineFrameConstants::kFeedbackVectorFromFp); } @@ -428,7 +433,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ LoadContext(kContextRegister); __ Push(MemOperand(ebp, InterpreterFrameConstants::kFunctionOffset)); - __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1); + __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1); __ Pop(kInterpreterAccumulatorRegister, params_size); __ 
masm()->SmiUntag(params_size); @@ -457,10 +462,9 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. - __ masm()->DropArguments( - params_size, scratch, TurboAssembler::kCountIsInteger, - kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + __ masm()->DropArguments(params_size, scratch, + TurboAssembler::kCountIsInteger, + TurboAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h index 185bb349c286f9..25b279ff8e7177 100644 --- a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h +++ b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h @@ -22,7 +22,7 @@ class BaselineAssembler::ScratchRegisterScope { if (!assembler_->scratch_register_scope_) { // If we haven't opened a scratch scope yet, for the first one add a // couple of extra registers. 
- wrapped_scope_.Include(t0.bit() | t1.bit() | t2.bit() | t3.bit()); + wrapped_scope_.Include({t0, t1, t2, t3}); } assembler_->scratch_register_scope_ = this; } @@ -78,6 +78,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand( interpreter::Register interpreter_register) { return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize); } +void BaselineAssembler::RegisterFrameAddress( + interpreter::Register interpreter_register, Register rscratch) { + return __ Add_d(rscratch, fp, + interpreter_register.ToOperand() * kSystemPointerSize); +} MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } @@ -449,7 +454,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ LoadContext(kContextRegister); __ LoadFunction(kJSFunctionRegister); __ masm()->Push(kJSFunctionRegister); - __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1); + __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1); __ masm()->Pop(params_size, kInterpreterAccumulatorRegister); __ masm()->SmiUntag(params_size); @@ -475,10 +480,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { // Drop receiver + arguments. __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - kJSArgcIncludesReceiver - ? TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); - + TurboAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h index 9cc0e749bd31c1..c33ff88024251e 100644 --- a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h +++ b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h @@ -22,7 +22,7 @@ class BaselineAssembler::ScratchRegisterScope { if (!assembler_->scratch_register_scope_) { // If we haven't opened a scratch scope yet, for the first one add a // couple of extra registers. 
- wrapped_scope_.Include(t4.bit() | t5.bit() | t6.bit() | t7.bit()); + wrapped_scope_.Include({t4, t5, t6, t7}); } assembler_->scratch_register_scope_ = this; } @@ -80,6 +80,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand( interpreter::Register interpreter_register) { return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize); } +void BaselineAssembler::RegisterFrameAddress( + interpreter::Register interpreter_register, Register rscratch) { + return __ Addu(rscratch, fp, + interpreter_register.ToOperand() * kSystemPointerSize); +} MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } @@ -461,7 +466,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ LoadContext(kContextRegister); __ LoadFunction(kJSFunctionRegister); __ masm()->Push(kJSFunctionRegister); - __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1); + __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1); __ masm()->Pop(params_size, kInterpreterAccumulatorRegister); __ masm()->SmiUntag(params_size); @@ -488,9 +493,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { // Drop receiver + arguments. __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - kJSArgcIncludesReceiver - ? TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + TurboAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h index 3f4dd6d4559b93..8aa9122f51691f 100644 --- a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h +++ b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h @@ -22,7 +22,7 @@ class BaselineAssembler::ScratchRegisterScope { if (!assembler_->scratch_register_scope_) { // If we haven't opened a scratch scope yet, for the first one add a // couple of extra registers. 
- wrapped_scope_.Include(t0.bit() | t1.bit() | t2.bit() | t3.bit()); + wrapped_scope_.Include({t0, t1, t2, t3}); } assembler_->scratch_register_scope_ = this; } @@ -78,6 +78,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand( interpreter::Register interpreter_register) { return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize); } +void BaselineAssembler::RegisterFrameAddress( + interpreter::Register interpreter_register, Register rscratch) { + return __ Daddu(rscratch, fp, + interpreter_register.ToOperand() * kSystemPointerSize); +} MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } @@ -459,7 +464,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ LoadContext(kContextRegister); __ LoadFunction(kJSFunctionRegister); __ masm()->Push(kJSFunctionRegister); - __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1); + __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1); __ masm()->Pop(params_size, kInterpreterAccumulatorRegister); __ masm()->SmiUntag(params_size); @@ -486,9 +491,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { // Drop receiver + arguments. __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - kJSArgcIncludesReceiver - ? 
TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + TurboAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h index 110f7b74659c3f..2058cd7ff3acbb 100644 --- a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h +++ b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h @@ -13,56 +13,99 @@ namespace v8 { namespace internal { namespace baseline { +namespace detail { + +static constexpr Register kScratchRegisters[] = {r9, r10, ip}; +static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters); + +#ifdef DEBUG +inline bool Clobbers(Register target, MemOperand op) { + return op.rb() == target || op.rx() == target; +} +#endif +} // namespace detail + class BaselineAssembler::ScratchRegisterScope { public: explicit ScratchRegisterScope(BaselineAssembler* assembler) : assembler_(assembler), prev_scope_(assembler->scratch_register_scope_), - wrapped_scope_(assembler->masm()) { - if (!assembler_->scratch_register_scope_) { - // If we haven't opened a scratch scope yet, for the first one add a - // couple of extra registers. - DCHECK(wrapped_scope_.CanAcquire()); - wrapped_scope_.Include(r8, r9); - wrapped_scope_.Include(kInterpreterBytecodeOffsetRegister); - } + registers_used_(prev_scope_ == nullptr ? 0 + : prev_scope_->registers_used_) { assembler_->scratch_register_scope_ = this; } ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; } - Register AcquireScratch() { return wrapped_scope_.Acquire(); } + Register AcquireScratch() { + DCHECK_LT(registers_used_, detail::kNumScratchRegisters); + return detail::kScratchRegisters[registers_used_++]; + } private: BaselineAssembler* assembler_; ScratchRegisterScope* prev_scope_; - UseScratchRegisterScope wrapped_scope_; + int registers_used_; }; // TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler. 
enum class Condition : uint32_t { - kEqual = static_cast(eq), - kNotEqual = static_cast(ne), + kEqual, + kNotEqual, - kLessThan = static_cast(lt), - kGreaterThan = static_cast(gt), - kLessThanEqual = static_cast(le), - kGreaterThanEqual = static_cast(ge), + kLessThan, + kGreaterThan, + kLessThanEqual, + kGreaterThanEqual, - kUnsignedLessThan = static_cast(lo), - kUnsignedGreaterThan = static_cast(hi), - kUnsignedLessThanEqual = static_cast(ls), - kUnsignedGreaterThanEqual = static_cast(hs), + kUnsignedLessThan, + kUnsignedGreaterThan, + kUnsignedLessThanEqual, + kUnsignedGreaterThanEqual, - kOverflow = static_cast(vs), - kNoOverflow = static_cast(vc), + kOverflow, + kNoOverflow, - kZero = static_cast(eq), - kNotZero = static_cast(ne), + kZero, + kNotZero }; inline internal::Condition AsMasmCondition(Condition cond) { - UNIMPLEMENTED(); - return static_cast(cond); + STATIC_ASSERT(sizeof(internal::Condition) == sizeof(Condition)); + switch (cond) { + case Condition::kEqual: + return eq; + case Condition::kNotEqual: + return ne; + case Condition::kLessThan: + return lt; + case Condition::kGreaterThan: + return gt; + case Condition::kLessThanEqual: + return le; + case Condition::kGreaterThanEqual: + return ge; + + case Condition::kUnsignedLessThan: + return lt; + case Condition::kUnsignedGreaterThan: + return gt; + case Condition::kUnsignedLessThanEqual: + return le; + case Condition::kUnsignedGreaterThanEqual: + return ge; + + case Condition::kOverflow: + return overflow; + case Condition::kNoOverflow: + return nooverflow; + + case Condition::kZero: + return eq; + case Condition::kNotZero: + return ne; + default: + UNREACHABLE(); + } } namespace detail { @@ -83,6 +126,10 @@ MemOperand BaselineAssembler::RegisterFrameOperand( UNIMPLEMENTED(); return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize); } +void BaselineAssembler::RegisterFrameAddress( + interpreter::Register interpreter_register, Register rscratch) { + UNIMPLEMENTED(); +} MemOperand 
BaselineAssembler::FeedbackVectorOperand() { UNIMPLEMENTED(); return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); diff --git a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h index 96420093d16790..7aef7d138e0ad4 100644 --- a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h +++ b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h @@ -76,6 +76,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand( interpreter::Register interpreter_register) { return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize); } +void BaselineAssembler::RegisterFrameAddress( + interpreter::Register interpreter_register, Register rscratch) { + return __ Add64(rscratch, fp, + interpreter_register.ToOperand() * kSystemPointerSize); +} MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } @@ -478,7 +483,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ LoadContext(kContextRegister); __ LoadFunction(kJSFunctionRegister); __ masm()->Push(kJSFunctionRegister); - __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1); + __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1); __ masm()->Pop(params_size, kInterpreterAccumulatorRegister); __ masm()->SmiUntag(params_size); @@ -505,9 +510,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { // Drop receiver + arguments. __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger, - kJSArgcIncludesReceiver - ? 
MacroAssembler::kCountIncludesReceiver - : MacroAssembler::kCountExcludesReceiver); + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h index ce7afbf4ea3315..705e7bbd859600 100644 --- a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h +++ b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h @@ -76,7 +76,6 @@ inline internal::Condition AsMasmCondition(Condition cond) { return eq; case Condition::kNotEqual: return ne; - case Condition::kLessThan: return lt; case Condition::kGreaterThan: @@ -134,10 +133,10 @@ inline bool IsSignedCondition(Condition cond) { } } -#define __ assm->masm()-> +#define __ assm-> // s390x helper -void JumpIfHelper(BaselineAssembler* assm, Condition cc, Register lhs, - Register rhs, Label* target) { +static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs, + Register rhs, Label* target) { if (IsSignedCondition(cc)) { __ CmpS64(lhs, rhs); } else { @@ -154,6 +153,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand( interpreter::Register interpreter_register) { return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize); } +void BaselineAssembler::RegisterFrameAddress( + interpreter::Register interpreter_register, Register rscratch) { + return __ AddS64(rscratch, fp, + interpreter_register.ToOperand() * kSystemPointerSize); +} MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } @@ -256,32 +260,32 @@ void BaselineAssembler::JumpIfPointer(Condition cc, Register value, ScratchRegisterScope temps(this); Register tmp = temps.AcquireScratch(); __ LoadU64(tmp, operand); - JumpIfHelper(this, cc, value, tmp, target); + JumpIfHelper(masm_, cc, value, tmp, target); } void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi, Label* target, Label::Distance) { __ AssertSmi(value); __ 
LoadSmiLiteral(r0, smi); - JumpIfHelper(this, cc, value, r0, target); + JumpIfHelper(masm_, cc, value, r0, target); } void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs, Label* target, Label::Distance) { __ AssertSmi(lhs); __ AssertSmi(rhs); - JumpIfHelper(this, cc, lhs, rhs, target); + JumpIfHelper(masm_, cc, lhs, rhs, target); } void BaselineAssembler::JumpIfTagged(Condition cc, Register value, MemOperand operand, Label* target, Label::Distance) { __ LoadU64(r0, operand); - JumpIfHelper(this, cc, value, r0, target); + JumpIfHelper(masm_, cc, value, r0, target); } void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand, Register value, Label* target, Label::Distance) { __ LoadU64(r0, operand); - JumpIfHelper(this, cc, r0, value, target); + JumpIfHelper(masm_, cc, r0, value, target); } void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte, Label* target, Label::Distance) { @@ -289,28 +293,28 @@ void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte, } void BaselineAssembler::Move(interpreter::Register output, Register source) { - UNIMPLEMENTED(); + Move(RegisterFrameOperand(output), source); } void BaselineAssembler::Move(Register output, TaggedIndex value) { - UNIMPLEMENTED(); + __ mov(output, Operand(value.ptr())); } void BaselineAssembler::Move(MemOperand output, Register source) { - UNIMPLEMENTED(); + __ StoreU64(source, output); } void BaselineAssembler::Move(Register output, ExternalReference reference) { - UNIMPLEMENTED(); + __ Move(output, reference); } void BaselineAssembler::Move(Register output, Handle value) { - UNIMPLEMENTED(); + __ Move(output, value); } void BaselineAssembler::Move(Register output, int32_t value) { - UNIMPLEMENTED(); + __ mov(output, Operand(value)); } void BaselineAssembler::MoveMaybeSmi(Register output, Register source) { - UNIMPLEMENTED(); + __ mov(output, source); } void BaselineAssembler::MoveSmi(Register output, Register source) { - 
UNIMPLEMENTED(); + __ mov(output, source); } namespace detail { @@ -319,7 +323,8 @@ template inline Register ToRegister(BaselineAssembler* basm, BaselineAssembler::ScratchRegisterScope* scope, Arg arg) { - UNIMPLEMENTED(); + Register reg = scope->AcquireScratch(); + basm->Move(reg, arg); return reg; } inline Register ToRegister(BaselineAssembler* basm, @@ -424,63 +429,188 @@ void BaselineAssembler::Pop(T... registers) { void BaselineAssembler::LoadTaggedPointerField(Register output, Register source, int offset) { - UNIMPLEMENTED(); + __ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0); } void BaselineAssembler::LoadTaggedSignedField(Register output, Register source, int offset) { - UNIMPLEMENTED(); + __ LoadTaggedSignedField(output, FieldMemOperand(source, offset)); } void BaselineAssembler::LoadTaggedAnyField(Register output, Register source, int offset) { - UNIMPLEMENTED(); + __ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0); } void BaselineAssembler::LoadByteField(Register output, Register source, int offset) { - UNIMPLEMENTED(); + __ LoadU8(output, FieldMemOperand(source, offset)); } void BaselineAssembler::StoreTaggedSignedField(Register target, int offset, Smi value) { - UNIMPLEMENTED(); + ASM_CODE_COMMENT(masm_); + ScratchRegisterScope temps(this); + Register tmp = temps.AcquireScratch(); + __ LoadSmiLiteral(tmp, value); + __ StoreTaggedField(tmp, FieldMemOperand(target, offset), r0); } void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target, int offset, Register value) { - UNIMPLEMENTED(); + ASM_CODE_COMMENT(masm_); + Register scratch = WriteBarrierDescriptor::SlotAddressRegister(); + DCHECK(!AreAliased(target, value, scratch)); + __ StoreTaggedField(value, FieldMemOperand(target, offset), r0); + __ RecordWriteField(target, offset, value, scratch, kLRHasNotBeenSaved, + SaveFPRegsMode::kIgnore); } void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target, int offset, Register value) { - 
UNIMPLEMENTED(); + __ StoreTaggedField(value, FieldMemOperand(target, offset), r0); } void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( int32_t weight, Label* skip_interrupt_label) { - UNIMPLEMENTED(); + ASM_CODE_COMMENT(masm_); + ScratchRegisterScope scratch_scope(this); + Register feedback_cell = scratch_scope.AcquireScratch(); + LoadFunction(feedback_cell); + LoadTaggedPointerField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); + + Register interrupt_budget = scratch_scope.AcquireScratch(); + __ LoadU32( + interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); + // Remember to set flags as part of the add! + __ AddS32(interrupt_budget, Operand(weight)); + __ StoreU32( + interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset), r0); + if (skip_interrupt_label) { + // Use compare flags set by add + DCHECK_LT(weight, 0); + __ b(ge, skip_interrupt_label); + } } void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( Register weight, Label* skip_interrupt_label) { - UNIMPLEMENTED(); + ASM_CODE_COMMENT(masm_); + ScratchRegisterScope scratch_scope(this); + Register feedback_cell = scratch_scope.AcquireScratch(); + LoadFunction(feedback_cell); + LoadTaggedPointerField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); + + Register interrupt_budget = scratch_scope.AcquireScratch(); + __ LoadU32( + interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); + // Remember to set flags as part of the add! 
+ __ AddS32(interrupt_budget, interrupt_budget, weight); + __ StoreU32( + interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); + if (skip_interrupt_label) __ b(ge, skip_interrupt_label); +} + +void BaselineAssembler::AddSmi(Register lhs, Smi rhs) { + if (rhs.value() == 0) return; + __ LoadSmiLiteral(r0, rhs); + if (SmiValuesAre31Bits()) { + __ AddS32(lhs, lhs, r0); + } else { + __ AddS64(lhs, lhs, r0); + } } -void BaselineAssembler::AddSmi(Register lhs, Smi rhs) { UNIMPLEMENTED(); } - void BaselineAssembler::Switch(Register reg, int case_value_base, Label** labels, int num_labels) { - UNIMPLEMENTED(); + ASM_CODE_COMMENT(masm_); + Label fallthrough, jump_table; + if (case_value_base != 0) { + __ AddS64(reg, Operand(-case_value_base)); + } + + // Mostly copied from code-generator-arm.cc + ScratchRegisterScope scope(this); + JumpIf(Condition::kUnsignedGreaterThanEqual, reg, Operand(num_labels), + &fallthrough); + // Ensure to emit the constant pool first if necessary. + int entry_size_log2 = 3; + __ ShiftLeftU32(reg, reg, Operand(entry_size_log2)); + __ larl(r1, &jump_table); + __ lay(reg, MemOperand(reg, r1)); + __ b(reg); + __ b(&fallthrough); + __ bind(&jump_table); + for (int i = 0; i < num_labels; ++i) { + __ b(labels[i], Label::kFar); + __ nop(); + } + __ bind(&fallthrough); } #undef __ #define __ basm. 
-void BaselineAssembler::EmitReturn(MacroAssembler* masm) { UNIMPLEMENTED(); } +void BaselineAssembler::EmitReturn(MacroAssembler* masm) { + ASM_CODE_COMMENT(masm); + BaselineAssembler basm(masm); + + Register weight = BaselineLeaveFrameDescriptor::WeightRegister(); + Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister(); + + { + ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget"); + + Label skip_interrupt_label; + __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label); + { + __ masm()->SmiTag(params_size); + __ Push(params_size, kInterpreterAccumulatorRegister); + + __ LoadContext(kContextRegister); + __ LoadFunction(kJSFunctionRegister); + __ Push(kJSFunctionRegister); + __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1); + + __ Pop(kInterpreterAccumulatorRegister, params_size); + __ masm()->SmiUntag(params_size); + } + + __ Bind(&skip_interrupt_label); + } + + BaselineAssembler::ScratchRegisterScope temps(&basm); + Register actual_params_size = temps.AcquireScratch(); + // Compute the size of the actual parameters + receiver (in bytes). + __ Move(actual_params_size, + MemOperand(fp, StandardFrameConstants::kArgCOffset)); + + // If actual is bigger than formal, then we should use it to free up the stack + // arguments. + Label corrected_args_count; + JumpIfHelper(__ masm(), Condition::kGreaterThanEqual, params_size, + actual_params_size, &corrected_args_count); + __ masm()->mov(params_size, actual_params_size); + __ Bind(&corrected_args_count); + + // Leave the frame (also dropping the register file). + __ masm()->LeaveFrame(StackFrame::BASELINE); + + // Drop receiver + arguments. 
+ __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountIncludesReceiver); + __ masm()->Ret(); +} #undef __ inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator( Register reg) { - UNIMPLEMENTED(); + assembler_->masm()->CmpU64(reg, kInterpreterAccumulatorRegister); + assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue); } } // namespace baseline diff --git a/deps/v8/src/baseline/s390/baseline-compiler-s390-inl.h b/deps/v8/src/baseline/s390/baseline-compiler-s390-inl.h index c481c549401a30..0d42949fca288a 100644 --- a/deps/v8/src/baseline/s390/baseline-compiler-s390-inl.h +++ b/deps/v8/src/baseline/s390/baseline-compiler-s390-inl.h @@ -14,11 +14,82 @@ namespace baseline { #define __ basm_. -void BaselineCompiler::Prologue() { UNIMPLEMENTED(); } +void BaselineCompiler::Prologue() { + // Enter the frame here, since CallBuiltin will override lr. + __ masm()->EnterFrame(StackFrame::BASELINE); + DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); + int max_frame_size = + bytecode_->frame_size() + max_call_args_ * kSystemPointerSize; + CallBuiltin( + kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, + max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); -void BaselineCompiler::PrologueFillFrame() { UNIMPLEMENTED(); } + PrologueFillFrame(); +} -void BaselineCompiler::VerifyFrameSize() { UNIMPLEMENTED(); } +void BaselineCompiler::PrologueFillFrame() { + ASM_CODE_COMMENT(&masm_); + // Inlined register frame fill + interpreter::Register new_target_or_generator_register = + bytecode_->incoming_new_target_or_generator_register(); + if (FLAG_debug_code) { + __ masm()->CompareRoot(kInterpreterAccumulatorRegister, + RootIndex::kUndefinedValue); + __ masm()->Assert(eq, AbortReason::kUnexpectedValue); + } + int register_count = bytecode_->register_count(); + // Magic value + const int kLoopUnrollSize = 8; + const int new_target_index = new_target_or_generator_register.index(); 
+ const bool has_new_target = new_target_index != kMaxInt; + if (has_new_target) { + DCHECK_LE(new_target_index, register_count); + for (int i = 0; i < new_target_index; i++) { + __ Push(kInterpreterAccumulatorRegister); + } + // Push new_target_or_generator. + __ Push(kJavaScriptCallNewTargetRegister); + register_count -= new_target_index + 1; + } + if (register_count < 2 * kLoopUnrollSize) { + // If the frame is small enough, just unroll the frame fill completely. + for (int i = 0; i < register_count; ++i) { + __ Push(kInterpreterAccumulatorRegister); + } + + } else { + // Extract the first few registers to round to the unroll size. + int first_registers = register_count % kLoopUnrollSize; + for (int i = 0; i < first_registers; ++i) { + __ Push(kInterpreterAccumulatorRegister); + } + BaselineAssembler::ScratchRegisterScope temps(&basm_); + Register scratch = temps.AcquireScratch(); + + __ Move(scratch, register_count / kLoopUnrollSize); + // We enter the loop unconditionally, so make sure we need to loop at least + // once. 
+ DCHECK_GT(register_count / kLoopUnrollSize, 0); + Label loop; + __ Bind(&loop); + for (int i = 0; i < kLoopUnrollSize; ++i) { + __ Push(kInterpreterAccumulatorRegister); + } + __ masm()->SubS64(scratch, Operand(1)); + __ masm()->b(gt, &loop); + } +} + +void BaselineCompiler::VerifyFrameSize() { + BaselineAssembler::ScratchRegisterScope temps(&basm_); + Register scratch = temps.AcquireScratch(); + + __ masm()->AddS64(scratch, sp, + Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp + + bytecode_->frame_size())); + __ masm()->CmpU64(scratch, fp); + __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer); +} } // namespace baseline } // namespace internal diff --git a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h index aa9564dceaa001..594b794672f436 100644 --- a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h +++ b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h @@ -86,6 +86,11 @@ MemOperand BaselineAssembler::RegisterFrameOperand( interpreter::Register interpreter_register) { return MemOperand(rbp, interpreter_register.ToOperand() * kSystemPointerSize); } +void BaselineAssembler::RegisterFrameAddress( + interpreter::Register interpreter_register, Register rscratch) { + return __ leaq(rscratch, MemOperand(rbp, interpreter_register.ToOperand() * + kSystemPointerSize)); +} MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(rbp, BaselineFrameConstants::kFeedbackVectorFromFp); } @@ -440,7 +445,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ LoadContext(kContextRegister); __ Push(MemOperand(rbp, InterpreterFrameConstants::kFunctionOffset)); - __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1); + __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1); __ Pop(kInterpreterAccumulatorRegister, params_size); __ masm()->SmiUntag(params_size); @@ -468,10 +473,9 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ 
masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. - __ masm()->DropArguments( - params_size, scratch, TurboAssembler::kCountIsInteger, - kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + __ masm()->DropArguments(params_size, scratch, + TurboAssembler::kCountIsInteger, + TurboAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/deps/v8/src/bigint/bigint.h b/deps/v8/src/bigint/bigint.h index 300229c97df9b4..f36695276207ee 100644 --- a/deps/v8/src/bigint/bigint.h +++ b/deps/v8/src/bigint/bigint.h @@ -301,6 +301,10 @@ class Processor { // Z := the contents of {accumulator}. // Assume that this leaves {accumulator} in unusable state. Status FromString(RWDigits Z, FromStringAccumulator* accumulator); + + protected: + // Use {Destroy} or {Destroyer} instead of the destructor directly. + ~Processor() = default; }; inline int AddResultLength(int x_length, int y_length) { @@ -418,13 +422,13 @@ class FromStringAccumulator { : max_digits_(std::max(max_digits, kStackParts)) {} // Step 2: Call this method to read all characters. - // {Char} should be a character type, such as uint8_t or uint16_t. - // {end} should be one past the last character (i.e. {start == end} would - // indicate an empty string). - // Returns the current position when an invalid character is encountered. - template - ALWAYS_INLINE const Char* Parse(const Char* start, const Char* end, - digit_t radix); + // {CharIt} should be a forward iterator and + // std::iterator_traits::value_type shall be a character type, such as + // uint8_t or uint16_t. {end} should be one past the last character (i.e. + // {start == end} would indicate an empty string). Returns the current + // position when an invalid character is encountered. 
+ template + ALWAYS_INLINE CharIt Parse(CharIt start, CharIt end, digit_t radix); // Step 3: Check if a result is available, and determine its required // allocation size (guaranteed to be <= max_digits passed to the constructor). @@ -434,14 +438,13 @@ class FromStringAccumulator { } // Step 4: Use BigIntProcessor::FromString() to retrieve the result into an - // {RWDigits} struct allocated for the size returned by step 2. + // {RWDigits} struct allocated for the size returned by step 3. private: friend class ProcessorImpl; - template - ALWAYS_INLINE const Char* ParsePowerTwo(const Char* start, const Char* end, - digit_t radix); + template + ALWAYS_INLINE CharIt ParsePowerTwo(CharIt start, CharIt end, digit_t radix); ALWAYS_INLINE bool AddPart(digit_t multiplier, digit_t part, bool is_last); ALWAYS_INLINE bool AddPart(digit_t part); @@ -491,10 +494,9 @@ static constexpr uint8_t kCharValue[] = { // A space- and time-efficient way to map {2,4,8,16,32} to {1,2,3,4,5}. static constexpr uint8_t kCharBits[] = {1, 2, 3, 0, 4, 0, 0, 0, 5}; -template -const Char* FromStringAccumulator::ParsePowerTwo(const Char* current, - const Char* end, - digit_t radix) { +template +CharIt FromStringAccumulator::ParsePowerTwo(CharIt current, CharIt end, + digit_t radix) { radix_ = static_cast(radix); const int char_bits = kCharBits[radix >> 2]; int bits_left; @@ -528,11 +530,10 @@ const Char* FromStringAccumulator::ParsePowerTwo(const Char* current, return current; } -template -const Char* FromStringAccumulator::Parse(const Char* start, const Char* end, - digit_t radix) { +template +CharIt FromStringAccumulator::Parse(CharIt start, CharIt end, digit_t radix) { BIGINT_H_DCHECK(2 <= radix && radix <= 36); - const Char* current = start; + CharIt current = start; #if !HAVE_BUILTIN_MUL_OVERFLOW const digit_t kMaxMultiplier = (~digit_t{0}) / radix; #endif diff --git a/deps/v8/src/bigint/tostring.cc b/deps/v8/src/bigint/tostring.cc index 0447ce0c22651c..3f1a277c3da7f0 100644 --- 
a/deps/v8/src/bigint/tostring.cc +++ b/deps/v8/src/bigint/tostring.cc @@ -127,6 +127,7 @@ class ToStringFormatter { out_end_(out + chars_available), out_(out_end_), processor_(processor) { + digits_.Normalize(); DCHECK(chars_available >= ToStringResultLength(digits_, radix_, sign_)); } diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc index 0d994d2d034303..5aeac1f17905d3 100644 --- a/deps/v8/src/builtins/accessors.cc +++ b/deps/v8/src/builtins/accessors.cc @@ -516,6 +516,7 @@ Handle Accessors::FunctionGetArguments(JavaScriptFrame* frame, void Accessors::FunctionArgumentsGetter( v8::Local name, const v8::PropertyCallbackInfo& info) { i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); + isolate->CountUsage(v8::Isolate::kFunctionPrototypeArguments); HandleScope scope(isolate); Handle function = Handle::cast(Utils::OpenHandle(*info.Holder())); @@ -690,6 +691,7 @@ MaybeHandle FindCaller(Isolate* isolate, void Accessors::FunctionCallerGetter( v8::Local name, const v8::PropertyCallbackInfo& info) { i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); + isolate->CountUsage(v8::Isolate::kFunctionPrototypeCaller); HandleScope scope(isolate); Handle function = Handle::cast(Utils::OpenHandle(*info.Holder())); @@ -767,75 +769,28 @@ Handle Accessors::MakeBoundFunctionNameInfo(Isolate* isolate) { void Accessors::ErrorStackGetter( v8::Local key, const v8::PropertyCallbackInfo& info) { - i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); + Isolate* isolate = reinterpret_cast(info.GetIsolate()); HandleScope scope(isolate); - Handle holder = + Handle formatted_stack; + Handle error_object = Handle::cast(Utils::OpenHandle(*info.Holder())); - - // Retrieve the stack trace. It can either be structured data in the form of - // a FixedArray of StackFrameInfo objects, an already formatted stack trace - // (string) or whatever the "prepareStackTrace" callback produced. 
- - Handle stack_trace; - Handle stack_trace_symbol = isolate->factory()->stack_trace_symbol(); - MaybeHandle maybe_stack_trace = - JSObject::GetProperty(isolate, holder, stack_trace_symbol); - if (!maybe_stack_trace.ToHandle(&stack_trace) || - stack_trace->IsUndefined(isolate)) { - Handle result = isolate->factory()->undefined_value(); - info.GetReturnValue().Set(Utils::ToLocal(result)); - return; - } - - // Only format the stack-trace the first time around. The check for a - // FixedArray is sufficient as the user callback can not create plain - // FixedArrays and the result is a String in case we format the stack - // trace ourselves. - - if (!stack_trace->IsFixedArray()) { - info.GetReturnValue().Set(Utils::ToLocal(stack_trace)); - return; - } - - Handle formatted_stack_trace; - if (!ErrorUtils::FormatStackTrace(isolate, holder, stack_trace) - .ToHandle(&formatted_stack_trace)) { - isolate->OptionalRescheduleException(false); - return; - } - - // Replace the structured stack-trace with the formatting result. 
- MaybeHandle result = Object::SetProperty( - isolate, holder, isolate->factory()->stack_trace_symbol(), - formatted_stack_trace, StoreOrigin::kMaybeKeyed, - Just(ShouldThrow::kThrowOnError)); - if (result.is_null()) { + if (!ErrorUtils::GetFormattedStack(isolate, error_object) + .ToHandle(&formatted_stack)) { isolate->OptionalRescheduleException(false); return; } - - v8::Local value = Utils::ToLocal(formatted_stack_trace); - info.GetReturnValue().Set(value); + info.GetReturnValue().Set(Utils::ToLocal(formatted_stack)); } void Accessors::ErrorStackSetter( - v8::Local name, v8::Local val, + v8::Local name, v8::Local value, const v8::PropertyCallbackInfo& info) { - i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); + Isolate* isolate = reinterpret_cast(info.GetIsolate()); HandleScope scope(isolate); - Handle obj = Handle::cast( - Utils::OpenHandle(*v8::Local(info.This()))); - Handle value = Handle::cast(Utils::OpenHandle(*val)); - - // Store the value in the internal symbol to avoid reconfiguration to - // a data property. 
- MaybeHandle result = Object::SetProperty( - isolate, obj, isolate->factory()->stack_trace_symbol(), value, - StoreOrigin::kMaybeKeyed, Just(ShouldThrow::kThrowOnError)); - if (result.is_null()) { - isolate->OptionalRescheduleException(false); - return; - } + Handle error_object = + Handle::cast(Utils::OpenHandle(*info.Holder())); + ErrorUtils::SetFormattedStack(isolate, error_object, + Utils::OpenHandle(*value)); } Handle Accessors::MakeErrorStackInfo(Isolate* isolate) { diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index 00f1009610cfb6..fe2536fa0a85cb 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -89,11 +89,7 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc, Register counter = scratch; Register value = temps.Acquire(); Label loop, entry; - if (kJSArgcIncludesReceiver) { - __ sub(counter, argc, Operand(kJSArgcReceiverSlots)); - } else { - __ mov(counter, argc); - } + __ sub(counter, argc, Operand(kJSArgcReceiverSlots)); __ b(&entry); __ bind(&loop); __ ldr(value, MemOperand(array, counter, LSL, kSystemPointerSizeLog2)); @@ -162,9 +158,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // Remove caller arguments from the stack and return. __ DropArguments(scratch, TurboAssembler::kCountIsSmi, - kJSArgcIncludesReceiver - ? TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + TurboAssembler::kCountIncludesReceiver); __ Jump(lr); __ bind(&stack_overflow); @@ -314,9 +308,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Remove caller arguments from the stack and return. __ DropArguments(r1, TurboAssembler::kCountIsSmi, - kJSArgcIncludesReceiver - ? 
TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + TurboAssembler::kCountIncludesReceiver); __ Jump(lr); __ bind(&check_receiver); @@ -441,9 +433,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); __ ldrh(r3, FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset)); - if (kJSArgcIncludesReceiver) { - __ sub(r3, r3, Operand(kJSArgcReceiverSlots)); - } + __ sub(r3, r3, Operand(kJSArgcReceiverSlots)); __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset)); { @@ -565,7 +555,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, // r1: microtask_queue // Preserve all but r0 and pass them to entry_trampoline. Label invoke, handler_entry, exit; - const RegList kCalleeSavedWithoutFp = kCalleeSaved & ~fp.bit(); + const RegList kCalleeSavedWithoutFp = kCalleeSaved - fp; // Update |pushed_stack_space| when we manipulate the stack. int pushed_stack_space = EntryFrameConstants::kCallerFPOffset; @@ -599,7 +589,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, masm->isolate())); __ ldr(r5, MemOperand(r4)); - __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | fp.bit() | lr.bit()); + __ stm(db_w, sp, {r5, r6, r7, fp, lr}); pushed_stack_space += 5 * kPointerSize /* r5, r6, r7, fp, lr */; // Clear c_entry_fp, now we've pushed its previous value to the stack. @@ -700,7 +690,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, Operand(-EntryFrameConstants::kCallerFPOffset - kSystemPointerSize /* already popped one */)); - __ ldm(ia_w, sp, fp.bit() | lr.bit()); + __ ldm(ia_w, sp, {fp, lr}); // Restore callee-saved vfp registers. 
__ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); @@ -767,11 +757,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Check if we have enough stack space to push all arguments + receiver. // Clobbers r5. Label enough_stack_space, stack_overflow; - if (kJSArgcIncludesReceiver) { - __ mov(r6, r0); - } else { - __ add(r6, r0, Operand(1)); // Add one for receiver. - } + __ mov(r6, r0); __ StackOverflowCheck(r6, r5, &stack_overflow); __ b(&enough_stack_space); __ bind(&stack_overflow); @@ -870,9 +856,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ ldr(actual_params_size, MemOperand(fp, StandardFrameConstants::kArgCOffset)); __ lsl(actual_params_size, actual_params_size, Operand(kPointerSizeLog2)); - if (!kJSArgcIncludesReceiver) { - __ add(actual_params_size, actual_params_size, Operand(kSystemPointerSize)); - } // If actual is bigger than formal, then we should use it to free up the stack // arguments. @@ -953,22 +936,16 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, // ----------------------------------- DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker)); - // TODO(v8:8394): The logging of first execution will break if - // feedback vectors are not allocated. We need to find a different way of - // logging these events if required. 
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker, - OptimizationMarker::kLogFirstExecution, - Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals( + masm, optimization_marker, + OptimizationMarker::kCompileTurbofan_NotConcurrent, + Runtime::kCompileTurbofan_NotConcurrent); TailCallRuntimeIfMarkerEquals(masm, optimization_marker, - OptimizationMarker::kCompileOptimized, - Runtime::kCompileOptimized_NotConcurrent); - TailCallRuntimeIfMarkerEquals(masm, optimization_marker, - OptimizationMarker::kCompileOptimizedConcurrent, - Runtime::kCompileOptimized_Concurrent); + OptimizationMarker::kCompileTurbofan_Concurrent, + Runtime::kCompileTurbofan_Concurrent); - // Marker should be one of LogFirstExecution / CompileOptimized / - // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach - // here. + // Marker should be one of CompileOptimized / CompileOptimizedConcurrent. + // InOptimizationQueue and None shouldn't reach here. if (FLAG_debug_code) { __ stop(); } @@ -1077,9 +1054,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( DCHECK(!AreAliased(optimization_state, feedback_vector)); Label maybe_has_optimized_code; // Check if optimized code is available - __ tst( - optimization_state, - Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker)); + __ tst(optimization_state, + Operand(FeedbackVector::kHasCompileOptimizedMarker)); __ b(eq, &maybe_has_optimized_code); Register optimization_marker = optimization_state; @@ -1226,7 +1202,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { temps.Exclude(optimization_state); // Drop the frame created by the baseline call. 
- __ ldm(ia_w, sp, fp.bit() | lr.bit()); + __ ldm(ia_w, sp, {fp, lr}); MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state, feedback_vector); __ Trap(); @@ -1520,12 +1496,8 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( __ sub(r0, r0, Operand(1)); } - const bool skip_receiver = - receiver_mode == ConvertReceiverMode::kNullOrUndefined; - if (kJSArgcIncludesReceiver && skip_receiver) { + if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { __ sub(r3, r0, Operand(kJSArgcReceiverSlots)); - } else if (!kJSArgcIncludesReceiver && !skip_receiver) { - __ add(r3, r0, Operand(1)); } else { __ mov(r3, r0); } @@ -1584,11 +1556,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl( __ sub(r0, r0, Operand(1)); } - Register argc_without_receiver = r0; - if (kJSArgcIncludesReceiver) { - argc_without_receiver = r6; - __ sub(argc_without_receiver, r0, Operand(kJSArgcReceiverSlots)); - } + Register argc_without_receiver = r6; + __ sub(argc_without_receiver, r0, Operand(kJSArgcReceiverSlots)); // Push the arguments. r4 and r5 will be modified. GenerateInterpreterPushArgs(masm, argc_without_receiver, r4, r5); @@ -1927,10 +1896,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg __ cmp(r0, Operand(JSParameterCount(2)), ge); __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray - __ DropArgumentsAndPushNewReceiver( - r0, r5, TurboAssembler::kCountIsInteger, - kJSArgcIncludesReceiver ? 
TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -2006,10 +1973,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument __ cmp(r0, Operand(JSParameterCount(3)), ge); __ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList - __ DropArgumentsAndPushNewReceiver( - r0, r5, TurboAssembler::kCountIsInteger, - kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -2051,10 +2016,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList __ cmp(r0, Operand(JSParameterCount(3)), ge); __ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target - __ DropArgumentsAndPushNewReceiver( - r0, r4, TurboAssembler::kCountIsInteger, - kJSArgcIncludesReceiver ? 
TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + __ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -2103,11 +2066,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments( Label loop, done; __ bind(&loop); __ cmp(old_sp, end); - if (kJSArgcIncludesReceiver) { - __ b(ge, &done); - } else { - __ b(gt, &done); - } + __ b(ge, &done); __ ldr(value, MemOperand(old_sp, kSystemPointerSize, PostIndex)); __ str(value, MemOperand(dest, kSystemPointerSize, PostIndex)); __ b(&loop); @@ -2220,9 +2179,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, Label stack_done, stack_overflow; __ ldr(r5, MemOperand(fp, StandardFrameConstants::kArgCOffset)); - if (kJSArgcIncludesReceiver) { - __ sub(r5, r5, Operand(kJSArgcReceiverSlots)); - } + __ sub(r5, r5, Operand(kJSArgcReceiverSlots)); __ sub(r5, r5, r2, SetCC); __ b(le, &stack_done); { @@ -2283,13 +2240,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // -- r0 : the number of arguments // -- r1 : the function to call (checked to be a JSFunction) // ----------------------------------- - __ AssertFunction(r1); + __ AssertCallableFunction(r1); - Label class_constructor; __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset)); - __ tst(r3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask)); - __ b(ne, &class_constructor); // Enter the context of the function; ToObject has to run in the function // context, and we also need to take the global proxy from the function @@ -2364,14 +2317,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ ldrh(r2, FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset)); __ InvokeFunctionCode(r1, no_reg, r2, r0, InvokeType::kJump); - - // The function is a "classConstructor", need to raise an 
exception. - __ bind(&class_constructor); - { - FrameScope frame(masm, StackFrame::INTERNAL); - __ push(r1); - __ CallRuntime(Runtime::kThrowConstructorNonCallableError); - } } namespace { @@ -2512,6 +2457,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { __ cmp(instance_type, Operand(JS_PROXY_TYPE)); __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq); + // Check if target is a wrapped function and call CallWrappedFunction external + // builtin + __ cmp(instance_type, Operand(JS_WRAPPED_FUNCTION_TYPE)); + __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction), + RelocInfo::CODE_TARGET, eq); + // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) // Check that the function is not a "classConstructor". __ cmp(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE)); @@ -2677,9 +2628,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // Save all parameter registers (see wasm-linkage.h). They might be // overwritten in the runtime call below. We don't have any callee-saved // registers in wasm, so no need to store anything else. 
- RegList gp_regs = 0; + RegList gp_regs; for (Register gp_param_reg : wasm::kGpParamRegisters) { - gp_regs |= gp_param_reg.bit(); + gp_regs.set(gp_param_reg); } DwVfpRegister lowest_fp_reg = std::begin(wasm::kFpParamRegisters)[0]; DwVfpRegister highest_fp_reg = std::end(wasm::kFpParamRegisters)[-1]; @@ -2688,10 +2639,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { fp_param_reg.code() <= highest_fp_reg.code()); } - CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters)); + CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters)); CHECK_EQ(highest_fp_reg.code() - lowest_fp_reg.code() + 1, arraysize(wasm::kFpParamRegisters)); - CHECK_EQ(NumRegs(gp_regs), + CHECK_EQ(gp_regs.Count(), WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs); CHECK_EQ(highest_fp_reg.code() - lowest_fp_reg.code() + 1, WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs); @@ -2724,20 +2675,19 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK); STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32); - constexpr uint32_t last = - 31 - base::bits::CountLeadingZeros32( - WasmDebugBreakFrameConstants::kPushedFpRegs); - constexpr uint32_t first = base::bits::CountTrailingZeros32( - WasmDebugBreakFrameConstants::kPushedFpRegs); + constexpr DwVfpRegister last = + WasmDebugBreakFrameConstants::kPushedFpRegs.last(); + constexpr DwVfpRegister first = + WasmDebugBreakFrameConstants::kPushedFpRegs.first(); static_assert( - base::bits::CountPopulation( - WasmDebugBreakFrameConstants::kPushedFpRegs) == last - first + 1, + WasmDebugBreakFrameConstants::kPushedFpRegs.Count() == + last.code() - first.code() + 1, "All registers in the range from first to last have to be set"); // Save all parameter registers. They might hold live values, we restore // them after the runtime call. 
- constexpr DwVfpRegister lowest_fp_reg = DwVfpRegister::from_code(first); - constexpr DwVfpRegister highest_fp_reg = DwVfpRegister::from_code(last); + constexpr DwVfpRegister lowest_fp_reg = first; + constexpr DwVfpRegister highest_fp_reg = last; // Store gp parameter registers. __ stm(db_w, sp, WasmDebugBreakFrameConstants::kPushedGpRegs); @@ -2766,6 +2716,16 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { __ Trap(); } +void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { + // TODO(v8:12191): Implement for this platform. + __ Trap(); +} + +void Builtins::Generate_WasmResume(MacroAssembler* masm) { + // TODO(v8:12191): Implement for this platform. + __ Trap(); +} + void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { // Only needed on x64. __ Trap(); @@ -3379,12 +3339,12 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, STATIC_ASSERT(kNumberOfRegisters == 16); // Everything but pc, lr and ip which will be saved but not restored. - RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit(); + RegList restored_regs = kJSCallerSaved | kCalleeSaved | RegList{ip}; // Push all 16 registers (needed to populate FrameDescription::registers_). // TODO(v8:1588): Note that using pc with stm is deprecated, so we should // perhaps handle this a bit differently. - __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit()); + __ stm(db_w, sp, restored_regs | RegList{sp, lr, pc}); { UseScratchRegisterScope temps(masm); diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index b75ffcc0656146..e6321c614c8995 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -112,12 +112,10 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ SmiTag(x11, argc); __ Push(x11, padreg); - // Add a slot for the receiver (if not already included), and round up to - // maintain alignment. 
+ // Round up to maintain alignment. Register slot_count = x2; Register slot_count_without_rounding = x12; - constexpr int additional_slots = kJSArgcIncludesReceiver ? 1 : 2; - __ Add(slot_count_without_rounding, argc, additional_slots); + __ Add(slot_count_without_rounding, argc, 1); __ Bic(slot_count, slot_count_without_rounding, 1); __ Claim(slot_count); @@ -130,8 +128,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // Store padding, if needed. __ Tbnz(slot_count_without_rounding, 0, &already_aligned); - __ Str(padreg, - MemOperand(x2, kJSArgcIncludesReceiver ? 0 : kSystemPointerSize)); + __ Str(padreg, MemOperand(x2)); __ Bind(&already_aligned); // TODO(victorgomes): When the arguments adaptor is completely removed, we @@ -151,11 +148,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ Add(src, fp, StandardFrameConstants::kCallerSPOffset + kSystemPointerSize); // Skip receiver. - if (kJSArgcIncludesReceiver) { - __ Sub(count, argc, kJSArgcReceiverSlots); - } else { - __ Mov(count, argc); - } + __ Sub(count, argc, kJSArgcReceiverSlots); __ CopyDoubleWords(dst, src, count); } @@ -197,9 +190,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - __ DropArguments(x1, kJSArgcIncludesReceiver - ? TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver); __ Ret(); __ Bind(&stack_overflow); @@ -322,11 +313,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Round the number of arguments down to the next even number, and claim // slots for the arguments. If the number of arguments was odd, the last // argument will overwrite one of the receivers pushed above. 
- Register argc_without_receiver = x12; - if (kJSArgcIncludesReceiver) { - argc_without_receiver = x11; - __ Sub(argc_without_receiver, x12, kJSArgcReceiverSlots); - } + Register argc_without_receiver = x11; + __ Sub(argc_without_receiver, x12, kJSArgcReceiverSlots); __ Bic(x10, x12, 1); // Check if we have enough stack space to push all arguments. @@ -390,9 +378,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Leave construct frame. __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ DropArguments(x1, kJSArgcIncludesReceiver - ? TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver); __ Ret(); // Otherwise we do a smi check and fall through to check if the return value @@ -432,19 +418,19 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) { __ Unreachable(); } -static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm, - Register code, Register scratch) { +static void AssertCodeTIsBaselineAllowClobber(MacroAssembler* masm, + Register code, Register scratch) { // Verify that the code kind is baseline code via the CodeKind. 
- __ Ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset)); - __ DecodeField(scratch); + __ Ldr(scratch, FieldMemOperand(code, CodeT::kFlagsOffset)); + __ DecodeField(scratch); __ Cmp(scratch, Operand(static_cast(CodeKind::BASELINE))); __ Assert(eq, AbortReason::kExpectedBaselineData); } -static void AssertCodeIsBaseline(MacroAssembler* masm, Register code, - Register scratch) { +static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code, + Register scratch) { DCHECK(!AreAliased(code, scratch)); - return AssertCodeIsBaselineAllowClobber(masm, code, scratch); + return AssertCodeTIsBaselineAllowClobber(masm, code, scratch); } // TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under @@ -459,12 +445,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, if (FLAG_debug_code) { Label not_baseline; __ B(ne, ¬_baseline); - if (V8_EXTERNAL_CODE_SPACE_BOOL) { - __ LoadCodeDataContainerCodeNonBuiltin(scratch1, sfi_data); - AssertCodeIsBaselineAllowClobber(masm, scratch1, scratch1); - } else { - AssertCodeIsBaseline(masm, sfi_data, scratch1); - } + AssertCodeTIsBaseline(masm, sfi_data, scratch1); __ B(eq, is_baseline); __ Bind(¬_baseline); } else { @@ -531,9 +512,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ Ldrh(w10, FieldMemOperand( x10, SharedFunctionInfo::kFormalParameterCountOffset)); - if (kJSArgcIncludesReceiver) { - __ Sub(x10, x10, kJSArgcReceiverSlots); - } + __ Sub(x10, x10, kJSArgcReceiverSlots); // Claim slots for arguments and receiver (rounded up to a multiple of two). __ Add(x11, x10, 2); __ Bic(x11, x11, 1); @@ -809,7 +788,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, // // Invoke the function by calling through JS entry trampoline builtin and // pop the faked function when we return. 
- Handle trampoline_code = + Handle trampoline_code = masm->isolate()->builtins()->code_handle(entry_trampoline); __ Call(trampoline_code, RelocInfo::CODE_TARGET); @@ -904,10 +883,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, masm->isolate())); __ Ldr(cp, MemOperand(scratch)); - // Claim enough space for the arguments, the function and the receiver (if - // it is not included in argc already), including an optional slot of - // padding. - constexpr int additional_slots = kJSArgcIncludesReceiver ? 2 : 3; + // Claim enough space for the arguments and the function, including an + // optional slot of padding. + constexpr int additional_slots = 2; __ Add(slots_to_claim, argc, additional_slots); __ Bic(slots_to_claim, slots_to_claim, 1); @@ -931,9 +909,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ Poke(receiver, 0); // Store function on the stack. __ SlotAddress(scratch, argc); - __ Str( - function, - MemOperand(scratch, kJSArgcIncludesReceiver ? 0 : kSystemPointerSize)); + __ Str(function, MemOperand(scratch)); // Copy arguments to the stack in a loop, in reverse order. // x4: argc. @@ -941,12 +917,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, Label loop, done; // Skip the argument set up if we have no arguments. - if (kJSArgcIncludesReceiver) { - __ Cmp(argc, JSParameterCount(0)); - __ B(eq, &done); - } else { - __ Cbz(argc, &done); - } + __ Cmp(argc, JSParameterCount(0)); + __ B(eq, &done); // scratch has been set to point to the location of the function, which // marks the end of the argument copy. @@ -960,11 +932,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ Str(x11, MemOperand(x0, kSystemPointerSize, PostIndex)); // Loop if we've not reached the end of copy marker. 
__ Cmp(x0, scratch); - if (kJSArgcIncludesReceiver) { - __ B(lt, &loop); - } else { - __ B(le, &loop); - } + __ B(lt, &loop); __ Bind(&done); @@ -994,9 +962,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // x28 : pointer cage base register (kPtrComprCageBaseRegister). // x29 : frame pointer (fp). - Handle builtin = is_construct - ? BUILTIN_CODE(masm->isolate(), Construct) - : masm->isolate()->builtins()->Call(); + Handle builtin = is_construct + ? BUILTIN_CODE(masm->isolate(), Construct) + : masm->isolate()->builtins()->Call(); __ Call(builtin, RelocInfo::CODE_TARGET); // Exit the JS internal frame and remove the parameters (except function), @@ -1054,9 +1022,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ Ldr(actual_params_size, MemOperand(fp, StandardFrameConstants::kArgCOffset)); __ lsl(actual_params_size, actual_params_size, kSystemPointerSizeLog2); - if (!kJSArgcIncludesReceiver) { - __ Add(actual_params_size, actual_params_size, Operand(kSystemPointerSize)); - } // If actual is bigger than formal, then we should use it to free up the stack // arguments. @@ -1157,22 +1122,16 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ASM_CODE_COMMENT(masm); DCHECK(!AreAliased(feedback_vector, x1, x3, optimization_marker)); - // TODO(v8:8394): The logging of first execution will break if - // feedback vectors are not allocated. We need to find a different way of - // logging these events if required. 
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker, - OptimizationMarker::kLogFirstExecution, - Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals( + masm, optimization_marker, + OptimizationMarker::kCompileTurbofan_NotConcurrent, + Runtime::kCompileTurbofan_NotConcurrent); TailCallRuntimeIfMarkerEquals(masm, optimization_marker, - OptimizationMarker::kCompileOptimized, - Runtime::kCompileOptimized_NotConcurrent); - TailCallRuntimeIfMarkerEquals(masm, optimization_marker, - OptimizationMarker::kCompileOptimizedConcurrent, - Runtime::kCompileOptimized_Concurrent); + OptimizationMarker::kCompileTurbofan_Concurrent, + Runtime::kCompileTurbofan_Concurrent); - // Marker should be one of LogFirstExecution / CompileOptimized / - // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach - // here. + // Marker should be one of CompileOptimized / CompileOptimizedConcurrent. + // InOptimizationQueue and None shouldn't reach here. if (FLAG_debug_code) { __ Unreachable(); } @@ -1277,10 +1236,9 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( DCHECK(!AreAliased(optimization_state, feedback_vector)); Label maybe_has_optimized_code; // Check if optimized code is available - __ TestAndBranchIfAllClear( - optimization_state, - FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker, - &maybe_has_optimized_code); + __ TestAndBranchIfAllClear(optimization_state, + FeedbackVector::kHasCompileOptimizedMarker, + &maybe_has_optimized_code); Register optimization_marker = optimization_state; __ DecodeField(optimization_marker); @@ -1719,10 +1677,8 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, __ Sub(num_args, num_args, 1); } - // Add receiver (if not already included in argc) and round up to an even - // number of slots. - constexpr int additional_slots = kJSArgcIncludesReceiver ? 1 : 2; - __ Add(slots_to_claim, num_args, additional_slots); + // Round up to an even number of slots. 
+ __ Add(slots_to_claim, num_args, 1); __ Bic(slots_to_claim, slots_to_claim, 1); // Add a stack check before pushing arguments. @@ -1746,10 +1702,8 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, const bool skip_receiver = receiver_mode == ConvertReceiverMode::kNullOrUndefined; - if (kJSArgcIncludesReceiver && skip_receiver) { + if (skip_receiver) { __ Sub(slots_to_copy, num_args, kJSArgcReceiverSlots); - } else if (!kJSArgcIncludesReceiver && !skip_receiver) { - __ Add(slots_to_copy, num_args, 1); } else { __ Mov(slots_to_copy, num_args); } @@ -1835,8 +1789,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl( // Tail call to the array construct stub (still in the caller // context at this point). - Handle code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl); - __ Jump(code, RelocInfo::CODE_TARGET); + __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl), + RelocInfo::CODE_TARGET); } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) { // Call the constructor with x0, x1, and x3 unmodified. __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread), @@ -2144,6 +2098,10 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) { __ LeaveFrame(StackFrame::STUB); } + if (V8_EXTERNAL_CODE_SPACE_BOOL) { + __ LoadCodeDataContainerCodeNonBuiltin(x0, x0); + } + // Load deoptimization data from the code object. // = [#deoptimization_data_offset] __ LoadTaggedPointerField( @@ -2210,9 +2168,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ Peek(arg_array, 2 * kSystemPointerSize); __ bind(&done); } - __ DropArguments(argc, kJSArgcIncludesReceiver - ? 
TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver); __ PushArgument(this_arg); // ----------- S t a t e ------------- @@ -2259,12 +2215,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { { Label non_zero; Register scratch = x10; - if (kJSArgcIncludesReceiver) { - __ Cmp(argc, JSParameterCount(0)); - __ B(gt, &non_zero); - } else { - __ Cbnz(argc, &non_zero); - } + __ Cmp(argc, JSParameterCount(0)); + __ B(gt, &non_zero); __ LoadRoot(scratch, RootIndex::kUndefinedValue); // Overwrite receiver with undefined, which will be the new receiver. // We do not need to overwrite the padding slot above it with anything. @@ -2283,11 +2235,9 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { Register copy_to = x11; Register count = x12; UseScratchRegisterScope temps(masm); - Register argc_without_receiver = argc; - if (kJSArgcIncludesReceiver) { - argc_without_receiver = temps.AcquireX(); - __ Sub(argc_without_receiver, argc, kJSArgcReceiverSlots); - } + Register argc_without_receiver = temps.AcquireX(); + __ Sub(argc_without_receiver, argc, kJSArgcReceiverSlots); + // CopyDoubleWords changes the count argument. __ Mov(count, argc_without_receiver); __ Tbz(argc_without_receiver, 0, &even); @@ -2355,9 +2305,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ Peek(arguments_list, 3 * kSystemPointerSize); __ bind(&done); } - __ DropArguments(argc, kJSArgcIncludesReceiver - ? TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver); __ PushArgument(this_argument); // ----------- S t a t e ------------- @@ -2415,9 +2363,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ bind(&done); } - __ DropArguments(argc, kJSArgcIncludesReceiver - ? 
TurboAssembler::kCountIncludesReceiver - : TurboAssembler::kCountExcludesReceiver); + __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver); // Push receiver (undefined). __ PushArgument(undefined_value); @@ -2453,11 +2399,7 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc, Register slots_to_copy = x10; Register slots_to_claim = x12; - if (kJSArgcIncludesReceiver) { - __ Mov(slots_to_copy, argc); - } else { - __ Add(slots_to_copy, argc, 1); // Copy with receiver. - } + __ Mov(slots_to_copy, argc); __ Mov(slots_to_claim, len); __ Tbz(slots_to_claim, 0, &even); @@ -2469,9 +2411,6 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc, Register scratch = x11; __ Add(slots_to_claim, len, 1); __ And(scratch, argc, 1); - if (!kJSArgcIncludesReceiver) { - __ Eor(scratch, scratch, 1); - } __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1)); } @@ -2495,7 +2434,7 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc, // static // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, - Handle code) { + Handle code) { // ----------- S t a t e ------------- // -- x1 : target // -- x0 : number of parameters on the stack @@ -2549,12 +2488,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // scenes and we want to avoid that in a loop. // TODO(all): Consider using Ldp and Stp. Register dst = x16; - if (kJSArgcIncludesReceiver) { - __ SlotAddress(dst, argc); - } else { - __ Add(dst, argc, Immediate(1)); // Consider the receiver as well. - __ SlotAddress(dst, dst); - } + __ SlotAddress(dst, argc); __ Add(argc, argc, len); // Update new argc. 
__ Bind(&loop); __ Sub(len, len, 1); @@ -2575,7 +2509,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // static void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, CallOrConstructMode mode, - Handle code) { + Handle code) { // ----------- S t a t e ------------- // -- x0 : the number of arguments // -- x3 : the new.target (for [[Construct]] calls) @@ -2608,9 +2542,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, Register len = x6; Label stack_done, stack_overflow; __ Ldr(len, MemOperand(fp, StandardFrameConstants::kArgCOffset)); - if (kJSArgcIncludesReceiver) { - __ Subs(len, len, kJSArgcReceiverSlots); - } + __ Subs(len, len, kJSArgcReceiverSlots); __ Subs(len, len, start_index); __ B(le, &stack_done); // Check for stack overflow. @@ -2628,12 +2560,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, __ lsl(start_index, start_index, kSystemPointerSizeLog2); __ Add(args_fp, args_fp, start_index); // Point to the position to copy to. - if (kJSArgcIncludesReceiver) { - __ SlotAddress(dst, argc); - } else { - __ Add(x10, argc, 1); - __ SlotAddress(dst, x10); - } + __ SlotAddress(dst, argc); // Update total number of arguments. 
__ Add(argc, argc, len); __ CopyDoubleWords(dst, args_fp, len); @@ -2655,14 +2582,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // -- x0 : the number of arguments // -- x1 : the function to call (checked to be a JSFunction) // ----------------------------------- - __ AssertFunction(x1); + __ AssertCallableFunction(x1); - Label class_constructor; __ LoadTaggedPointerField( x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); - __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset)); - __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::IsClassConstructorBit::kMask, - &class_constructor); // Enter the context of the function; ToObject has to run in the function // context, and we also need to take the global proxy from the function @@ -2738,15 +2661,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Ldrh(x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset)); __ InvokeFunctionCode(x1, no_reg, x2, x0, InvokeType::kJump); - - // The function is a "classConstructor", need to raise an exception. - __ Bind(&class_constructor); - { - FrameScope frame(masm, StackFrame::INTERNAL); - __ PushArgument(x1); - __ CallRuntime(Runtime::kThrowConstructorNonCallableError); - __ Unreachable(); - } } namespace { @@ -2802,9 +2716,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { Register scratch = x10; Register receiver = x14; - if (kJSArgcIncludesReceiver) { - __ Sub(argc, argc, kJSArgcReceiverSlots); - } + __ Sub(argc, argc, kJSArgcReceiverSlots); __ Add(total_argc, argc, bound_argc); __ Peek(receiver, 0); @@ -2873,11 +2785,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ Cbnz(counter, &loop); } // Update argc. 
- if (kJSArgcIncludesReceiver) { - __ Add(argc, total_argc, kJSArgcReceiverSlots); - } else { - __ Mov(argc, total_argc); - } + __ Add(argc, total_argc, kJSArgcReceiverSlots); } __ Bind(&no_bound_arguments); } @@ -2944,6 +2852,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { __ Cmp(instance_type, JS_PROXY_TYPE); __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq); + // Check if target is a wrapped function and call CallWrappedFunction external + // builtin + __ Cmp(instance_type, JS_WRAPPED_FUNCTION_TYPE); + __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction), + RelocInfo::CODE_TARGET, eq); + // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) // Check that the function is not a "classConstructor". __ Cmp(instance_type, JS_CLASS_CONSTRUCTOR_TYPE); @@ -3123,27 +3037,27 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // Save all parameter registers (see wasm-linkage.h). They might be // overwritten in the runtime call below. We don't have any callee-saved // registers in wasm, so no need to store anything else. - RegList gp_regs = 0; + RegList gp_regs; for (Register gp_param_reg : wasm::kGpParamRegisters) { - gp_regs |= gp_param_reg.bit(); + gp_regs.set(gp_param_reg); } // Also push x1, because we must push multiples of 16 bytes (see // {TurboAssembler::PushCPURegList}. 
- CHECK_EQ(1, NumRegs(gp_regs) % 2); - gp_regs |= x1.bit(); - CHECK_EQ(0, NumRegs(gp_regs) % 2); + CHECK_EQ(1, gp_regs.Count() % 2); + gp_regs.set(x1); + CHECK_EQ(0, gp_regs.Count() % 2); - RegList fp_regs = 0; + DoubleRegList fp_regs; for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) { - fp_regs |= fp_param_reg.bit(); + fp_regs.set(fp_param_reg); } - CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters) + 1); - CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters)); + CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters) + 1); + CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters)); CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs, - NumRegs(gp_regs)); + gp_regs.Count()); CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs, - NumRegs(fp_regs)); + fp_regs.Count()); __ PushXRegList(gp_regs); __ PushQRegList(fp_regs); @@ -3202,6 +3116,16 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { __ Trap(); } +void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { + // TODO(v8:12191): Implement for this platform. + __ Trap(); +} + +void Builtins::Generate_WasmResume(MacroAssembler* masm) { + // TODO(v8:12191): Implement for this platform. + __ Trap(); +} + void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { // Only needed on x64. __ Trap(); @@ -3944,8 +3868,9 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, // Save all allocatable double registers. 
CPURegList saved_double_registers( - CPURegister::kVRegister, kDRegSizeInBits, - RegisterConfiguration::Default()->allocatable_double_codes_mask()); + kDRegSizeInBits, + DoubleRegList::FromBits( + RegisterConfiguration::Default()->allocatable_double_codes_mask())); DCHECK_EQ(saved_double_registers.Count() % 2, 0); __ PushCPURegList(saved_double_registers); @@ -4196,12 +4121,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ Assert(eq, AbortReason::kExpectedBaselineData); } + if (FLAG_debug_code) { + AssertCodeTIsBaseline(masm, code_obj, x3); + } if (V8_EXTERNAL_CODE_SPACE_BOOL) { __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj); } - if (FLAG_debug_code) { - AssertCodeIsBaseline(masm, code_obj, x3); - } // Load the feedback vector. Register feedback_vector = x2; @@ -4340,7 +4265,7 @@ void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline( template void Builtins::Generate_DynamicCheckMapsTrampoline( - MacroAssembler* masm, Handle builtin_target) { + MacroAssembler* masm, Handle builtin_target) { FrameScope scope(masm, StackFrame::MANUAL); __ EnterFrame(StackFrame::INTERNAL); @@ -4349,7 +4274,9 @@ void Builtins::Generate_DynamicCheckMapsTrampoline( RegList registers = descriptor.allocatable_registers(); // FLAG_debug_code is enabled CSA checks will call C function and so we need // to save all CallerSaved registers too. - if (FLAG_debug_code) registers |= kCallerSaved.list(); + if (FLAG_debug_code) { + registers |= RegList::FromBits(static_cast(kCallerSaved.bits())); + } __ MaybeSaveRegisters(registers); // Load the immediate arguments from the deopt exit to pass to the builtin. 
@@ -4396,14 +4323,14 @@ void Builtins::Generate_DynamicCheckMapsTrampoline( } __ MaybeRestoreRegisters(registers); __ LeaveFrame(StackFrame::INTERNAL); - Handle deopt_eager = masm->isolate()->builtins()->code_handle( + Handle deopt_eager = masm->isolate()->builtins()->code_handle( Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager)); __ Jump(deopt_eager, RelocInfo::CODE_TARGET); __ Bind(&bailout); __ MaybeRestoreRegisters(registers); __ LeaveFrame(StackFrame::INTERNAL); - Handle deopt_bailout = masm->isolate()->builtins()->code_handle( + Handle deopt_bailout = masm->isolate()->builtins()->code_handle( Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout)); __ Jump(deopt_bailout, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq index a4bf6f002d20bb..c88a0c2800045f 100644 --- a/deps/v8/src/builtins/array-join.tq +++ b/deps/v8/src/builtins/array-join.tq @@ -55,7 +55,8 @@ LoadJoinElement( builtin LoadJoinTypedElement( context: Context, receiver: JSReceiver, k: uintptr): JSAny { const typedArray: JSTypedArray = UnsafeCast(receiver); - dcheck(!IsDetachedBuffer(typedArray.buffer)); + dcheck(!typed_array::IsJSArrayBufferViewDetachedOrOutOfBoundsBoolean( + typedArray)); return typed_array::LoadFixedTypedArrayElementAsTagged( typedArray.data_ptr, k, typed_array::KindForArrayType()); } @@ -103,7 +104,19 @@ CannotUseSameArrayAccessor(implicit context: Context)( _loadFn: LoadJoinElementFn, receiver: JSReceiver, _initialMap: Map, _initialLen: Number): bool { const typedArray: JSTypedArray = UnsafeCast(receiver); - return IsDetachedBuffer(typedArray.buffer); + // When this is called from toLocaleString(), the underlying buffer might get + // detached / resized (in the case of RAB / GSAB) during iterating the + // elements. When this is called from join(), it can happen only before the + // first element (during parameter conversion). 
The code below doesn't + // differentiate between these two cases, but does the checks in both cases. + if (IsDetachedBuffer(typedArray.buffer)) { + return true; + } + if (IsVariableLengthJSArrayBufferView(typedArray)) { + // TODO(v8:11111): Add a fast(er) path here. + return true; + } + return false; } // Calculates the running total length of the resulting string. If the @@ -387,6 +400,28 @@ transitioning ArrayJoin(implicit context: Context)( loadFn = LoadJoinTypedElement; } else if (kind == ElementsKind::BIGINT64_ELEMENTS) { loadFn = LoadJoinTypedElement; + } else if (kind == ElementsKind::RAB_GSAB_UINT8_ELEMENTS) { + loadFn = LoadJoinTypedElement; + } else if (kind == ElementsKind::RAB_GSAB_INT8_ELEMENTS) { + loadFn = LoadJoinTypedElement; + } else if (kind == ElementsKind::RAB_GSAB_UINT16_ELEMENTS) { + loadFn = LoadJoinTypedElement; + } else if (kind == ElementsKind::RAB_GSAB_INT16_ELEMENTS) { + loadFn = LoadJoinTypedElement; + } else if (kind == ElementsKind::RAB_GSAB_UINT32_ELEMENTS) { + loadFn = LoadJoinTypedElement; + } else if (kind == ElementsKind::RAB_GSAB_INT32_ELEMENTS) { + loadFn = LoadJoinTypedElement; + } else if (kind == ElementsKind::RAB_GSAB_FLOAT32_ELEMENTS) { + loadFn = LoadJoinTypedElement; + } else if (kind == ElementsKind::RAB_GSAB_FLOAT64_ELEMENTS) { + loadFn = LoadJoinTypedElement; + } else if (kind == ElementsKind::RAB_GSAB_UINT8_CLAMPED_ELEMENTS) { + loadFn = LoadJoinTypedElement; + } else if (kind == ElementsKind::RAB_GSAB_BIGUINT64_ELEMENTS) { + loadFn = LoadJoinTypedElement; + } else if (kind == ElementsKind::RAB_GSAB_BIGINT64_ELEMENTS) { + loadFn = LoadJoinTypedElement; } else { unreachable; } @@ -513,7 +548,7 @@ macro JoinStackPopInline(implicit context: Context)(receiver: JSReceiver): // Builtin call was not nested (receiver is the first entry) and // did not contain other nested arrays that expanded the stack. 
if (stack.objects[0] == receiver && len == kMinJoinStackSize) { - StoreFixedArrayElement(stack, 0, TheHole, SKIP_WRITE_BARRIER); + stack.objects[0] = TheHole; } else deferred { JoinStackPop(stack, receiver); @@ -616,12 +651,13 @@ transitioning javascript builtin TypedArrayPrototypeJoin( // Spec: ValidateTypedArray is applied to the this value prior to evaluating // the algorithm. - const typedArray: JSTypedArray = typed_array::ValidateTypedArray( + const length = typed_array::ValidateTypedArrayAndGetLength( context, receiver, '%TypedArray%.prototype.join'); - const length = Convert(typedArray.length); + const typedArray: JSTypedArray = UnsafeCast(receiver); return CycleProtectedArrayJoin( - false, typedArray, length, separator, Undefined, Undefined); + false, typedArray, Convert(length), separator, Undefined, + Undefined); } // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.tolocalestring @@ -632,11 +668,11 @@ transitioning javascript builtin TypedArrayPrototypeToLocaleString( // Spec: ValidateTypedArray is applied to the this value prior to evaluating // the algorithm. 
- const typedArray: JSTypedArray = typed_array::ValidateTypedArray( + const length = typed_array::ValidateTypedArrayAndGetLength( context, receiver, '%TypedArray%.prototype.toLocaleString'); - const length = Convert(typedArray.length); + const typedArray: JSTypedArray = UnsafeCast(receiver); return CycleProtectedArrayJoin( - true, typedArray, length, ',', locales, options); + true, typedArray, Convert(length), ',', locales, options); } } diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index 69e9faef533496..dbcc05de28cb3f 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -9,16 +9,18 @@ #include 'src/heap/factory-inl.h' #include 'src/objects/arguments.h' #include 'src/objects/bigint.h' +#include 'src/objects/call-site-info.h' #include 'src/objects/elements-kind.h' #include 'src/objects/free-space.h' #include 'src/objects/js-function.h' #include 'src/objects/js-generator.h' #include 'src/objects/js-promise.h' #include 'src/objects/js-regexp-string-iterator.h' +#include 'src/objects/js-shadow-realms.h' +#include 'src/objects/js-struct.h' #include 'src/objects/js-weak-refs.h' #include 'src/objects/objects.h' #include 'src/objects/source-text-module.h' -#include 'src/objects/stack-frame-info.h' #include 'src/objects/synthetic-module.h' #include 'src/objects/template-objects.h' #include 'src/torque/runtime-support.h' @@ -26,6 +28,8 @@ type void; type never; +type IntegerLiteral constexpr 'IntegerLiteral'; + type Tagged generates 'TNode' constexpr 'MaybeObject'; type StrongTagged extends Tagged generates 'TNode' constexpr 'Object'; @@ -209,6 +213,8 @@ extern class HashTable extends FixedArray generates 'TNode'; extern class OrderedHashMap extends HashTable; extern class OrderedHashSet extends HashTable; extern class OrderedNameDictionary extends HashTable; +extern class NameToIndexHashTable extends HashTable; +extern class RegisteredSymbolTable extends HashTable; extern class NameDictionary extends HashTable; extern 
class GlobalDictionary extends HashTable; extern class SimpleNumberDictionary extends HashTable; @@ -252,7 +258,8 @@ type CallableApiObject extends JSObject; // A JSProxy with the callable bit set. type CallableJSProxy extends JSProxy; -type Callable = JSFunction|JSBoundFunction|CallableJSProxy|CallableApiObject; +type Callable = JSFunction|JSBoundFunction|JSWrappedFunction|CallableJSProxy| + CallableApiObject; type WriteBarrierMode generates 'TNode' constexpr 'WriteBarrierMode'; @@ -306,6 +313,16 @@ extern enum ElementsKind extends int32 { BIGUINT64_ELEMENTS, BIGINT64_ELEMENTS, RAB_GSAB_UINT8_ELEMENTS, + RAB_GSAB_INT8_ELEMENTS, + RAB_GSAB_UINT16_ELEMENTS, + RAB_GSAB_INT16_ELEMENTS, + RAB_GSAB_UINT32_ELEMENTS, + RAB_GSAB_INT32_ELEMENTS, + RAB_GSAB_FLOAT32_ELEMENTS, + RAB_GSAB_FLOAT64_ELEMENTS, + RAB_GSAB_UINT8_CLAMPED_ELEMENTS, + RAB_GSAB_BIGUINT64_ELEMENTS, + RAB_GSAB_BIGINT64_ELEMENTS, // TODO(torque): Allow duplicate enum values. // FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND, // FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND, @@ -412,7 +429,7 @@ extern enum MessageTemplate { kWasmTrapRemByZero, kWasmTrapFloatUnrepresentable, kWasmTrapFuncSigMismatch, - kWasmTrapDataSegmentDropped, + kWasmTrapDataSegmentOutOfBounds, kWasmTrapElemSegmentDropped, kWasmTrapTableOutOfBounds, kWasmTrapRethrowNull, @@ -485,9 +502,6 @@ const kWasmArrayHeaderSize: const kHeapObjectHeaderSize: constexpr int32 generates 'HeapObject::kHeaderSize'; -const kDictModePrototypes: - constexpr bool generates 'V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL'; - type TheHole extends Oddball; type Null extends Oddball; type Undefined extends Oddball; @@ -561,7 +575,8 @@ extern class Filler extends HeapObject generates 'TNode'; // Like JSObject, but created from API function. 
@apiExposedInstanceTypeValue(0x422) @doNotGenerateCast -extern class JSApiObject extends JSObject generates 'TNode'; +extern class JSApiObject extends JSObjectWithEmbedderSlots + generates 'TNode'; // TODO(gsathya): This only exists to make JSApiObject instance type into a // range. @@ -604,7 +619,7 @@ transitioning macro ToIntegerImpl(implicit context: Context)(input: JSAny): if (Float64IsNaN(value)) return SmiConstant(0); value = math::Float64Trunc(value); // ToInteger normalizes -0 to +0. - if (value == 0.0) return SmiConstant(0); + if (value == 0) return SmiConstant(0); const result = ChangeFloat64ToTagged(value); dcheck(IsNumberNormalized(result)); return result; @@ -889,8 +904,12 @@ macro Float64IsNaN(n: float64): bool { } // The type of all tagged values that can safely be compared with TaggedEqual. -type TaggedWithIdentity = - JSReceiver|FixedArrayBase|Oddball|Map|WeakCell|Context|EmptyString; +@if(V8_ENABLE_WEBASSEMBLY) +type TaggedWithIdentity = JSReceiver | FixedArrayBase | Oddball | Map | + WeakCell | Context | EmptyString | WasmInternalFunction; +@ifnot(V8_ENABLE_WEBASSEMBLY) +type TaggedWithIdentity = JSReceiver | FixedArrayBase | Oddball | Map | + WeakCell | Context | EmptyString; extern operator '==' macro TaggedEqual(TaggedWithIdentity, Object): bool; extern operator '==' macro TaggedEqual(Object, TaggedWithIdentity): bool; @@ -976,6 +995,38 @@ extern operator '==' macro ConstexprInt32Equal( extern operator '!=' macro ConstexprInt32NotEqual( constexpr int32, constexpr int32): constexpr bool; +// IntegerLiteral overloads +extern macro ConstexprIntegerLiteralToInt31(constexpr IntegerLiteral): + constexpr int31; +extern macro ConstexprIntegerLiteralToInt32(constexpr IntegerLiteral): + constexpr int32; +extern macro ConstexprIntegerLiteralToUint32(constexpr IntegerLiteral): + constexpr uint32; +extern macro ConstexprIntegerLiteralToUint64(constexpr IntegerLiteral): + constexpr uint64; +extern macro ConstexprIntegerLiteralToIntptr(constexpr 
IntegerLiteral): + constexpr intptr; +extern macro ConstexprIntegerLiteralToUintptr(constexpr IntegerLiteral): + constexpr uintptr; +extern macro ConstexprIntegerLiteralToInt8(constexpr IntegerLiteral): + constexpr int8; +extern macro ConstexprIntegerLiteralToUint8(constexpr IntegerLiteral): + constexpr uint8; +extern macro ConstexprIntegerLiteralToFloat64(constexpr IntegerLiteral): + constexpr float64; + +extern operator '==' macro ConstexprIntegerLiteralEqual( + constexpr IntegerLiteral, constexpr IntegerLiteral): constexpr bool; +extern operator '+' macro ConstexprIntegerLiteralAdd( + constexpr IntegerLiteral, + constexpr IntegerLiteral): constexpr IntegerLiteral; +extern operator '<<' macro ConstexprIntegerLiteralLeftShift( + constexpr IntegerLiteral, + constexpr IntegerLiteral): constexpr IntegerLiteral; +extern operator '|' macro ConstexprIntegerLiteralBitwiseOr( + constexpr IntegerLiteral, + constexpr IntegerLiteral): constexpr IntegerLiteral; + extern operator '==' macro Word32Equal(int32, int32): bool; extern operator '==' macro Word32Equal(uint32, uint32): bool; extern operator '!=' macro Word32NotEqual(int32, int32): bool; @@ -1165,19 +1216,29 @@ extern macro IntPtrConstant(constexpr int32): intptr; extern macro Uint16Constant(constexpr uint16): uint16; extern macro Int32Constant(constexpr int31): int31; extern macro Int32Constant(constexpr int32): int32; +macro Int32Constant(i: constexpr IntegerLiteral): int32 { + return Int32Constant(ConstexprIntegerLiteralToInt32(i)); +} extern macro Int64Constant(constexpr int64): int64; extern macro Uint64Constant(constexpr uint64): uint64; extern macro Float64Constant(constexpr int32): float64; extern macro Float64Constant(constexpr float64): float64; +extern macro Float64Constant(constexpr IntegerLiteral): float64; extern macro SmiConstant(constexpr int31): Smi; extern macro SmiConstant(constexpr Smi): Smi; extern macro SmiConstant(constexpr MessageTemplate): Smi; extern macro SmiConstant(constexpr bool): Smi; 
extern macro SmiConstant(constexpr uint32): Smi; +macro SmiConstant(il: constexpr IntegerLiteral): Smi { + return SmiConstant(ConstexprIntegerLiteralToInt31(il)); +} extern macro BoolConstant(constexpr bool): bool; extern macro StringConstant(constexpr string): String; extern macro IntPtrConstant(constexpr ContextSlot): ContextSlot; extern macro IntPtrConstant(constexpr intptr): intptr; +macro IntPtrConstant(il: constexpr IntegerLiteral): intptr { + return IntPtrConstant(ConstexprIntegerLiteralToIntptr(il)); +} extern macro PointerConstant(constexpr RawPtr): RawPtr; extern macro SingleCharacterStringConstant(constexpr string): String; extern macro Float64SilenceNaN(float64): float64; @@ -1872,6 +1933,18 @@ extern operator '[]' macro LoadWeakFixedArrayElement( extern operator '[]' macro LoadUint8Ptr(RawPtr, intptr): uint8; +extern enum HashFieldType extends uint32 constexpr 'Name::HashFieldType' { + kHash, + kIntegerIndex, + kForwardingIndex, + kEmpty +} + +operator '==' macro HashFieldTypeEquals( + s1: HashFieldType, s2: HashFieldType): bool { + return Word32Equal(s1, s2); +} + const kNoHashSentinel: constexpr int32 generates 'PropertyArray::kNoHashSentinel'; extern macro LoadNameHash(Name): uint32; diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc index e5a3d4468606f0..914f032acdb088 100644 --- a/deps/v8/src/builtins/builtins-array-gen.cc +++ b/deps/v8/src/builtins/builtins-array-gen.cc @@ -128,11 +128,8 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody( TNode typed_array = CAST(receiver_); o_ = typed_array; - // TODO(v8:11111): Support RAB / GSAB. 
- TNode array_buffer = LoadJSArrayBufferViewBuffer(typed_array); - ThrowIfArrayBufferIsDetached(context_, array_buffer, name_); - - len_ = LoadJSTypedArrayLength(typed_array); + Label throw_detached(this, Label::kDeferred); + len_ = LoadJSTypedArrayLengthAndCheckDetached(typed_array, &throw_detached); Label throw_not_callable(this, Label::kDeferred); Label distinguish_types(this); @@ -146,13 +143,16 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody( BIND(&throw_not_callable); ThrowTypeError(context_, MessageTemplate::kCalledNonCallable, callbackfn_); + BIND(&throw_detached); + ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_); + Label unexpected_instance_type(this); BIND(&unexpected_instance_type); Unreachable(); std::vector elements_kinds = { #define ELEMENTS_KIND(Type, type, TYPE, ctype) TYPE##_ELEMENTS, - TYPED_ARRAYS(ELEMENTS_KIND) + TYPED_ARRAYS(ELEMENTS_KIND) RAB_GSAB_TYPED_ARRAYS(ELEMENTS_KIND) #undef ELEMENTS_KIND }; std::list